Update debug partition and row printing

This commit updates the debug printing of rows and partitions in
metrics, log messages, and web UIs. The new format is one we have
settled on after consulting with committers on Apache Impala. In all
cases I think it makes it easier to work with complex partitions. The
most invasive aspect of this change is that string and binary column
values are now printed with quotes, which affects a bunch of otherwise
unrelated tests.

I attempted to minimize duplication as much as possible in the debug
formatting functions in partition.cc, but a lot of unavoidable
duplication remains (this was true before making this change as well).

Change-Id: I4c444b155fe6621af65b86020be105fe56ae18ef
Reviewed-on: http://gerrit.cloudera.org:8080/5262
Tested-by: Kudu Jenkins
Reviewed-by: Todd Lipcon <t...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/kudu/repo
Commit: http://git-wip-us.apache.org/repos/asf/kudu/commit/f165ef7d
Tree: http://git-wip-us.apache.org/repos/asf/kudu/tree/f165ef7d
Diff: http://git-wip-us.apache.org/repos/asf/kudu/diff/f165ef7d

Branch: refs/heads/master
Commit: f165ef7d653137d29efea01d991945d65bef16d8
Parents: b30d4fc
Author: Dan Burkert <danburk...@apache.org>
Authored: Tue Nov 29 12:42:03 2016 -0800
Committer: Dan Burkert <danburk...@apache.org>
Committed: Thu Dec 1 22:46:56 2016 +0000

----------------------------------------------------------------------
 src/kudu/client/batcher.cc                     |   2 +-
 src/kudu/client/client-test.cc                 |  54 +--
 src/kudu/common/encoded_key-test.cc            |   6 +-
 src/kudu/common/key_util-test.cc               |   8 +-
 src/kudu/common/partial_row-test.cc            |   6 +-
 src/kudu/common/partition-test.cc              | 129 +++----
 src/kudu/common/partition.cc                   | 400 +++++++++++---------
 src/kudu/common/partition.h                    |  48 ++-
 src/kudu/common/row_changelist-test.cc         |   8 +-
 src/kudu/common/row_operations-test.cc         |   4 +-
 src/kudu/common/scan_spec-test.cc              |   8 +-
 src/kudu/common/schema-test.cc                 |   8 +-
 src/kudu/common/types.h                        |   4 +
 src/kudu/master/master-path-handlers.cc        |  35 +-
 src/kudu/tablet/compaction-test.cc             |  28 +-
 src/kudu/tablet/composite-pushdown-test.cc     |  57 ++-
 src/kudu/tablet/diskrowset-test.cc             |  15 +-
 src/kudu/tablet/major_delta_compaction-test.cc |   2 +-
 src/kudu/tablet/memrowset-test.cc              |  26 +-
 src/kudu/tablet/tablet-pushdown-test.cc        |   4 +-
 src/kudu/tablet/tablet-test-base.h             |   5 +-
 src/kudu/tablet/tablet.cc                      |   2 +-
 src/kudu/tablet/tablet_bootstrap-test.cc       |   8 +-
 src/kudu/tools/kudu-ts-cli-test.cc             |   2 +-
 src/kudu/tserver/tablet_server-test.cc         |  26 +-
 25 files changed, 465 insertions(+), 430 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/kudu/blob/f165ef7d/src/kudu/client/batcher.cc
----------------------------------------------------------------------
diff --git a/src/kudu/client/batcher.cc b/src/kudu/client/batcher.cc
index b1e9159..fb1d8a5 100644
--- a/src/kudu/client/batcher.cc
+++ b/src/kudu/client/batcher.cc
@@ -274,7 +274,7 @@ WriteRpc::WriteRpc(const scoped_refptr<Batcher>& batcher,
     bool partition_contains_row;
     CHECK(partition_schema.PartitionContainsRow(partition, row, 
&partition_contains_row).ok());
     CHECK(partition_contains_row)
-        << "Row " << partition_schema.RowDebugString(row)
+        << "Row " << partition_schema.PartitionKeyDebugString(row)
         << " not in partition " << 
partition_schema.PartitionDebugString(partition, *schema);
 #endif
 

http://git-wip-us.apache.org/repos/asf/kudu/blob/f165ef7d/src/kudu/client/client-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/client/client-test.cc b/src/kudu/client/client-test.cc
index 3c4f962..caadd24 100644
--- a/src/kudu/client/client-test.cc
+++ b/src/kudu/client/client-test.cc
@@ -935,7 +935,7 @@ TEST_F(ClientTest, TestScanEmptyProjection) {
 TEST_F(ClientTest, TestProjectInvalidColumn) {
   KuduScanner scanner(client_table_.get());
   Status s = scanner.SetProjectedColumns({ "column-doesnt-exist" });
-  ASSERT_EQ("Not found: Column: \"column-doesnt-exist\" was not found in the 
table schema.",
+  ASSERT_EQ(R"(Not found: Column: "column-doesnt-exist" was not found in the 
table schema.)",
             s.ToString());
 
   // Test trying to use a projection where a column is used multiple times.
@@ -1276,9 +1276,9 @@ TEST_F(ClientTest, TestNonCoveringRangePartitions) {
     ScanToStrings(&scanner, &rows);
 
     ASSERT_EQ(200, rows.size());
-    ASSERT_EQ("(int32 key=0, int32 int_val=0, string string_val=hello 0,"
+    ASSERT_EQ(R"((int32 key=0, int32 int_val=0, string string_val="hello 0",)"
               " int32 non_null_with_default=0)", rows.front());
-    ASSERT_EQ("(int32 key=299, int32 int_val=598, string string_val=hello 299,"
+    ASSERT_EQ(R"((int32 key=299, int32 int_val=598, string string_val="hello 
299",)"
               " int32 non_null_with_default=897)", rows.back());
   }
 
@@ -1291,9 +1291,9 @@ TEST_F(ClientTest, TestNonCoveringRangePartitions) {
     ScanToStrings(&scanner, &rows);
 
     ASSERT_EQ(100, rows.size());
-    ASSERT_EQ("(int32 key=200, int32 int_val=400, string string_val=hello 200,"
+    ASSERT_EQ(R"((int32 key=200, int32 int_val=400, string string_val="hello 
200",)"
               " int32 non_null_with_default=600)", rows.front());
-    ASSERT_EQ("(int32 key=299, int32 int_val=598, string string_val=hello 299,"
+    ASSERT_EQ(R"((int32 key=299, int32 int_val=598, string string_val="hello 
299",)"
               " int32 non_null_with_default=897)", rows.back());
   }
 
@@ -1306,9 +1306,9 @@ TEST_F(ClientTest, TestNonCoveringRangePartitions) {
     ScanToStrings(&scanner, &rows);
 
     ASSERT_EQ(100, rows.size());
-    ASSERT_EQ("(int32 key=0, int32 int_val=0, string string_val=hello 0,"
+    ASSERT_EQ(R"((int32 key=0, int32 int_val=0, string string_val="hello 0",)"
               " int32 non_null_with_default=0)", rows.front());
-    ASSERT_EQ("(int32 key=99, int32 int_val=198, string string_val=hello 99,"
+    ASSERT_EQ(R"((int32 key=99, int32 int_val=198, string string_val="hello 
99",)"
               " int32 non_null_with_default=297)", rows.back());
   }
 
@@ -1867,7 +1867,7 @@ TEST_F(ClientTest, TestInsertSingleRowManualBatch) {
   KuduInsert* ptr = insert.get();
   Status s = session->Apply(insert.release());
   ASSERT_EQ("Illegal state: Key not specified: "
-            "INSERT int32 int_val=54321, string string_val=hello world",
+            R"(INSERT int32 int_val=54321, string string_val="hello world")",
             s.ToString());
 
   // Get error
@@ -1970,8 +1970,9 @@ TEST_F(ClientTest, TestWriteTimeout) {
     gscoped_ptr<KuduError> error = GetSingleErrorFromSession(session.get());
     ASSERT_TRUE(error->status().IsTimedOut()) << error->status().ToString();
     ASSERT_STR_CONTAINS(error->status().ToString(),
-                        "GetTableLocations { table: 'client-testtb', 
partition-key: (int32 key=1),"
-                        " attempt: 1 } failed: timed out after deadline 
expired");
+                        "GetTableLocations { table: 'client-testtb', "
+                        "partition-key: (RANGE (key): 1), attempt: 1 } failed: 
"
+                        "timed out after deadline expired");
   }
 
   // Next time out the actual write on the tablet server.
@@ -2109,7 +2110,7 @@ TEST_F(ClientTest, TestMultipleMultiRowManualBatches) {
   ScanTableToStrings(client_table_.get(), &rows);
   std::sort(rows.begin(), rows.end());
   ASSERT_EQ(kNumRowsPerTablet, rows.size());
-  ASSERT_EQ("(int32 key=0, int32 int_val=0, string string_val=hello world, "
+  ASSERT_EQ(R"((int32 key=0, int32 int_val=0, string string_val="hello world", 
)"
             "int32 non_null_with_default=12345)"
             , rows[0]);
 }
@@ -2136,16 +2137,16 @@ TEST_F(ClientTest, TestBatchWithPartialError) {
   gscoped_ptr<KuduError> error = GetSingleErrorFromSession(session.get());
   ASSERT_TRUE(error->status().IsAlreadyPresent());
   ASSERT_EQ(error->failed_op().ToString(),
-            "INSERT int32 key=1, int32 int_val=1, string string_val=Attempted 
dup");
+            R"(INSERT int32 key=1, int32 int_val=1, string 
string_val="Attempted dup")");
 
   // Verify that the other row was successfully inserted
   vector<string> rows;
   ScanTableToStrings(client_table_.get(), &rows);
   ASSERT_EQ(2, rows.size());
   std::sort(rows.begin(), rows.end());
-  ASSERT_EQ("(int32 key=1, int32 int_val=1, string string_val=original row, "
+  ASSERT_EQ(R"((int32 key=1, int32 int_val=1, string string_val="original 
row", )"
             "int32 non_null_with_default=12345)", rows[0]);
-  ASSERT_EQ("(int32 key=2, int32 int_val=1, string string_val=Should succeed, "
+  ASSERT_EQ(R"((int32 key=2, int32 int_val=1, string string_val="Should 
succeed", )"
             "int32 non_null_with_default=12345)", rows[1]);
 }
 
@@ -2182,7 +2183,7 @@ void 
ClientTest::DoTestWriteWithDeadServer(WhichServerToKill which) {
   }
 
   ASSERT_EQ(error->failed_op().ToString(),
-            "INSERT int32 key=1, int32 int_val=1, string string_val=x");
+            R"(INSERT int32 key=1, int32 int_val=1, string string_val="x")");
 }
 
 // Test error handling cases where the master is down (tablet resolution fails)
@@ -2918,7 +2919,7 @@ TEST_F(ClientTest, TestMutationsWork) {
   vector<string> rows;
   ScanTableToStrings(client_table_.get(), &rows);
   ASSERT_EQ(1, rows.size());
-  ASSERT_EQ("(int32 key=1, int32 int_val=2, string string_val=original row, "
+  ASSERT_EQ(R"((int32 key=1, int32 int_val=2, string string_val="original 
row", )"
             "int32 non_null_with_default=12345)", rows[0]);
   rows.clear();
 
@@ -3005,7 +3006,7 @@ TEST_F(ClientTest, TestUpsert) {
   {
     vector<string> rows;
     ScanTableToStrings(client_table_.get(), &rows);
-    EXPECT_EQ(vector<string>({"(int32 key=1, int32 int_val=1, string 
string_val=original row, "
+    EXPECT_EQ(vector<string>({R"((int32 key=1, int32 int_val=1, string 
string_val="original row", )"
               "int32 non_null_with_default=12345)"}),
       rows);
   }
@@ -3017,7 +3018,7 @@ TEST_F(ClientTest, TestUpsert) {
   {
     vector<string> rows;
     ScanTableToStrings(client_table_.get(), &rows);
-    EXPECT_EQ(vector<string>({"(int32 key=1, int32 int_val=2, string 
string_val=upserted row, "
+    EXPECT_EQ(vector<string>({R"((int32 key=1, int32 int_val=2, string 
string_val="upserted row", )"
               "int32 non_null_with_default=12345)"}),
         rows);
   }
@@ -3035,7 +3036,7 @@ TEST_F(ClientTest, TestUpsert) {
   {
     vector<string> rows;
     ScanTableToStrings(client_table_.get(), &rows);
-    EXPECT_EQ(vector<string>({"(int32 key=1, int32 int_val=2, string 
string_val=updated row, "
+    EXPECT_EQ(vector<string>({R"((int32 key=1, int32 int_val=2, string 
string_val="updated row", )"
               "int32 non_null_with_default=999)"}),
         rows);
   }
@@ -3047,8 +3048,9 @@ TEST_F(ClientTest, TestUpsert) {
   {
     vector<string> rows;
     ScanTableToStrings(client_table_.get(), &rows);
-    EXPECT_EQ(vector<string>({"(int32 key=1, int32 int_val=3, string 
string_val=upserted row 2, "
-              "int32 non_null_with_default=999)"}),
+    EXPECT_EQ(vector<string>({
+          R"((int32 key=1, int32 int_val=3, string string_val="upserted row 
2", )"
+          "int32 non_null_with_default=999)"}),
         rows);
   }
 
@@ -3105,7 +3107,7 @@ TEST_F(ClientTest, TestWriteWithBadSchema) {
             "Client provided column int_val[int32 NOT NULL] "
             "not present in tablet");
   ASSERT_EQ(error->failed_op().ToString(),
-            "INSERT int32 key=12345, int32 int_val=12345, string 
string_val=x");
+            R"(INSERT int32 key=12345, int32 int_val=12345, string 
string_val="x")");
 }
 
 TEST_F(ClientTest, TestBasicAlterOperations) {
@@ -3556,7 +3558,7 @@ TEST_F(ClientTest, TestSeveralRowMutatesPerBatch) {
   vector<string> rows;
   ScanTableToStrings(client_table_.get(), &rows);
   ASSERT_EQ(1, rows.size());
-  ASSERT_EQ("(int32 key=1, int32 int_val=2, string string_val=, "
+  ASSERT_EQ(R"((int32 key=1, int32 int_val=2, string string_val="", )"
             "int32 non_null_with_default=12345)", rows[0]);
   rows.clear();
 
@@ -3568,7 +3570,7 @@ TEST_F(ClientTest, TestSeveralRowMutatesPerBatch) {
   FlushSessionOrDie(session);
   ScanTableToStrings(client_table_.get(), &rows);
   ASSERT_EQ(1, rows.size());
-  ASSERT_EQ("(int32 key=1, int32 int_val=2, string string_val=, "
+  ASSERT_EQ(R"((int32 key=1, int32 int_val=2, string string_val="", )"
             "int32 non_null_with_default=12345)", rows[0]);
   rows.clear();
 
@@ -3586,7 +3588,7 @@ TEST_F(ClientTest, TestSeveralRowMutatesPerBatch) {
   FlushSessionOrDie(session);
   ScanTableToStrings(client_table_.get(), &rows);
   ASSERT_EQ(1, rows.size());
-  ASSERT_EQ("(int32 key=1, int32 int_val=1, string string_val=, "
+  ASSERT_EQ(R"((int32 key=1, int32 int_val=1, string string_val="", )"
             "int32 non_null_with_default=12345)", rows[0]);
   rows.clear();
   LOG(INFO) << "Testing delete/insert in same batch, key " << 1 << ".";
@@ -3595,7 +3597,7 @@ TEST_F(ClientTest, TestSeveralRowMutatesPerBatch) {
   FlushSessionOrDie(session);
   ScanTableToStrings(client_table_.get(), &rows);
   ASSERT_EQ(1, rows.size());
-  ASSERT_EQ("(int32 key=1, int32 int_val=2, string string_val=, "
+  ASSERT_EQ(R"((int32 key=1, int32 int_val=2, string string_val="", )"
             "int32 non_null_with_default=12345)", rows[0]);
             rows.clear();
 }

http://git-wip-us.apache.org/repos/asf/kudu/blob/f165ef7d/src/kudu/common/encoded_key-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/common/encoded_key-test.cc 
b/src/kudu/common/encoded_key-test.cc
index 3b31218..25dd122 100644
--- a/src/kudu/common/encoded_key-test.cc
+++ b/src/kudu/common/encoded_key-test.cc
@@ -163,7 +163,7 @@ TEST_F(EncodedKeyTest, TestDecodeSimpleKeys) {
 
   {
     Slice val("aKey");
-    EXPECT_DECODED_KEY_EQ(STRING, "(string key=aKey)", "aKey", &val);
+    EXPECT_DECODED_KEY_EQ(STRING, R"((string key="aKey"))", "aKey", &val);
   }
 }
 
@@ -200,7 +200,7 @@ TEST_F(EncodedKeyTest, TestDecodeCompoundKeys) {
     builder.AddColumnKey(&key1);
     key.reset(builder.BuildEncodedKey());
 
-    EXPECT_ROWKEY_EQ(schema, "(uint16 key0=12345, string key1=aKey)", *key);
+    EXPECT_ROWKEY_EQ(schema, R"((uint16 key0=12345, string key1="aKey"))", 
*key);
   }
 
   {
@@ -217,7 +217,7 @@ TEST_F(EncodedKeyTest, TestDecodeCompoundKeys) {
     builder.AddColumnKey(&key2);
     key.reset(builder.BuildEncodedKey());
 
-    EXPECT_ROWKEY_EQ(schema, "(uint16 key0=12345, string key1=aKey, uint8 
key2=123)", *key);
+    EXPECT_ROWKEY_EQ(schema, R"((uint16 key0=12345, string key1="aKey", uint8 
key2=123))", *key);
   }
 }
 

http://git-wip-us.apache.org/repos/asf/kudu/blob/f165ef7d/src/kudu/common/key_util-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/common/key_util-test.cc b/src/kudu/common/key_util-test.cc
index b159a19..70dc181 100644
--- a/src/kudu/common/key_util-test.cc
+++ b/src/kudu/common/key_util-test.cc
@@ -100,12 +100,12 @@ TEST_F(KeyUtilTest, 
TestIncrementCompositeIntStringPrimaryKey) {
   EXPECT_OK(p_row.SetInt32(0, 1000));
   EXPECT_OK(p_row.SetStringNoCopy(1, "hello"));
   EXPECT_TRUE(key_util::IncrementPrimaryKey(&row, &arena_));
-  EXPECT_EQ("int32 k1=1000, string k2=hello\\000", p_row.ToString());
+  EXPECT_EQ(R"(int32 k1=1000, string k2="hello\000")", p_row.ToString());
 
   // There's no way to overflow a string key - you can always make it higher
   // by tacking on more \x00.
   EXPECT_TRUE(key_util::IncrementPrimaryKey(&row, &arena_));
-  EXPECT_EQ("int32 k1=1000, string k2=hello\\000\\000", p_row.ToString());
+  EXPECT_EQ(R"(int32 k1=1000, string k2="hello\000\000")", p_row.ToString());
 }
 
 TEST_F(KeyUtilTest, TestIncrementCompositeStringIntPrimaryKey) {
@@ -121,13 +121,13 @@ TEST_F(KeyUtilTest, 
TestIncrementCompositeStringIntPrimaryKey) {
   EXPECT_OK(p_row.SetStringNoCopy(0, "hello"));
   EXPECT_OK(p_row.SetInt32(1, 1000));
   EXPECT_TRUE(key_util::IncrementPrimaryKey(&row, &arena_));
-  EXPECT_EQ("string k1=hello, int32 k2=1001", p_row.ToString());
+  EXPECT_EQ(R"(string k1="hello", int32 k2=1001)", p_row.ToString());
 
   // Overflowing the int32 portion should tack \x00 onto the
   // string portion.
   EXPECT_OK(p_row.SetInt32(1, MathLimits<int32_t>::kMax));
   EXPECT_TRUE(key_util::IncrementPrimaryKey(&row, &arena_));
-  EXPECT_EQ("string k1=hello\\000, int32 k2=-2147483648", p_row.ToString());
+  EXPECT_EQ(R"(string k1="hello\000", int32 k2=-2147483648)", 
p_row.ToString());
 }
 
 } // namespace kudu

http://git-wip-us.apache.org/repos/asf/kudu/blob/f165ef7d/src/kudu/common/partial_row-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/common/partial_row-test.cc 
b/src/kudu/common/partial_row-test.cc
index 9f7bc79..561f50a 100644
--- a/src/kudu/common/partial_row-test.cc
+++ b/src/kudu/common/partial_row-test.cc
@@ -145,7 +145,7 @@ TEST_F(PartialRowTest, UnitTest) {
   EXPECT_OK(row.SetStringCopy("string_val", "hello world"));
   EXPECT_TRUE(row.IsColumnSet(1));
   EXPECT_TRUE(row.IsColumnSet(2));
-  EXPECT_EQ("int32 key=12345, int32 int_val=54321, string string_val=hello 
world",
+  EXPECT_EQ(R"(int32 key=12345, int32 int_val=54321, string string_val="hello 
world")",
             row.ToString());
   Slice slice;
   EXPECT_OK(row.GetString("string_val", &slice));
@@ -174,7 +174,7 @@ TEST_F(PartialRowTest, UnitTest) {
 
   // Set the NULL string back to non-NULL
   EXPECT_OK(row.SetStringCopy("string_val", "goodbye world"));
-  EXPECT_EQ("int32 key=12345, int32 int_val=54321, string string_val=goodbye 
world",
+  EXPECT_EQ(R"(int32 key=12345, int32 int_val=54321, string 
string_val="goodbye world")",
             row.ToString());
 
   // Unset some columns.
@@ -190,7 +190,7 @@ TEST_F(PartialRowTest, UnitTest) {
 
   // Set the binary column as a copy.
   EXPECT_OK(row.SetBinaryCopy("binary_val", "hello_world"));
-  EXPECT_EQ("int32 int_val=99999, binary binary_val=hello_world",
+  EXPECT_EQ(R"(int32 int_val=99999, binary binary_val="hello_world")",
               row.ToString());
   // Unset the binary column.
   EXPECT_OK(row.Unset("binary_val"));

http://git-wip-us.apache.org/repos/asf/kudu/blob/f165ef7d/src/kudu/common/partition-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/common/partition-test.cc 
b/src/kudu/common/partition-test.cc
index 26e3e9a..2afb5d6 100644
--- a/src/kudu/common/partition-test.cc
+++ b/src/kudu/common/partition-test.cc
@@ -74,7 +74,7 @@ void CheckCreateRangePartitions(const 
vector<pair<optional<string>, optional<str
   PartitionSchema partition_schema;
   ASSERT_OK(PartitionSchema::FromPB(PartitionSchemaPB(), schema, 
&partition_schema));
 
-  ASSERT_EQ("range columns: [col]", partition_schema.DebugString(schema));
+  ASSERT_EQ("RANGE (col)", partition_schema.DebugString(schema));
 
   vector<pair<KuduPartialRow, KuduPartialRow>> bounds;
   for (const auto& bound : raw_bounds) {
@@ -111,14 +111,6 @@ void CheckCreateRangePartitions(const 
vector<pair<optional<string>, optional<str
     EXPECT_EQ(upper, partitions[i].range_key_end());
     EXPECT_EQ(lower, partitions[i].partition_key_start());
     EXPECT_EQ(upper, partitions[i].partition_key_end());
-
-    string lower_debug = lower.empty() ? "<start>" :
-      strings::Substitute("(string col=$0)", strings::Utf8SafeCEscape(lower));
-    string upper_debug = upper.empty() ? "<end>" :
-      strings::Substitute("(string col=$0)", strings::Utf8SafeCEscape(upper));
-
-    EXPECT_EQ(strings::Substitute("range: [$0, $1)", lower_debug, upper_debug),
-              partition_schema.PartitionDebugString(partitions[i], schema));
   }
 }
 
@@ -141,7 +133,7 @@ TEST(PartitionTest, TestCompoundRangeKeyEncoding) {
   PartitionSchema partition_schema;
   ASSERT_OK(PartitionSchema::FromPB(schema_builder, schema, 
&partition_schema));
 
-  ASSERT_EQ("range columns: [c1, c2, c3]", 
partition_schema.DebugString(schema));
+  ASSERT_EQ("RANGE (c1, c2, c3)", partition_schema.DebugString(schema));
 
   vector<pair<KuduPartialRow, KuduPartialRow>> bounds;
   vector<KuduPartialRow> splits;
@@ -182,15 +174,13 @@ TEST(PartitionTest, TestCompoundRangeKeyEncoding) {
 
   EXPECT_TRUE(partitions[0].hash_buckets().empty());
 
-  EXPECT_EQ("range: [<start>, (string c1=, string c2=, string c3=a))",
+  EXPECT_EQ(R"(RANGE (c1, c2, c3) PARTITION VALUES < ("", "", "a"))",
             partition_schema.PartitionDebugString(partitions[0], schema));
-  EXPECT_EQ("range: [(string c1=, string c2=, string c3=a), "
-                    "(string c1=, string c2=, string c3=b))",
+  EXPECT_EQ(R"(RANGE (c1, c2, c3) PARTITION ("", "", "a") <= VALUES < ("", "", 
"b"))",
             partition_schema.PartitionDebugString(partitions[1], schema));
-  EXPECT_EQ("range: [(string c1=, string c2=b, string c3=c), "
-            "(string c1=d, string c2=, string c3=f))",
+  EXPECT_EQ(R"(RANGE (c1, c2, c3) PARTITION ("", "b", "c") <= VALUES < ("d", 
"", "f"))",
             partition_schema.PartitionDebugString(partitions[2], schema));
-  EXPECT_EQ("range: [(string c1=e, string c2=, string c3=), <end>)",
+  EXPECT_EQ(R"(RANGE (c1, c2, c3) PARTITION VALUES >= ("e", "", ""))",
             partition_schema.PartitionDebugString(partitions[3], schema));
 }
 
@@ -208,8 +198,7 @@ TEST(PartitionTest, TestPartitionKeyEncoding) {
   PartitionSchema partition_schema;
   ASSERT_OK(PartitionSchema::FromPB(schema_builder, schema, 
&partition_schema));
 
-  ASSERT_EQ("hash bucket components: [(bucket count: 32, columns: [a, b]), "
-            "(bucket count: 32, seed: 42, columns: [c])], range columns: [a, 
b, c]",
+  ASSERT_EQ("HASH (a, b) PARTITIONS 32, HASH (c) PARTITIONS 32 SEED 42, RANGE 
(a, b, c)",
             partition_schema.DebugString(schema));
 
   {
@@ -223,9 +212,9 @@ TEST(PartitionTest, TestPartitionKeyEncoding) {
                      "\x80\0\0\0" // a = 0
                      "\0\0",      // b = ""; c is elided
                      14), key);
-    string debug = "bucket=0, bucket=20, int32 a=0, string b=, string c=";
-    EXPECT_EQ(debug, partition_schema.RowDebugString(row));
-    EXPECT_EQ(debug, partition_schema.PartitionKeyDebugString(key, schema));
+    string expected = R"(HASH (a, b): 0, HASH (c): 20, RANGE (a, b, c): (0, 
"", ""))";
+    EXPECT_EQ(expected, partition_schema.PartitionKeyDebugString(row));
+    EXPECT_EQ(expected, partition_schema.PartitionKeyDebugString(key, schema));
   }
 
   {
@@ -240,9 +229,9 @@ TEST(PartitionTest, TestPartitionKeyEncoding) {
                      "\0\0",        // b = ""; c is elided
                      14), key);
 
-    string debug_b = "bucket=5, bucket=20, int32 a=1, string b=, string c=";
-    EXPECT_EQ(debug_b, partition_schema.RowDebugString(row));
-    EXPECT_EQ(debug_b, partition_schema.PartitionKeyDebugString(key, schema));
+    string expected = R"(HASH (a, b): 5, HASH (c): 20, RANGE (a, b, c): (1, 
"", ""))";
+    EXPECT_EQ(expected, partition_schema.PartitionKeyDebugString(row));
+    EXPECT_EQ(expected, partition_schema.PartitionKeyDebugString(key, schema));
   }
 
   {
@@ -260,9 +249,9 @@ TEST(PartitionTest, TestPartitionKeyEncoding) {
                      "c",         // c = "c"
                      16), key);
 
-    string debug = "bucket=26, bucket=29, int32 a=0, string b=b, string c=c";
-    EXPECT_EQ(debug, partition_schema.RowDebugString(row));
-    EXPECT_EQ(debug, partition_schema.PartitionKeyDebugString(key, schema));
+    string expected = R"(HASH (a, b): 26, HASH (c): 29, RANGE (a, b, c): (0, 
"b", "c"))";
+    EXPECT_EQ(expected, partition_schema.PartitionKeyDebugString(row));
+    EXPECT_EQ(expected, partition_schema.PartitionKeyDebugString(key, schema));
   }
 
   {
@@ -280,9 +269,9 @@ TEST(PartitionTest, TestPartitionKeyEncoding) {
                      "c",          // c = "c"
                      16), key);
 
-    string debug = "bucket=0, bucket=29, int32 a=1, string b=b, string c=c";
-    EXPECT_EQ(debug, partition_schema.RowDebugString(row));
-    EXPECT_EQ(debug, partition_schema.PartitionKeyDebugString(key, schema));
+    string expected = R"(HASH (a, b): 0, HASH (c): 29, RANGE (a, b, c): (1, 
"b", "c"))";
+    EXPECT_EQ(expected, partition_schema.PartitionKeyDebugString(row));
+    EXPECT_EQ(expected, partition_schema.PartitionKeyDebugString(key, schema));
   }
 }
 
@@ -404,8 +393,7 @@ TEST(PartitionTest, TestCreateHashBucketPartitions) {
   PartitionSchema partition_schema;
   ASSERT_OK(PartitionSchema::FromPB(schema_builder, schema, 
&partition_schema));
 
-  ASSERT_EQ("hash bucket components: [(bucket count: 3, seed: 42, columns: 
[a])]",
-            partition_schema.DebugString(schema));
+  ASSERT_EQ("HASH (a) PARTITIONS 3 SEED 42", 
partition_schema.DebugString(schema));
 
   // Encoded Partition Keys:
   //
@@ -420,9 +408,9 @@ TEST(PartitionTest, TestCreateHashBucketPartitions) {
   EXPECT_EQ(0, partitions[0].hash_buckets()[0]);
   EXPECT_EQ("", partitions[0].range_key_start());
   EXPECT_EQ("", partitions[0].range_key_end());
-  EXPECT_EQ(string("", 0), partitions[0].partition_key_start());
+  EXPECT_EQ("", partitions[0].partition_key_start());
   EXPECT_EQ(string("\0\0\0\1", 4), partitions[0].partition_key_end());
-  EXPECT_EQ("hash buckets: (0)",
+  EXPECT_EQ("HASH (a) PARTITION 0",
             partition_schema.PartitionDebugString(partitions[0], schema));
 
   EXPECT_EQ(1, partitions[1].hash_buckets()[0]);
@@ -430,15 +418,15 @@ TEST(PartitionTest, TestCreateHashBucketPartitions) {
   EXPECT_EQ("", partitions[1].range_key_end());
   EXPECT_EQ(string("\0\0\0\1", 4), partitions[1].partition_key_start());
   EXPECT_EQ(string("\0\0\0\2", 4), partitions[1].partition_key_end());
-  EXPECT_EQ("hash buckets: (1)",
+  EXPECT_EQ("HASH (a) PARTITION 1",
             partition_schema.PartitionDebugString(partitions[1], schema));
 
   EXPECT_EQ(2, partitions[2].hash_buckets()[0]);
   EXPECT_EQ("", partitions[2].range_key_start());
   EXPECT_EQ("", partitions[2].range_key_end());
   EXPECT_EQ(string("\0\0\0\2", 4), partitions[2].partition_key_start());
-  EXPECT_EQ(string("", 0), partitions[2].partition_key_end());
-  EXPECT_EQ("hash buckets: (2)",
+  EXPECT_EQ("", partitions[2].partition_key_end());
+  EXPECT_EQ("HASH (a) PARTITION 2",
             partition_schema.PartitionDebugString(partitions[2], schema));
 }
 
@@ -456,8 +444,7 @@ TEST(PartitionTest, TestCreatePartitions) {
   PartitionSchema partition_schema;
   ASSERT_OK(PartitionSchema::FromPB(schema_builder, schema, 
&partition_schema));
 
-  ASSERT_EQ("hash bucket components: [(bucket count: 2, columns: [a]), "
-            "(bucket count: 2, columns: [b])], range columns: [a, b, c]",
+  ASSERT_EQ("HASH (a) PARTITIONS 2, HASH (b) PARTITIONS 2, RANGE (a, b, c)",
             partition_schema.DebugString(schema));
 
   // Split Rows:
@@ -508,11 +495,12 @@ TEST(PartitionTest, TestCreatePartitions) {
 
   EXPECT_EQ(0, partitions[0].hash_buckets()[0]);
   EXPECT_EQ(0, partitions[0].hash_buckets()[1]);
-  EXPECT_EQ(string("", 0), partitions[0].range_key_start());
+  EXPECT_EQ("", partitions[0].range_key_start());
   EXPECT_EQ(string("a1\0\0b1\0\0c1", 10), partitions[0].range_key_end());
-  EXPECT_EQ(string("", 0), partitions[0].partition_key_start());
+  EXPECT_EQ("", partitions[0].partition_key_start());
   EXPECT_EQ(string("\0\0\0\0" "\0\0\0\0" "a1\0\0b1\0\0c1", 18), 
partitions[0].partition_key_end());
-  EXPECT_EQ("hash buckets: (0, 0), range: [<start>, (string a=a1, string b=b1, 
string c=c1))",
+  EXPECT_EQ("HASH (a) PARTITION 0, HASH (b) PARTITION 0, "
+            R"(RANGE (a, b, c) PARTITION VALUES < ("a1", "b1", "c1"))",
             partition_schema.PartitionDebugString(partitions[0], schema));
 
   EXPECT_EQ(0, partitions[1].hash_buckets()[0]);
@@ -522,28 +510,28 @@ TEST(PartitionTest, TestCreatePartitions) {
   EXPECT_EQ(string("\0\0\0\0" "\0\0\0\0" "a1\0\0b1\0\0c1", 18),
             partitions[1].partition_key_start());
   EXPECT_EQ(string("\0\0\0\0" "\0\0\0\0" "a2\0\0b2\0\0", 16), 
partitions[1].partition_key_end());
-  EXPECT_EQ("hash buckets: (0, 0), range: [(string a=a1, string b=b1, string 
c=c1), "
-                                          "(string a=a2, string b=b2, string 
c=))",
+  EXPECT_EQ("HASH (a) PARTITION 0, HASH (b) PARTITION 0, "
+            R"(RANGE (a, b, c) PARTITION ("a1", "b1", "c1") <= VALUES < ("a2", 
"b2", ""))",
             partition_schema.PartitionDebugString(partitions[1], schema));
 
   EXPECT_EQ(0, partitions[2].hash_buckets()[0]);
   EXPECT_EQ(0, partitions[2].hash_buckets()[1]);
   EXPECT_EQ(string("a2\0\0b2\0\0", 8), partitions[2].range_key_start());
-  EXPECT_EQ(string("", 0), partitions[2].range_key_end());
+  EXPECT_EQ("", partitions[2].range_key_end());
   EXPECT_EQ(string("\0\0\0\0" "\0\0\0\0" "a2\0\0b2\0\0", 16), 
partitions[2].partition_key_start());
   EXPECT_EQ(string("\0\0\0\0" "\0\0\0\1", 8), 
partitions[2].partition_key_end());
-  EXPECT_EQ("hash buckets: (0, 0), "
-            "range: [(string a=a2, string b=b2, string c=), <end>)",
+  EXPECT_EQ("HASH (a) PARTITION 0, HASH (b) PARTITION 0, "
+            R"(RANGE (a, b, c) PARTITION VALUES >= ("a2", "b2", ""))",
             partition_schema.PartitionDebugString(partitions[2], schema));
 
   EXPECT_EQ(0, partitions[3].hash_buckets()[0]);
   EXPECT_EQ(1, partitions[3].hash_buckets()[1]);
-  EXPECT_EQ(string("", 0), partitions[3].range_key_start());
+  EXPECT_EQ("", partitions[3].range_key_start());
   EXPECT_EQ(string("a1\0\0b1\0\0c1", 10), partitions[3].range_key_end());
   EXPECT_EQ(string("\0\0\0\0" "\0\0\0\1", 8), 
partitions[3].partition_key_start());
   EXPECT_EQ(string("\0\0\0\0" "\0\0\0\1" "a1\0\0b1\0\0c1", 18), 
partitions[3].partition_key_end());
-  EXPECT_EQ("hash buckets: (0, 1), "
-            "range: [<start>, (string a=a1, string b=b1, string c=c1))",
+  EXPECT_EQ("HASH (a) PARTITION 0, HASH (b) PARTITION 1, "
+            R"(RANGE (a, b, c) PARTITION VALUES < ("a1", "b1", "c1"))",
             partition_schema.PartitionDebugString(partitions[3], schema));
 
   EXPECT_EQ(0, partitions[4].hash_buckets()[0]);
@@ -553,26 +541,28 @@ TEST(PartitionTest, TestCreatePartitions) {
   EXPECT_EQ(string("\0\0\0\0" "\0\0\0\1" "a1\0\0b1\0\0c1", 18),
             partitions[4].partition_key_start());
   EXPECT_EQ(string("\0\0\0\0" "\0\0\0\1" "a2\0\0b2\0\0", 16), 
partitions[4].partition_key_end());
-  EXPECT_EQ("hash buckets: (0, 1), range: [(string a=a1, string b=b1, string 
c=c1), "
-                                          "(string a=a2, string b=b2, string 
c=))",
+  EXPECT_EQ("HASH (a) PARTITION 0, HASH (b) PARTITION 1, "
+            R"(RANGE (a, b, c) PARTITION ("a1", "b1", "c1") <= VALUES < ("a2", 
"b2", ""))",
             partition_schema.PartitionDebugString(partitions[4], schema));
 
   EXPECT_EQ(0, partitions[5].hash_buckets()[0]);
   EXPECT_EQ(1, partitions[5].hash_buckets()[1]);
   EXPECT_EQ(string("a2\0\0b2\0\0", 8), partitions[5].range_key_start());
-  EXPECT_EQ(string("", 0), partitions[5].range_key_end());
+  EXPECT_EQ("", partitions[5].range_key_end());
   EXPECT_EQ(string("\0\0\0\0" "\0\0\0\1" "a2\0\0b2\0\0", 16), 
partitions[5].partition_key_start());
   EXPECT_EQ(string("\0\0\0\1", 4), partitions[5].partition_key_end());
-  EXPECT_EQ("hash buckets: (0, 1), range: [(string a=a2, string b=b2, string 
c=), <end>)",
+  EXPECT_EQ("HASH (a) PARTITION 0, HASH (b) PARTITION 1, "
+            R"(RANGE (a, b, c) PARTITION VALUES >= ("a2", "b2", ""))",
             partition_schema.PartitionDebugString(partitions[5], schema));
 
   EXPECT_EQ(1, partitions[6].hash_buckets()[0]);
   EXPECT_EQ(0, partitions[6].hash_buckets()[1]);
-  EXPECT_EQ(string("", 0), partitions[6].range_key_start());
+  EXPECT_EQ("", partitions[6].range_key_start());
   EXPECT_EQ(string("a1\0\0b1\0\0c1", 10), partitions[6].range_key_end());
   EXPECT_EQ(string("\0\0\0\1", 4), partitions[6].partition_key_start());
   EXPECT_EQ(string("\0\0\0\1" "\0\0\0\0" "a1\0\0b1\0\0c1", 18), 
partitions[6].partition_key_end());
-  EXPECT_EQ("hash buckets: (1, 0), range: [<start>, (string a=a1, string b=b1, 
string c=c1))",
+  EXPECT_EQ("HASH (a) PARTITION 1, HASH (b) PARTITION 0, "
+            R"(RANGE (a, b, c) PARTITION VALUES < ("a1", "b1", "c1"))",
             partition_schema.PartitionDebugString(partitions[6], schema));
 
   EXPECT_EQ(1, partitions[7].hash_buckets()[0]);
@@ -582,26 +572,28 @@ TEST(PartitionTest, TestCreatePartitions) {
   EXPECT_EQ(string("\0\0\0\1" "\0\0\0\0" "a1\0\0b1\0\0c1", 18),
             partitions[7].partition_key_start());
   EXPECT_EQ(string("\0\0\0\1" "\0\0\0\0" "a2\0\0b2\0\0", 16), 
partitions[7].partition_key_end());
-  EXPECT_EQ("hash buckets: (1, 0), range: [(string a=a1, string b=b1, string 
c=c1), "
-                                          "(string a=a2, string b=b2, string 
c=))",
+  EXPECT_EQ("HASH (a) PARTITION 1, HASH (b) PARTITION 0, "
+            R"(RANGE (a, b, c) PARTITION ("a1", "b1", "c1") <= VALUES < ("a2", 
"b2", ""))",
             partition_schema.PartitionDebugString(partitions[7], schema));
 
   EXPECT_EQ(1, partitions[8].hash_buckets()[0]);
   EXPECT_EQ(0, partitions[8].hash_buckets()[1]);
   EXPECT_EQ(string("a2\0\0b2\0\0", 8), partitions[8].range_key_start());
-  EXPECT_EQ(string("", 0), partitions[8].range_key_end());
+  EXPECT_EQ("", partitions[8].range_key_end());
   EXPECT_EQ(string("\0\0\0\1" "\0\0\0\0" "a2\0\0b2\0\0", 16), 
partitions[8].partition_key_start());
   EXPECT_EQ(string("\0\0\0\1" "\0\0\0\1", 8), 
partitions[8].partition_key_end());
-  EXPECT_EQ("hash buckets: (1, 0), range: [(string a=a2, string b=b2, string 
c=), <end>)",
+  EXPECT_EQ("HASH (a) PARTITION 1, HASH (b) PARTITION 0, "
+            R"(RANGE (a, b, c) PARTITION VALUES >= ("a2", "b2", ""))",
             partition_schema.PartitionDebugString(partitions[8], schema));
 
   EXPECT_EQ(1, partitions[9].hash_buckets()[0]);
   EXPECT_EQ(1, partitions[9].hash_buckets()[1]);
-  EXPECT_EQ(string("", 0), partitions[9].range_key_start());
+  EXPECT_EQ("", partitions[9].range_key_start());
   EXPECT_EQ(string("a1\0\0b1\0\0c1", 10), partitions[9].range_key_end());
   EXPECT_EQ(string("\0\0\0\1" "\0\0\0\1", 8), 
partitions[9].partition_key_start());
   EXPECT_EQ(string("\0\0\0\1" "\0\0\0\1" "a1\0\0b1\0\0c1", 18), 
partitions[9].partition_key_end());
-  EXPECT_EQ("hash buckets: (1, 1), range: [<start>, (string a=a1, string b=b1, 
string c=c1))",
+  EXPECT_EQ("HASH (a) PARTITION 1, HASH (b) PARTITION 1, "
+            R"(RANGE (a, b, c) PARTITION VALUES < ("a1", "b1", "c1"))",
             partition_schema.PartitionDebugString(partitions[9], schema));
 
   EXPECT_EQ(1, partitions[10].hash_buckets()[0]);
@@ -611,17 +603,18 @@ TEST(PartitionTest, TestCreatePartitions) {
   EXPECT_EQ(string("\0\0\0\1" "\0\0\0\1" "a1\0\0b1\0\0c1", 18),
             partitions[10].partition_key_start());
   EXPECT_EQ(string("\0\0\0\1" "\0\0\0\1" "a2\0\0b2\0\0", 16), 
partitions[10].partition_key_end());
-  EXPECT_EQ("hash buckets: (1, 1), range: [(string a=a1, string b=b1, string 
c=c1), "
-                                          "(string a=a2, string b=b2, string 
c=))",
+  EXPECT_EQ("HASH (a) PARTITION 1, HASH (b) PARTITION 1, "
+            R"(RANGE (a, b, c) PARTITION ("a1", "b1", "c1") <= VALUES < ("a2", 
"b2", ""))",
             partition_schema.PartitionDebugString(partitions[10], schema));
 
   EXPECT_EQ(1, partitions[11].hash_buckets()[0]);
   EXPECT_EQ(1, partitions[11].hash_buckets()[1]);
   EXPECT_EQ(string("a2\0\0b2\0\0", 8), partitions[11].range_key_start());
-  EXPECT_EQ(string("", 0), partitions[11].range_key_end());
+  EXPECT_EQ("", partitions[11].range_key_end());
   EXPECT_EQ(string("\0\0\0\1" "\0\0\0\1" "a2\0\0b2\0\0", 16), 
partitions[11].partition_key_start());
-  EXPECT_EQ(string("", 0), partitions[11].partition_key_end());
-  EXPECT_EQ("hash buckets: (1, 1), range: [(string a=a2, string b=b2, string 
c=), <end>)",
+  EXPECT_EQ("", partitions[11].partition_key_end());
+  EXPECT_EQ("HASH (a) PARTITION 1, HASH (b) PARTITION 1, "
+            R"(RANGE (a, b, c) PARTITION VALUES >= ("a2", "b2", ""))",
             partition_schema.PartitionDebugString(partitions[11], schema));
 }
 
@@ -714,7 +707,7 @@ TEST(PartitionTest, TestIncrementRangePartitionBounds) {
     ASSERT_OK(lower_bound.SetInt8("c3", 127));
     Status s = 
partition_schema.MakeLowerBoundRangePartitionKeyInclusive(&lower_bound);
     ASSERT_EQ("Invalid argument: Exclusive lower bound range partition key 
must not have "
-              "maximum values for all components: int8 c1=127, int8 c2=127, 
int8 c3=127",
+              "maximum values for all components: (127, 127, 127)",
               s.ToString());
 }
 

http://git-wip-us.apache.org/repos/asf/kudu/blob/f165ef7d/src/kudu/common/partition.cc
----------------------------------------------------------------------
diff --git a/src/kudu/common/partition.cc b/src/kudu/common/partition.cc
index b578831..6d4f243 100644
--- a/src/kudu/common/partition.cc
+++ b/src/kudu/common/partition.cc
@@ -30,6 +30,7 @@
 #include "kudu/gutil/strings/join.h"
 #include "kudu/gutil/strings/substitute.h"
 #include "kudu/util/hash_util.h"
+#include "kudu/util/url-coding.h"
 
 using std::pair;
 using std::set;
@@ -126,7 +127,7 @@ Status ExtractColumnIds(const 
RepeatedPtrField<PartitionSchemaPB_ColumnIdentifie
 void SetColumnIdentifiers(const vector<ColumnId>& column_ids,
                           
RepeatedPtrField<PartitionSchemaPB_ColumnIdentifierPB>* identifiers) {
     identifiers->Reserve(column_ids.size());
-    for (ColumnId column_id : column_ids) {
+    for (const ColumnId& column_id : column_ids) {
       identifiers->Add()->set_id(column_id);
     }
 }
@@ -365,7 +366,7 @@ Status PartitionSchema::CreatePartitions(const 
vector<KuduPartialRow>& split_row
   }
 
   unordered_set<int> range_column_idxs;
-  for (ColumnId column_id : range_schema_.column_ids) {
+  for (const ColumnId& column_id : range_schema_.column_ids) {
     int column_idx = schema.find_column_by_id(column_id);
     if (column_idx == Schema::kColumnNotFound) {
       return Status::InvalidArgument(Substitute("range partition column ID $0 "
@@ -534,36 +535,9 @@ Status PartitionSchema::DecodeHashBuckets(Slice* 
encoded_key,
   return Status::OK();
 }
 
-string PartitionSchema::PartitionDebugString(const Partition& partition,
-                                             const Schema& schema) const {
-  string s;
-
-  if (!partition.hash_buckets().empty()) {
-    vector<string> components;
-    for (int32_t bucket : partition.hash_buckets()) {
-      components.push_back(Substitute("$0", bucket));
-    }
-    s.append("hash buckets: (");
-    s.append(JoinStrings(components, ", "));
-    if (!range_schema_.column_ids.empty()) {
-      s.append("), ");
-    } else {
-      s.append(")");
-    }
-  }
-
-  if (!range_schema_.column_ids.empty()) {
-    s.append("range: ");
-    s.append(RangePartitionDebugString(partition.range_key_start().ToString(),
-                                       partition.range_key_end().ToString(),
-                                       schema));
-  }
-  return s;
-}
-
 bool PartitionSchema::IsRangePartitionKeyEmpty(const KuduPartialRow& row) 
const {
   ConstContiguousRow const_row(row.schema(), row.row_data_);
-  for (ColumnId column_id : range_schema_.column_ids) {
+  for (const ColumnId& column_id : range_schema_.column_ids) {
     if (row.IsColumnSet(row.schema()->find_column_by_id(column_id))) return 
false;
   }
   return true;
@@ -573,11 +547,10 @@ void 
PartitionSchema::AppendRangeDebugStringComponentsOrMin(const KuduPartialRow
                                                             vector<string>* 
components) const {
   ConstContiguousRow const_row(row.schema(), row.row_data_);
 
-  for (ColumnId column_id : range_schema_.column_ids) {
-    string column;
+  for (const ColumnId& column_id : range_schema_.column_ids) {
     int32_t column_idx = row.schema()->find_column_by_id(column_id);
     if (column_idx == Schema::kColumnNotFound) {
-      components->push_back("<unknown-column>");
+      components->emplace_back("<unknown-column>");
       continue;
     }
     const ColumnSchema& column_schema = row.schema()->column(column_idx);
@@ -586,86 +559,115 @@ void 
PartitionSchema::AppendRangeDebugStringComponentsOrMin(const KuduPartialRow
       uint8_t min_value[kLargestTypeSize];
       column_schema.type_info()->CopyMinValue(&min_value);
       SimpleConstCell cell(&column_schema, &min_value);
-      column_schema.DebugCellAppend(cell, &column);
+      components->emplace_back(column_schema.Stringify(cell.ptr()));
     } else {
-      column_schema.DebugCellAppend(const_row.cell(column_idx), &column);
+      
components->emplace_back(column_schema.Stringify(const_row.cell_ptr(column_idx)));
     }
+  }
+}
 
-    components->push_back(column);
+namespace {
+// Converts a list of column IDs to a string with the column names separated by
+// a comma character.
+string ColumnIdsToColumnNames(const Schema& schema,
+                              const vector<ColumnId>& column_ids) {
+  vector<string> names;
+  for (const ColumnId& column_id : column_ids) {
+    names.push_back(schema.column(schema.find_column_by_id(column_id)).name());
   }
+
+  return JoinStrings(names, ", ");
 }
+} // namespace
 
-string PartitionSchema::RowDebugString(const ConstContiguousRow& row) const {
+string PartitionSchema::PartitionDebugString(const Partition& partition,
+                                             const Schema& schema) const {
   vector<string> components;
+  if (partition.hash_buckets_.size() != hash_bucket_schemas_.size()) {
+    return "<hash-partition-error>";
+  }
 
-  for (const HashBucketSchema& hash_bucket_schema : hash_bucket_schemas_) {
-    int32_t bucket;
-    Status s = BucketForRow(row, hash_bucket_schema, &bucket);
-    if (s.ok()) {
-      components.push_back(Substitute("bucket=$0", bucket));
-    } else {
-      components.push_back(Substitute("<bucket-error: $0>", s.ToString()));
-    }
+  for (int i = 0; i < hash_bucket_schemas_.size(); i++) {
+    string s = Substitute("HASH ($0) PARTITION $1",
+                          ColumnIdsToColumnNames(schema, 
hash_bucket_schemas_[i].column_ids),
+                          partition.hash_buckets_[i]);
+    components.emplace_back(std::move(s));
   }
 
-  for (ColumnId column_id : range_schema_.column_ids) {
-    string column;
-    int32_t column_idx = row.schema()->find_column_by_id(column_id);
-    if (column_idx == Schema::kColumnNotFound) {
-      components.push_back("<unknown-column>");
-      break;
-    }
-    row.schema()->column(column_idx).DebugCellAppend(row.cell(column_idx), 
&column);
-    components.push_back(column);
+  if (!range_schema_.column_ids.empty()) {
+    string s = Substitute("RANGE ($0) PARTITION $1",
+                          ColumnIdsToColumnNames(schema, 
range_schema_.column_ids),
+                          
RangePartitionDebugString(partition.range_key_start(),
+                                                    partition.range_key_end(),
+                                                    schema));
+    components.emplace_back(std::move(s));
   }
 
   return JoinStrings(components, ", ");
 }
 
-string PartitionSchema::RowDebugString(const KuduPartialRow& row) const {
+template<typename Row>
+string PartitionSchema::PartitionKeyDebugStringImpl(const Row& row) const {
   vector<string> components;
 
   for (const HashBucketSchema& hash_bucket_schema : hash_bucket_schemas_) {
     int32_t bucket;
     Status s = BucketForRow(row, hash_bucket_schema, &bucket);
     if (s.ok()) {
-      components.push_back(Substitute("bucket=$0", bucket));
+      components.emplace_back(
+          Substitute("HASH ($0): $1",
+                     ColumnIdsToColumnNames(*row.schema(), 
hash_bucket_schema.column_ids),
+                     bucket));
     } else {
-      components.push_back(Substitute("<bucket-error: $0>", s.ToString()));
+      components.emplace_back(Substitute("<hash-error: $0>", s.ToString()));
     }
   }
 
-  AppendRangeDebugStringComponentsOrMin(row, &components);
+  if (!range_schema_.column_ids.empty()) {
+      components.emplace_back(
+          Substitute("RANGE ($0): $1",
+                     ColumnIdsToColumnNames(*row.schema(), 
range_schema_.column_ids),
+                     RangeKeyDebugString(row)));
+  }
 
   return JoinStrings(components, ", ");
 }
+template
+string PartitionSchema::PartitionKeyDebugStringImpl(const KuduPartialRow& row) 
const;
+template
+string PartitionSchema::PartitionKeyDebugStringImpl(const ConstContiguousRow& 
row) const;
+
+string PartitionSchema::PartitionKeyDebugString(const ConstContiguousRow& row) 
const {
+  return PartitionKeyDebugStringImpl(row);
+}
 
-string PartitionSchema::PartitionKeyDebugString(const string& key, const 
Schema& schema) const {
-  Slice encoded_key = key;
+string PartitionSchema::PartitionKeyDebugString(const KuduPartialRow& row) 
const {
+  return PartitionKeyDebugStringImpl(row);
+}
 
+string PartitionSchema::PartitionKeyDebugString(Slice key, const Schema& 
schema) const {
   vector<string> components;
 
-  if (!hash_bucket_schemas_.empty()) {
-    vector<int32_t> buckets;
-    Status s = DecodeHashBuckets(&encoded_key, &buckets);
-    if (!s.ok()) {
-      return Substitute("<hash-decode-error: $0>", s.ToString());
-    }
-    for (int32_t bucket : buckets) {
-      components.push_back(Substitute("bucket=$0", bucket));
-    }
+  size_t hash_components_size = kEncodedBucketSize * 
hash_bucket_schemas_.size();
+  if (key.size() < hash_components_size) {
+    return "<hash-decode-error>";
   }
 
-  if (!range_schema_.column_ids.empty()) {
-    Arena arena(1024, 128 * 1024);
-    KuduPartialRow row(&schema);
-
-    Status s = DecodeRangeKey(&encoded_key, &row, &arena);
-    if (!s.ok()) {
-      return Substitute("<range-decode-error: $0>", s.ToString());
-    }
+  for (const auto& hash_schema : hash_bucket_schemas_) {
+    uint32_t big_endian;
+    memcpy(&big_endian, key.data(), sizeof(uint32_t));
+    key.remove_prefix(sizeof(uint32_t));
+    components.emplace_back(
+        Substitute("HASH ($0): $1",
+                    ColumnIdsToColumnNames(schema, hash_schema.column_ids),
+                    BigEndian::ToHost32(big_endian)));
+  }
 
-    AppendRangeDebugStringComponentsOrMin(row, &components);
+  if (!range_schema_.column_ids.empty()) {
+      components.emplace_back(
+          Substitute("RANGE ($0): $1",
+                     ColumnIdsToColumnNames(schema, range_schema_.column_ids),
+                     RangeKeyDebugString(key, schema)));
   }
 
   return JoinStrings(components, ", ");
@@ -673,140 +675,166 @@ string PartitionSchema::PartitionKeyDebugString(const 
string& key, const Schema&
 
 string PartitionSchema::RangePartitionDebugString(const KuduPartialRow& 
lower_bound,
                                                   const KuduPartialRow& 
upper_bound) const {
-  string out("[");
-  if (IsRangePartitionKeyEmpty(lower_bound)) {
-    out.append("<start>");
-  } else {
-    vector<string> components;
-    AppendRangeDebugStringComponentsOrMin(lower_bound, &components);
-    out.push_back('(');
-    out.append(JoinStrings(components, ", "));
-    out.push_back(')');
-  }
-  out.append(", ");
-  if (IsRangePartitionKeyEmpty(upper_bound)) {
-    out.append("<end>");
-  } else {
-    vector<string> components;
-    AppendRangeDebugStringComponentsOrMin(upper_bound, &components);
-    out.push_back('(');
-    out.append(JoinStrings(components, ", "));
-    out.push_back(')');
+  bool lower_unbounded = IsRangePartitionKeyEmpty(lower_bound);
+  bool upper_unbounded = IsRangePartitionKeyEmpty(upper_bound);
+  if (lower_unbounded && upper_unbounded) {
+    return "UNBOUNDED";
+  }
+  if (lower_unbounded) {
+    return Substitute("VALUES < $0", RangeKeyDebugString(upper_bound));
   }
-  out.push_back(')');
-  return out;
+  if (upper_unbounded) {
+    return Substitute("VALUES >= $0", RangeKeyDebugString(lower_bound));
+  }
+  // TODO(dan): recognize when a simplified 'VALUES =' form can be used (see
+  // org.apache.kudu.client.Partition#formatRangePartition).
+  return Substitute("$0 <= VALUES < $1",
+                    RangeKeyDebugString(lower_bound),
+                    RangeKeyDebugString(upper_bound));
 }
 
-string PartitionSchema::RangePartitionDebugString(const string& lower_bound,
-                                                  const string& upper_bound,
+string PartitionSchema::RangePartitionDebugString(Slice lower_bound,
+                                                  Slice upper_bound,
                                                   const Schema& schema) const {
-  string out("[");
-  if (lower_bound.empty()) {
-    out.append("<start>");
-  } else {
-    out.push_back('(');
-    out.append(RangeKeyDebugString(lower_bound, schema));
-    out.push_back(')');
+  Arena arena(1024, 128 * 1024);
+  KuduPartialRow lower(&schema);
+  KuduPartialRow upper(&schema);
+
+  Status s = DecodeRangeKey(&lower_bound, &lower, &arena);
+  if (!s.ok()) {
+    return Substitute("<range-key-decode-error: $0>", s.ToString());
   }
-  out.append(", ");
-  if (upper_bound.empty()) {
-    out.append("<end>");
-  } else {
-    out.push_back('(');
-    out.append(RangeKeyDebugString(upper_bound, schema));
-    out.push_back(')');
+  s = DecodeRangeKey(&upper_bound, &upper, &arena);
+  if (!s.ok()) {
+    return Substitute("<range-key-decode-error: $0>", s.ToString());
   }
-  out.push_back(')');
-  return out;
+
+  return RangePartitionDebugString(lower, upper);
 }
 
-string PartitionSchema::RangeKeyDebugString(const string& range_key, const 
Schema& schema) const {
+string PartitionSchema::RangeKeyDebugString(Slice range_key, const Schema& 
schema) const {
   Arena arena(1024, 128 * 1024);
   KuduPartialRow row(&schema);
-  vector<string> components;
 
-  Slice encoded_key(range_key);
-  Status s = DecodeRangeKey(&encoded_key, &row, &arena);
+  Status s = DecodeRangeKey(&range_key, &row, &arena);
   if (!s.ok()) {
-    return Substitute("<range-decode-error: $0>", s.ToString());
+    return Substitute("<range-key-decode-error: $0>", s.ToString());
   }
-  AppendRangeDebugStringComponentsOrMin(row, &components);
-  return JoinStrings(components, ", ");
+  return RangeKeyDebugString(row);
 }
 
-namespace {
-// Converts a list of column IDs to a string with the column names seperated by
-// a comma character.
-string ColumnIdsToColumnNames(const Schema& schema,
-                              const vector<ColumnId> column_ids) {
-  vector<string> names;
-  for (ColumnId column_id : column_ids) {
-    names.push_back(schema.column(schema.find_column_by_id(column_id)).name());
+string PartitionSchema::RangeKeyDebugString(const KuduPartialRow& key) const {
+  vector<string> components;
+  AppendRangeDebugStringComponentsOrMin(key, &components);
+  if (components.size() == 1) {
+    // Omit the parentheses if the range partition has a single column.
+    return components.back();
   }
+  return Substitute("($0)", JoinStrings(components, ", "));
+}
 
-  return JoinStrings(names, ", ");
+string PartitionSchema::RangeKeyDebugString(const ConstContiguousRow& key) 
const {
+  vector<string> components;
+
+  for (const ColumnId& column_id : range_schema_.column_ids) {
+    string column;
+    int32_t column_idx = key.schema()->find_column_by_id(column_id);
+    if (column_idx == Schema::kColumnNotFound) {
+      components.push_back("<unknown-column>");
+      break;
+    }
+    key.schema()->column(column_idx).DebugCellAppend(key.cell(column_idx), 
&column);
+    components.push_back(column);
+  }
+
+  if (components.size() == 1) {
+    // Omit the parentheses if the range partition has a single column.
+    return components.back();
+  }
+  return Substitute("($0)", JoinStrings(components, ", "));
 }
-} // namespace
 
-string PartitionSchema::DebugString(const Schema& schema) const {
-  vector<string> component_types;
-
-  if (!hash_bucket_schemas_.empty()) {
-    vector<string> hash_components;
-    for (const HashBucketSchema& hash_bucket_schema : hash_bucket_schemas_) {
-      string component;
-      component.append(Substitute("(bucket count: $0", 
hash_bucket_schema.num_buckets));
-      if (hash_bucket_schema.seed != 0) {
-        component.append(Substitute(", seed: $0", hash_bucket_schema.seed));
-      }
-      component.append(Substitute(", columns: [$0])",
-                                  ColumnIdsToColumnNames(schema, 
hash_bucket_schema.column_ids)));
-      hash_components.push_back(component);
+vector<string> PartitionSchema::DebugStringComponents(const Schema& schema) 
const {
+  vector<string> components;
+
+  for (const auto& hash_bucket_schema : hash_bucket_schemas_) {
+    string s;
+    SubstituteAndAppend(&s, "HASH ($0) PARTITIONS $1",
+                        ColumnIdsToColumnNames(schema, 
hash_bucket_schema.column_ids),
+                        hash_bucket_schema.num_buckets);
+    if (hash_bucket_schema.seed != 0) {
+      SubstituteAndAppend(&s, " SEED $0", hash_bucket_schema.seed);
     }
-    component_types.push_back(Substitute("hash bucket components: [$0]",
-                                         JoinStrings(hash_components, ", ")));
+    components.emplace_back(std::move(s));
   }
 
   if (!range_schema_.column_ids.empty()) {
-    component_types.push_back(Substitute("range columns: [$0]",
-                                         ColumnIdsToColumnNames(schema, 
range_schema_.column_ids)));
+    string s = Substitute("RANGE ($0)", ColumnIdsToColumnNames(schema, 
range_schema_.column_ids));
+    components.emplace_back(std::move(s));
   }
-  return JoinStrings(component_types, ", ");
+
+  return components;
 }
 
-string PartitionSchema::DisplayString(const Schema& schema) const {
-  string display_string;
-
-  if (!hash_bucket_schemas_.empty()) {
-    display_string.append("Hash components:\n");
-    for (const HashBucketSchema& hash_bucket_schema : hash_bucket_schemas_) {
-      display_string.append("  (");
-      vector<string> hash_components;
-      hash_components.reserve(hash_bucket_schema.column_ids.size());
-      for (const ColumnId& col_id : hash_bucket_schema.column_ids) {
-        const ColumnSchema& col = schema.column_by_id(col_id);
-        hash_components.push_back(Substitute("$0 $1", col.name(), 
col.type_info()->name()));
-      }
-      display_string.append(JoinStrings(hash_components, ", "));
-      SubstituteAndAppend(&display_string, ") bucket count: $0", 
hash_bucket_schema.num_buckets);
-      if (hash_bucket_schema.seed != 0) {
-        SubstituteAndAppend(&display_string, " seed: $0", 
hash_bucket_schema.seed);
+string PartitionSchema::DebugString(const Schema& schema) const {
+  return JoinStrings(DebugStringComponents(schema), ", ");
+}
+
+string PartitionSchema::DisplayString(const Schema& schema,
+                                      const vector<string>& range_partitions) 
const {
+  string display_string = JoinStrings(DebugStringComponents(schema), ",\n");
+
+  if (!range_schema_.column_ids.empty()) {
+    display_string.append(" (");
+    if (range_partitions.empty()) {
+      display_string.append(")");
+    } else {
+      bool is_first = true;
+      for (const string& range_partition : range_partitions) {
+        if (is_first) {
+          is_first = false;
+        } else {
+          display_string.push_back(',');
+        }
+        display_string.append("\n    PARTITION ");
+        display_string.append(range_partition);
       }
-      display_string.append("\n");
+      display_string.append("\n)");
     }
   }
+  return display_string;
+}
 
+string PartitionSchema::PartitionTableHeader(const Schema& schema) const {
+  string header;
+  for (const auto& hash_bucket_schema : hash_bucket_schemas_) {
+    SubstituteAndAppend(&header, "<th>HASH ($0) PARTITION</th>",
+                        EscapeForHtmlToString(
+                          ColumnIdsToColumnNames(schema, 
hash_bucket_schema.column_ids)));
+  }
   if (!range_schema_.column_ids.empty()) {
-    display_string.append("Range component:\n");
-    vector<string> range_component;
-    range_component.reserve(range_schema_.column_ids.size());
-    for (const ColumnId& col_id : range_schema_.column_ids) {
-      const ColumnSchema& col = schema.column_by_id(col_id);
-      range_component.push_back(Substitute("$0 $1", col.name(), 
col.type_info()->name()));
-    }
-    SubstituteAndAppend(&display_string, "  ($0)\n", 
JoinStrings(range_component, ", "));
+    SubstituteAndAppend(&header, "<th>RANGE ($0) PARTITION</th>",
+                        EscapeForHtmlToString(
+                          ColumnIdsToColumnNames(schema, 
range_schema_.column_ids)));
   }
-  return display_string;
+  return header;
+}
+
+string PartitionSchema::PartitionTableEntry(const Schema& schema,
+                                            const Partition& partition) const {
+  string entry;
+  for (int32_t bucket : partition.hash_buckets_) {
+    SubstituteAndAppend(&entry, "<td>$0</td>", bucket);
+  }
+
+  if (!range_schema_.column_ids.empty()) {
+    SubstituteAndAppend(&entry, "<td>$0</td>",
+                        EscapeForHtmlToString(
+                          
RangePartitionDebugString(partition.range_key_start(),
+                                                    partition.range_key_end(),
+                                                    schema)));
+  }
+  return entry;
 }
 
 bool PartitionSchema::Equals(const PartitionSchema& other) const {
@@ -913,7 +941,7 @@ Status PartitionSchema::Validate(const Schema& schema) 
const {
       return Status::InvalidArgument("must have at least one hash column");
     }
 
-    for (ColumnId hash_column : hash_schema.column_ids) {
+    for (const ColumnId& hash_column : hash_schema.column_ids) {
       if (!hash_columns.insert(hash_column).second) {
         return Status::InvalidArgument("hash bucket schema components must not 
"
                                        "contain columns in common");
@@ -929,7 +957,7 @@ Status PartitionSchema::Validate(const Schema& schema) 
const {
     }
   }
 
-  for (ColumnId column_id : range_schema_.column_ids) {
+  for (const ColumnId& column_id : range_schema_.column_ids) {
     int32_t column_idx = schema.find_column_by_id(column_id);
     if (column_idx == Schema::kColumnNotFound) {
       return Status::InvalidArgument("must specify existing columns for range "
@@ -1089,7 +1117,7 @@ Status 
PartitionSchema::IncrementRangePartitionKey(KuduPartialRow* row, bool* in
 
 Status 
PartitionSchema::MakeLowerBoundRangePartitionKeyInclusive(KuduPartialRow* row) 
const {
   // To transform a lower bound range partition key from exclusive to 
inclusive,
-  // the key mut be incremented. To increment the key, start with the least
+  // the key must be incremented. To increment the key, start with the least
   // significant column in the key (furthest right), and increment it.  If the
   // increment fails because the value is already the maximum, move on to the
   // next least significant column and attempt to increment it (and so on). 
When
@@ -1119,7 +1147,7 @@ Status 
PartitionSchema::MakeLowerBoundRangePartitionKeyInclusive(KuduPartialRow*
     AppendRangeDebugStringComponentsOrMin(*row, &components);
     return Status::InvalidArgument("Exclusive lower bound range partition key 
must not "
                                    "have maximum values for all components",
-                                   JoinStrings(components, ", "));
+                                   RangeKeyDebugString(*row));
   }
 
   return Status::OK();
@@ -1140,7 +1168,7 @@ Status 
PartitionSchema::MakeUpperBoundRangePartitionKeyExclusive(KuduPartialRow*
   //   an unbounded upper bound (this is a special case increment).
 
   bool all_unset = true;
-  for (ColumnId column_id : range_schema_.column_ids) {
+  for (const ColumnId& column_id : range_schema_.column_ids) {
     int32_t idx = row->schema()->find_column_by_id(column_id);
     if (idx == Schema::kColumnNotFound) {
       return Status::InvalidArgument(Substitute("range partition column ID $0 "
@@ -1156,7 +1184,7 @@ Status 
PartitionSchema::MakeUpperBoundRangePartitionKeyExclusive(KuduPartialRow*
   bool increment;
   RETURN_NOT_OK(IncrementRangePartitionKey(row, &increment));
   if (!increment) {
-    for (ColumnId column_id : range_schema_.column_ids) {
+    for (const ColumnId& column_id : range_schema_.column_ids) {
       int32_t idx = row->schema()->find_column_by_id(column_id);
       RETURN_NOT_OK(row->Unset(idx));
     }

http://git-wip-us.apache.org/repos/asf/kudu/blob/f165ef7d/src/kudu/common/partition.h
----------------------------------------------------------------------
diff --git a/src/kudu/common/partition.h b/src/kudu/common/partition.h
index bd88978..60e15e8 100644
--- a/src/kudu/common/partition.h
+++ b/src/kudu/common/partition.h
@@ -139,9 +139,6 @@ class PartitionSchema {
   // Appends the row's encoded partition key into the provided buffer.
   // On failure, the buffer may have data partially appended.
   Status EncodeKey(const KuduPartialRow& row, std::string* buf) const 
WARN_UNUSED_RESULT;
-
-  // Appends the row's encoded partition key into the provided buffer.
-  // On failure, the buffer may have data partially appended.
   Status EncodeKey(const ConstContiguousRow& row, std::string* buf) const 
WARN_UNUSED_RESULT;
 
   // Creates the set of table partitions for a partition schema and collection
@@ -163,8 +160,6 @@ class PartitionSchema {
   Status PartitionContainsRow(const Partition& partition,
                               const KuduPartialRow& row,
                               bool* contains) const WARN_UNUSED_RESULT;
-
-  // Tests if the partition contains the row.
   Status PartitionContainsRow(const Partition& partition,
                               const ConstContiguousRow& row,
                               bool* contains) const WARN_UNUSED_RESULT;
@@ -172,35 +167,40 @@ class PartitionSchema {
   // Returns a text description of the partition suitable for debug printing.
   std::string PartitionDebugString(const Partition& partition, const Schema& 
schema) const;
 
-  // Returns a text description of the partial row's partition key suitable 
for debug printing.
-  std::string RowDebugString(const KuduPartialRow& row) const;
-
-  // Returns a text description of the row's partition key suitable for debug 
printing.
-  std::string RowDebugString(const ConstContiguousRow& row) const;
-
-  // Returns a text description of the encoded partition key suitable for 
debug printing.
-  std::string PartitionKeyDebugString(const std::string& key, const Schema& 
schema) const;
+  // Returns a text description of a partition key suitable for debug printing.
+  std::string PartitionKeyDebugString(Slice key, const Schema& schema) const;
+  std::string PartitionKeyDebugString(const KuduPartialRow& row) const;
+  std::string PartitionKeyDebugString(const ConstContiguousRow& row) const;
 
   // Returns a text description of the range partition with the provided
   // inclusive lower bound and exclusive upper bound.
   std::string RangePartitionDebugString(const KuduPartialRow& lower_bound,
                                         const KuduPartialRow& upper_bound) 
const;
-
-  // Returns a text description of the range partition with the provided
-  // inclusive lower bound and exclusive upper bound.
-  std::string RangePartitionDebugString(const std::string& lower_bound,
-                                        const std::string& upper_bound,
+  std::string RangePartitionDebugString(Slice lower_bound,
+                                        Slice upper_bound,
                                         const Schema& schema) const;
 
   // Returns a text description of the encoded range key suitable for debug 
printing.
-  std::string RangeKeyDebugString(const std::string& range_key, const Schema& 
schema) const;
+  std::string RangeKeyDebugString(Slice range_key, const Schema& schema) const;
+  std::string RangeKeyDebugString(const KuduPartialRow& key) const;
+  std::string RangeKeyDebugString(const ConstContiguousRow& key) const;
 
   // Returns a text description of this partition schema suitable for debug 
printing.
   std::string DebugString(const Schema& schema) const;
 
   // Returns a text description of this partition schema suitable for display 
in the web UI.
   // The format of this string is not guaranteed to be identical cross-version.
-  std::string DisplayString(const Schema& schema) const;
+  //
+  // 'range_partitions' should include the set of range partitions in the 
table,
+  // as formatted by 'RangePartitionDebugString'.
+  std::string DisplayString(const Schema& schema,
+                            const std::vector<std::string>& range_partitions) 
const;
+
+  // Returns header and entry HTML cells for the partition schema for the 
master
+  // table web UI. This is an abstraction leak, but it's better than leaking 
the
+  // internals of partitions to the master path handlers.
+  std::string PartitionTableHeader(const Schema& schema) const;
+  std::string PartitionTableEntry(const Schema& schema, const Partition& 
partition) const;
 
   // Returns true if the other partition schema is equivalent to this one.
   bool Equals(const PartitionSchema& other) const;
@@ -251,6 +251,10 @@ class PartitionSchema {
                              const HashBucketSchema& hash_bucket_schema,
                              int32_t* bucket);
 
+  // PartitionKeyDebugString implementation for row types.
+  template<typename Row>
+  std::string PartitionKeyDebugStringImpl(const Row& row) const;
+
   // Private templated helper for PartitionContainsRow.
   template<typename Row>
   Status PartitionContainsRowImpl(const Partition& partition,
@@ -273,6 +277,10 @@ class PartitionSchema {
   void AppendRangeDebugStringComponentsOrMin(const KuduPartialRow& row,
                                              std::vector<std::string>* 
components) const;
 
+  /// Returns the stringified hash and range schema components of the 
partition
+  /// schema.
+  std::vector<std::string> DebugStringComponents(const Schema& schema) const;
+
   // Encode the provided row into a range key. The row must not include values
   // for any columns not in the range key. Missing range values will be filled
   // with the logical minimum value for the column. A row without any values

http://git-wip-us.apache.org/repos/asf/kudu/blob/f165ef7d/src/kudu/common/row_changelist-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/common/row_changelist-test.cc 
b/src/kudu/common/row_changelist-test.cc
index 1394f9e..9523322 100644
--- a/src/kudu/common/row_changelist-test.cc
+++ b/src/kudu/common/row_changelist-test.cc
@@ -73,7 +73,7 @@ TEST_F(TestRowChangeList, TestEncodeDecodeUpdates) {
   LOG(INFO) << "Encoded: " << HexDump(buf);
 
   // Read it back.
-  EXPECT_EQ(string("SET col1=update1, col2=update2, col3=12345, col4=NULL"),
+  EXPECT_EQ(R"(SET col1="update1", col2="update2", col3=12345, col4=NULL)",
             RowChangeList(Slice(buf)).ToString(schema_));
 
   RowChangeListDecoder decoder((RowChangeList(buf)));
@@ -149,7 +149,7 @@ TEST_F(TestRowChangeList, TestReinserts) {
   // Read it back.
   // Note that col1 (hello) is not present in the output string as it's part 
of the primary
   // key which not encoded in the REINSERT mutation.
-  EXPECT_EQ(string("REINSERT col2=world, col3=12345, col4=NULL"),
+  EXPECT_EQ(string(R"(REINSERT col2="world", col3=12345, col4=NULL)"),
             RowChangeList(Slice(buf)).ToString(schema_));
 
   RowChangeListDecoder reinsert_1_dec((RowChangeList(buf)));
@@ -173,11 +173,11 @@ TEST_F(TestRowChangeList, TestReinserts) {
                                                        &reinsert_2_enc));
     // The row should now match reinsert 1
     ASSERT_STR_CONTAINS(schema_.DebugRow(dst_row),
-    "(string col1=hello, string col2=world, uint32 col3=12345, uint32 
col4=NULL)");
+        R"((string col1="hello", string col2="world", uint32 col3=12345, 
uint32 col4=NULL))");
   }
 
   // And reinsert 2 should contain the original state of the row.
-  EXPECT_EQ(string("REINSERT col2=mundo, col3=54321, col4=1"),
+  EXPECT_EQ(R"(REINSERT col2="mundo", col3=54321, col4=1)",
             RowChangeList(Slice(buf2)).ToString(schema_));
 }
 

http://git-wip-us.apache.org/repos/asf/kudu/blob/f165ef7d/src/kudu/common/row_operations-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/common/row_operations-test.cc 
b/src/kudu/common/row_operations-test.cc
index c3fd37a..4b957db 100644
--- a/src/kudu/common/row_operations-test.cc
+++ b/src/kudu/common/row_operations-test.cc
@@ -384,7 +384,7 @@ TEST_F(RowOperationsTest, 
ProjectionTestWholeSchemaSpecified) {
     CHECK_OK(client_row.SetInt32("key", 12345));
     CHECK_OK(client_row.SetInt32("int_val", 54321));
     CHECK_OK(client_row.SetStringCopy("string_val", "hello world"));
-    EXPECT_EQ("INSERT (int32 key=12345, int32 int_val=54321, string 
string_val=hello world)",
+    EXPECT_EQ(R"(INSERT (int32 key=12345, int32 int_val=54321, string 
string_val="hello world"))",
               TestProjection(RowOperationsPB::INSERT, client_row, schema_));
 
     // The first result should have the field specified.
@@ -531,7 +531,7 @@ TEST_F(RowOperationsTest, TestProjectUpdates) {
 
   // Specify the key and update both columns
   ASSERT_OK(client_row.SetStringNoCopy("string_val", "foo"));
-  EXPECT_EQ("MUTATE (int32 key=12345) SET int_val=12345, string_val=foo",
+  EXPECT_EQ(R"(MUTATE (int32 key=12345) SET int_val=12345, string_val="foo")",
             TestProjection(RowOperationsPB::UPDATE, client_row, 
server_schema));
 
   // Update the nullable column to null.

http://git-wip-us.apache.org/repos/asf/kudu/blob/f165ef7d/src/kudu/common/scan_spec-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/common/scan_spec-test.cc 
b/src/kudu/common/scan_spec-test.cc
index 10ac42f..795c49f 100644
--- a/src/kudu/common/scan_spec-test.cc
+++ b/src/kudu/common/scan_spec-test.cc
@@ -668,8 +668,8 @@ TEST_F(CompositeIntStringKeysTest, TestPrefixEquality) {
   SCOPED_TRACE(spec.ToString(schema_));
   spec.OptimizeScan(schema_, &arena_, &pool_, true);
   // Expect: key >= (64, "", "") AND key < (65, "", "")
-  EXPECT_EQ("PK >= (int8 a=64, string b=, string c=) AND "
-            "PK < (int8 a=65, string b=, string c=)",
+  EXPECT_EQ(R"(PK >= (int8 a=64, string b="", string c="") AND )"
+            R"(PK < (int8 a=65, string b="", string c=""))",
             spec.ToString(schema_));
 }
 
@@ -680,8 +680,8 @@ TEST_F(CompositeIntStringKeysTest, 
TestPrefixEqualityWithString) {
   AddPredicate<Slice>(&spec, "b", EQ, Slice("abc"));
   SCOPED_TRACE(spec.ToString(schema_));
   spec.OptimizeScan(schema_, &arena_, &pool_, true);
-  EXPECT_EQ("PK >= (int8 a=64, string b=abc, string c=) AND "
-            "PK < (int8 a=64, string b=abc\\000, string c=)",
+  EXPECT_EQ(R"(PK >= (int8 a=64, string b="abc", string c="") AND )"
+            R"(PK < (int8 a=64, string b="abc\000", string c=""))",
             spec.ToString(schema_));
 }
 

http://git-wip-us.apache.org/repos/asf/kudu/blob/f165ef7d/src/kudu/common/schema-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/common/schema-test.cc b/src/kudu/common/schema-test.cc
index 2464c4c..0f59764 100644
--- a/src/kudu/common/schema-test.cc
+++ b/src/kudu/common/schema-test.cc
@@ -265,7 +265,7 @@ TEST(TestSchema, TestRowOperations) {
   ASSERT_GT(schema.Compare(row_b, row_a), 0);
   ASSERT_LT(schema.Compare(row_a, row_b), 0);
 
-  ASSERT_EQ(string("(string col1=row_a_1, string col2=row_a_2, uint32 col3=3, 
int32 col4=-3)"),
+  ASSERT_EQ(R"((string col1="row_a_1", string col2="row_a_2", uint32 col3=3, 
int32 col4=-3))",
             schema.DebugRow(row_a));
 }
 
@@ -312,11 +312,11 @@ TEST(TestSchema, TestDecodeKeys_CompoundStringKey) {
                   ColumnSchema("col3", STRING) },
                 2);
 
-  EXPECT_EQ("(string col1=foo, string col2=bar)",
+  EXPECT_EQ(R"((string col1="foo", string col2="bar"))",
             schema.DebugEncodedRowKey(Slice("foo\0\0bar", 8), 
Schema::START_KEY));
-  EXPECT_EQ("(string col1=fo\\000o, string col2=bar)",
+  EXPECT_EQ(R"((string col1="fo\000o", string col2="bar"))",
             schema.DebugEncodedRowKey(Slice("fo\x00\x01o\0\0""bar", 10), 
Schema::START_KEY));
-  EXPECT_EQ("(string col1=fo\\000o, string col2=bar\\000xy)",
+  EXPECT_EQ(R"((string col1="fo\000o", string col2="bar\000xy"))",
             schema.DebugEncodedRowKey(Slice("fo\x00\x01o\0\0""bar\0xy", 13), 
Schema::START_KEY));
 
   EXPECT_EQ("<start of table>",

http://git-wip-us.apache.org/repos/asf/kudu/blob/f165ef7d/src/kudu/common/types.h
----------------------------------------------------------------------
diff --git a/src/kudu/common/types.h b/src/kudu/common/types.h
index 71df079..2e8b40b 100644
--- a/src/kudu/common/types.h
+++ b/src/kudu/common/types.h
@@ -371,7 +371,9 @@ struct DataTypeTraits<BINARY> {
   }
   static void AppendDebugStringForValue(const void *val, string *str) {
     const Slice *s = reinterpret_cast<const Slice *>(val);
+    str->push_back('"');
     str->append(strings::CHexEscape(s->ToString()));
+    str->push_back('"');
   }
   static int Compare(const void *lhs, const void *rhs) {
     const Slice *lhs_slice = reinterpret_cast<const Slice *>(lhs);
@@ -461,7 +463,9 @@ struct DataTypeTraits<STRING> : public 
DerivedTypeTraits<BINARY>{
   }
   static void AppendDebugStringForValue(const void *val, string *str) {
     const Slice *s = reinterpret_cast<const Slice *>(val);
+    str->push_back('"');
     str->append(strings::Utf8SafeCEscape(s->ToString()));
+    str->push_back('"');
   }
 };
 

http://git-wip-us.apache.org/repos/asf/kudu/blob/f165ef7d/src/kudu/master/master-path-handlers.cc
----------------------------------------------------------------------
diff --git a/src/kudu/master/master-path-handlers.cc 
b/src/kudu/master/master-path-handlers.cc
index f85e5c8..f0ce997 100644
--- a/src/kudu/master/master-path-handlers.cc
+++ b/src/kudu/master/master-path-handlers.cc
@@ -240,12 +240,13 @@ void MasterPathHandlers::HandleTablePage(const 
Webserver::WebRequest& req,
 
   // Prepare the tablets table first because the tablet partition information 
is
   // also used to make the range bounds.
-  std::set<std::pair<string, string>> range_bounds;
+  std::vector<string> range_partitions;
   std::ostringstream tablets_output;
   tablets_output << "<h3>Tablets</h3>";
   tablets_output << "<table class='table table-striped'>\n";
-  tablets_output << "  <tr><th>Tablet ID</th><th>Partition</th><th>State</th>"
-      "<th>Message</th><th>Peers</th></tr>\n";
+  tablets_output << "  <tr><th>Tablet ID</th>"
+                 << partition_schema.PartitionTableHeader(schema)
+                 << "<th>State</th><th>Message</th><th>Peers</th></tr>\n";
   for (const scoped_refptr<TabletInfo>& tablet : tablets) {
     vector<pair<string, RaftPeerPB::Role>> sorted_replicas;
     TabletMetadataLock l(tablet.get(), TabletMetadataLock::READ);
@@ -282,15 +283,24 @@ void MasterPathHandlers::HandleTablePage(const 
Webserver::WebRequest& req,
 
     Partition partition;
     Partition::FromPB(l.data().pb.partition(), &partition);
-    range_bounds.insert({partition.range_key_start().ToString(),
-                         partition.range_key_end().ToString()});
+
+    // For each unique range partition, add a debug string to range_partitions.
+    // To ensure uniqueness, only use partitions whose hash buckets are all 0.
+    if (std::all_of(partition.hash_buckets().begin(),
+                    partition.hash_buckets().end(),
+                    [] (const int32_t& bucket) { return bucket == 0; })) {
+        range_partitions.emplace_back(
+            
partition_schema.RangePartitionDebugString(partition.range_key_start(),
+                                                       
partition.range_key_end(),
+                                                       schema));
+    }
 
     string state = SysTabletsEntryPB_State_Name(l.data().pb.state());
     Capitalize(&state);
     tablets_output << Substitute(
-        "<tr><th>$0</th><td>$1</td><td>$2</td><td>$3</td><td>$4</td></tr>\n",
+        "<tr><th>$0</th>$1<td>$2</td><td>$3</td><td>$4</td></tr>\n",
         tablet->tablet_id(),
-        EscapeForHtmlToString(partition_schema.PartitionDebugString(partition, 
schema)),
+        partition_schema.PartitionTableEntry(schema, partition),
         state,
         EscapeForHtmlToString(l.data().pb.state_msg()),
         raft_config_html.str());
@@ -298,16 +308,9 @@ void MasterPathHandlers::HandleTablePage(const 
Webserver::WebRequest& req,
   tablets_output << "</table>\n";
 
   // Write out the partition schema and range bound information...
-  *output << "<h3>Partition schema &amp; range bounds</h3>";
+  *output << "<h3>Partition Schema</h3>";
   *output << "<pre>";
-  *output << EscapeForHtmlToString(partition_schema.DisplayString(schema));
-  *output << "Range bounds:\n";
-  for (const auto& pair : range_bounds) {
-    string range_bound = partition_schema.RangePartitionDebugString(pair.first,
-                                                                    
pair.second,
-                                                                    schema);
-    *output << Substitute("  $0\n", EscapeForHtmlToString(range_bound));
-  }
+  *output << EscapeForHtmlToString(partition_schema.DisplayString(schema, 
range_partitions));
   *output << "</pre>";
 
   // ...then the tablets table.

http://git-wip-us.apache.org/repos/asf/kudu/blob/f165ef7d/src/kudu/tablet/compaction-test.cc
----------------------------------------------------------------------
diff --git a/src/kudu/tablet/compaction-test.cc 
b/src/kudu/tablet/compaction-test.cc
index c90caa2..8f64b49 100644
--- a/src/kudu/tablet/compaction-test.cc
+++ b/src/kudu/tablet/compaction-test.cc
@@ -460,11 +460,11 @@ TEST_F(TestCompaction, TestMemRowSetInput) {
   gscoped_ptr<CompactionInput> input(CompactionInput::Create(*mrs, &schema_, 
snap));
   IterateInput(input.get(), &out);
   ASSERT_EQ(10, out.size());
-  EXPECT_EQ("RowIdxInBlock: 0; Base: (string key=hello 00000000, int32 val=0, "
+  EXPECT_EQ(R"(RowIdxInBlock: 0; Base: (string key="hello 00000000", int32 
val=0, )"
                 "int32 nullable_val=0); Undo Mutations: [@1(DELETE)]; Redo 
Mutations: "
                 "[@11(SET val=1, nullable_val=1), @21(SET val=2, 
nullable_val=NULL)];",
             out[0]);
-  EXPECT_EQ("RowIdxInBlock: 9; Base: (string key=hello 00000090, int32 val=9, "
+  EXPECT_EQ(R"(RowIdxInBlock: 9; Base: (string key="hello 00000090", int32 
val=9, )"
                 "int32 nullable_val=NULL); Undo Mutations: [@10(DELETE)]; Redo 
Mutations: "
                 "[@20(SET val=1, nullable_val=1), @30(SET val=2, 
nullable_val=NULL)];",
             out[9]);
@@ -485,16 +485,16 @@ TEST_F(TestCompaction, TestFlushMRSWithRolling) {
   vector<string> rows;
   rows.reserve(30000 / 2);
   rowsets[0]->DebugDump(&rows);
-  EXPECT_EQ("RowIdxInBlock: 0; Base: (string key=hello 00000000, int32 val=0, "
+  EXPECT_EQ(R"(RowIdxInBlock: 0; Base: (string key="hello 00000000", int32 
val=0, )"
                 "int32 nullable_val=0); Undo Mutations: [@1(DELETE)]; Redo 
Mutations: [];",
             rows[0]);
 
   rows.clear();
   rowsets[1]->DebugDump(&rows);
-  EXPECT_EQ("RowIdxInBlock: 0; Base: (string key=hello 00154700, int32 
val=15470, "
+  EXPECT_EQ(R"(RowIdxInBlock: 0; Base: (string key="hello 00154700", int32 
val=15470, )"
                 "int32 nullable_val=15470); Undo Mutations: [@15471(DELETE)]; 
Redo Mutations: [];",
             rows[0]);
-  EXPECT_EQ("RowIdxInBlock: 1; Base: (string key=hello 00154710, int32 
val=15471, "
+  EXPECT_EQ(R"(RowIdxInBlock: 1; Base: (string key="hello 00154710", int32 
val=15471, )"
                 "int32 nullable_val=NULL); Undo Mutations: [@15472(DELETE)]; 
Redo Mutations: [];",
             rows[1]);
 }
@@ -525,12 +525,12 @@ TEST_F(TestCompaction, TestRowSetInput) {
   ASSERT_OK(CompactionInput::Create(*rs, &schema_, MvccSnapshot(mvcc_), 
&input));
   IterateInput(input.get(), &out);
   ASSERT_EQ(10, out.size());
-  EXPECT_EQ("RowIdxInBlock: 0; Base: (string key=hello 00000000, int32 val=0, "
+  EXPECT_EQ(R"(RowIdxInBlock: 0; Base: (string key="hello 00000000", int32 
val=0, )"
                 "int32 nullable_val=0); Undo Mutations: [@1(DELETE)]; Redo 
Mutations: "
                 "[@11(SET val=1, nullable_val=1), @21(SET val=2, 
nullable_val=NULL), "
                 "@31(SET val=3, nullable_val=3), @41(SET val=4, 
nullable_val=NULL)];",
             out[0]);
-  EXPECT_EQ("RowIdxInBlock: 9; Base: (string key=hello 00000090, int32 val=9, "
+  EXPECT_EQ(R"(RowIdxInBlock: 9; Base: (string key="hello 00000090", int32 
val=9, )"
                 "int32 nullable_val=NULL); Undo Mutations: [@10(DELETE)]; Redo 
Mutations: "
                 "[@20(SET val=1, nullable_val=1), @30(SET val=2, 
nullable_val=NULL), "
                 "@40(SET val=3, nullable_val=3), @50(SET val=4, 
nullable_val=NULL)];",
@@ -598,12 +598,12 @@ TEST_F(TestCompaction, TestDuplicatedGhostRowsMerging) {
   vector<string> out;
   IterateInput(input.get(), &out);
   ASSERT_EQ(out.size(), 10);
-  EXPECT_EQ("RowIdxInBlock: 0; Base: (string key=hello 00000000, int32 val=2, "
+  EXPECT_EQ(R"(RowIdxInBlock: 0; Base: (string key="hello 00000000", int32 
val=2, )"
                 "int32 nullable_val=NULL); Undo Mutations: [@61(SET val=0, 
nullable_val=0), "
                 "@51(DELETE), @41(REINSERT val=1, nullable_val=1), @31(SET 
val=0, nullable_val=0), "
                 "@21(DELETE), @11(REINSERT val=0, nullable_val=0), 
@1(DELETE)]; "
                 "Redo Mutations: [];", out[0]);
-  EXPECT_EQ("RowIdxInBlock: 9; Base: (string key=hello 00000090, int32 val=2, "
+  EXPECT_EQ(R"(RowIdxInBlock: 9; Base: (string key="hello 00000090", int32 
val=2, )"
                 "int32 nullable_val=NULL); Undo Mutations: [@70(SET val=9, 
nullable_val=NULL), "
                 "@60(DELETE), @50(REINSERT val=1, nullable_val=1), @40(SET 
val=9, "
                 "nullable_val=NULL), @30(DELETE), @20(REINSERT val=9, 
nullable_val=NULL), "
@@ -840,10 +840,10 @@ TEST_F(TestCompaction, 
TestMRSCompactionDoesntOutputUnobservableRows) {
   vector<string> out;
   IterateInput(merged.get(), &out);
   EXPECT_EQ(out.size(), 2);
-  EXPECT_EQ("RowIdxInBlock: 0; Base: (string key=hello 00000001, int32 val=1, "
+  EXPECT_EQ(R"(RowIdxInBlock: 0; Base: (string key="hello 00000001", int32 
val=1, )"
                 "int32 nullable_val=NULL); Undo Mutations: [@1(DELETE)]; Redo 
Mutations: "
                 "[@2(DELETE)];", out[0]);
-  EXPECT_EQ("RowIdxInBlock: 0; Base: (string key=hello 00000002, int32 val=0, "
+  EXPECT_EQ(R"(RowIdxInBlock: 0; Base: (string key="hello 00000002", int32 
val=0, )"
                 "int32 nullable_val=0); Undo Mutations: [@2(DELETE)]; Redo 
Mutations: [];", out[1]);
 }
 
@@ -884,7 +884,7 @@ TEST_F(TestCompaction, TestOneToOne) {
   ASSERT_OK(CompactionInput::Create(*rs, &schema_, MvccSnapshot(mvcc_), 
&input));
   IterateInput(input.get(), &out);
   ASSERT_EQ(1000, out.size());
-  EXPECT_EQ("RowIdxInBlock: 0; Base: (string key=hello 00000000, int32 val=1, "
+  EXPECT_EQ(R"(RowIdxInBlock: 0; Base: (string key="hello 00000000", int32 
val=1, )"
                 "int32 nullable_val=1); Undo Mutations: [@1001(SET val=0, 
nullable_val=0), "
                 "@1(DELETE)]; Redo Mutations: [@2001(SET val=2, 
nullable_val=NULL), "
                 "@3001(SET val=3, nullable_val=3)];", out[0]);
@@ -991,10 +991,10 @@ TEST_F(TestCompaction, TestMergeMRS) {
   vector<string> out;
   IterateInput(input.get(), &out);
   ASSERT_EQ(out.size(), 20);
-  EXPECT_EQ("RowIdxInBlock: 0; Base: (string key=hello 00000000, int32 val=0, "
+  EXPECT_EQ(R"(RowIdxInBlock: 0; Base: (string key="hello 00000000", int32 
val=0, )"
                 "int32 nullable_val=0); Undo Mutations: [@1(DELETE)]; "
                 "Redo Mutations: [];", out[0]);
-  EXPECT_EQ("RowIdxInBlock: 9; Base: (string key=hello 00000091, int32 val=9, "
+  EXPECT_EQ(R"(RowIdxInBlock: 9; Base: (string key="hello 00000091", int32 
val=9, )"
                 "int32 nullable_val=NULL); Undo Mutations: [@20(DELETE)]; "
                 "Redo Mutations: [];", out[19]);
 }

Reply via email to