Repository: spark
Updated Branches:
refs/heads/branch-2.0 1259a6fa8 -> b75542603
[SPARK-15952][SQL] fix "show databases" ordering issue
## What changes were proposed in this pull request?
Two issues found with the "show databases" command:
1. The returned database name list was not sorted; it was only sorted when a "like"
pattern was supplied. (Hive always returns a sorted list.)
2. sql("show databases").show outputs a table whose column is named "result",
whereas sql("show tables").show names its column "tableName". For consistency,
the column should be named "databaseName". An illustrative sketch of the intended
behavior follows.
## How was this patch tested?
Updated the existing test case to also verify the ordering of the results.
Author: bomeng <[email protected]>
Closes #13671 from bomeng/SPARK-15952.
(cherry picked from commit 42a28caf1001244d617b9256de196129348f2fef)
Signed-off-by: Reynold Xin <[email protected]>
Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/b7554260
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/b7554260
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/b7554260
Branch: refs/heads/branch-2.0
Commit: b75542603714227faa952442daa5d52d46a56d3c
Parents: 1259a6f
Author: bomeng <[email protected]>
Authored: Tue Jun 14 18:35:29 2016 -0700
Committer: Reynold Xin <[email protected]>
Committed: Tue Jun 14 18:35:39 2016 -0700
----------------------------------------------------------------------
.../apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala | 2 +-
.../org/apache/spark/sql/execution/command/databases.scala | 4 ++--
.../org/apache/spark/sql/execution/command/DDLSuite.scala | 6 +++---
3 files changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/spark/blob/b7554260/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala
index 6052579..14da30a 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala
@@ -178,7 +178,7 @@ class InMemoryCatalog(hadoopConfig: Configuration = new Configuration) extends E
   }
 
   override def listDatabases(): Seq[String] = synchronized {
-    catalog.keySet.toSeq
+    catalog.keySet.toSeq.sorted
   }
 
   override def listDatabases(pattern: String): Seq[String] = synchronized {
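
For reference, a tiny standalone sketch (with made-up names) of what the added `.sorted`
call provides: `Seq[String].sorted` uses the default `Ordering[String]`, i.e. lexicographic
order, so the in-memory catalog now returns database names deterministically sorted.

```scala
// Hypothetical catalog keys; .sorted applies the default lexicographic String ordering.
val names = Seq("showdb2b", "default", "showdb1a")
assert(names.sorted == Seq("default", "showdb1a", "showdb2b"))
```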
http://git-wip-us.apache.org/repos/asf/spark/blob/b7554260/sql/core/src/main/scala/org/apache/spark/sql/execution/command/databases.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/databases.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/databases.scala
index cefe0f6..597ec27 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/databases.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/databases.scala
@@ -33,9 +33,9 @@ import org.apache.spark.sql.types.StringType
  */
 case class ShowDatabasesCommand(databasePattern: Option[String]) extends RunnableCommand {
 
-  // The result of SHOW DATABASES has one column called 'result'
+  // The result of SHOW DATABASES has one column called 'databaseName'
   override val output: Seq[Attribute] = {
-    AttributeReference("result", StringType, nullable = false)() :: Nil
+    AttributeReference("databaseName", StringType, nullable = false)() :: Nil
   }
 
   override def run(sparkSession: SparkSession): Seq[Row] = {
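
As a small usage note (not part of the patch; the session name is illustrative), the renamed
output attribute is what user code sees when inspecting the result schema:

```scala
// After this change the lone SHOW DATABASES column is reported as "databaseName",
// consistent with the "tableName" column produced by SHOW TABLES.
val cols = spark.sql("SHOW DATABASES").columns   // Array("databaseName")
```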
http://git-wip-us.apache.org/repos/asf/spark/blob/b7554260/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
index a7e6893..e15fcf4 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
@@ -708,11 +708,11 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach {
   }
 
   test("show databases") {
-    sql("CREATE DATABASE showdb1A")
     sql("CREATE DATABASE showdb2B")
+    sql("CREATE DATABASE showdb1A")
 
-    assert(
-      sql("SHOW DATABASES").count() >= 2)
+    // check the result as well as its order
+    checkDataset(sql("SHOW DATABASES"), Row("default"), Row("showdb1a"), Row("showdb2b"))
 
     checkAnswer(
       sql("SHOW DATABASES LIKE '*db1A'"),