MaxGekk commented on code in PR #37588:
URL: https://github.com/apache/spark/pull/37588#discussion_r1108213058
##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowTablesExec.scala:
##########
@@ -33,14 +44,37 @@ case class ShowTablesExec(
output: Seq[Attribute],
catalog: TableCatalog,
namespace: Seq[String],
- pattern: Option[String]) extends V2CommandExec with LeafExecNode {
+ pattern: Option[String],
+ isExtended: Boolean = false,
+ partitionSpec: Option[TablePartitionSpec] = None) extends V2CommandExec
with LeafExecNode {
Review Comment:
`TablePartitionSpec` is a legacy one — can't you use `ResolvedPartitionSpec`?
For example, see
https://github.com/apache/spark/blob/0494dc90af48ce7da0625485a4dc6917a244d580/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowPartitionsExec.scala#L36
##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowTablesExec.scala:
##########
@@ -53,4 +87,113 @@ case class ShowTablesExec(
case _ => false
}
}
+
+ private def extendedTable(identifier: Identifier, table: Table): String = {
+ val results = new mutable.LinkedHashMap[String, String]()
+
+ if (!identifier.namespace().isEmpty) {
+ results.put("Namespace", identifier.namespace().quoted)
+ }
+ results.put("Table", identifier.name())
+ val tableType = if
(table.properties().containsKey(TableCatalog.PROP_EXTERNAL)) {
+ CatalogTableType.EXTERNAL
+ } else {
+ CatalogTableType.MANAGED
+ }
+ results.put("Type", tableType.name)
+
+ CatalogV2Util.TABLE_RESERVED_PROPERTIES
+ .filterNot(_ == TableCatalog.PROP_EXTERNAL)
+ .foreach(propKey => {
+ if (table.properties.containsKey(propKey)) {
+ results.put(propKey.capitalize, table.properties.get(propKey))
+ }
+ })
+
+ val properties =
+ conf.redactOptions(table.properties.asScala.toMap).toList
+ .filter(kv => !CatalogV2Util.TABLE_RESERVED_PROPERTIES.contains(kv._1))
+ .sortBy(_._1).map {
+ case (key, value) => key + "=" + value
+ }.mkString("[", ",", "]")
+ if (table.properties().isEmpty) {
+ results.put("Table Properties", properties.mkString("[", ", ", "]"))
+ }
+
+ // Partition Provider & Partition Columns
+ // TODO check
+ if (table.isPartitionable &&
!table.asPartitionable.partitionSchema().isEmpty) {
+ results.put("Partition Provider", "Catalog")
+ results.put("Partition Columns",
table.asPartitionable.partitionSchema().map(
+ field => quoteIdentifier(field.name)).mkString(", "))
+ }
+
+ if (table.schema().nonEmpty) results.put("Schema",
table.schema().treeString)
+
+ results.map { case ((key, value)) =>
+ if (value.isEmpty) key else s"$key: $value"
+ }.mkString("", "\n", "")
+ }
+
+ private def extendedPartition(
+ identifier: Identifier,
+ partitionTable: SupportsPartitionManagement,
+ partitionSpec: Option[TablePartitionSpec]): String = {
+ val results = new mutable.LinkedHashMap[String, String]()
+
+ // "Partition Values"
+ val partitionSchema = partitionTable.partitionSchema()
+ val normalizedSpec = normalizePartitionSpec(
Review Comment:
This one is not needed. The job should be done by
`ResolvePartitionSpec.resolvePartitionSpec` — or is there some reason to bypass
it?
##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowTablesExec.scala:
##########
@@ -53,4 +87,113 @@ case class ShowTablesExec(
case _ => false
}
}
+
+ private def extendedTable(identifier: Identifier, table: Table): String = {
+ val results = new mutable.LinkedHashMap[String, String]()
+
+ if (!identifier.namespace().isEmpty) {
+ results.put("Namespace", identifier.namespace().quoted)
+ }
+ results.put("Table", identifier.name())
+ val tableType = if
(table.properties().containsKey(TableCatalog.PROP_EXTERNAL)) {
+ CatalogTableType.EXTERNAL
+ } else {
+ CatalogTableType.MANAGED
+ }
+ results.put("Type", tableType.name)
+
+ CatalogV2Util.TABLE_RESERVED_PROPERTIES
+ .filterNot(_ == TableCatalog.PROP_EXTERNAL)
+ .foreach(propKey => {
+ if (table.properties.containsKey(propKey)) {
+ results.put(propKey.capitalize, table.properties.get(propKey))
+ }
+ })
+
+ val properties =
+ conf.redactOptions(table.properties.asScala.toMap).toList
+ .filter(kv => !CatalogV2Util.TABLE_RESERVED_PROPERTIES.contains(kv._1))
+ .sortBy(_._1).map {
+ case (key, value) => key + "=" + value
+ }.mkString("[", ",", "]")
+ if (table.properties().isEmpty) {
+ results.put("Table Properties", properties.mkString("[", ", ", "]"))
+ }
+
+ // Partition Provider & Partition Columns
+ // TODO check
+ if (table.isPartitionable &&
!table.asPartitionable.partitionSchema().isEmpty) {
+ results.put("Partition Provider", "Catalog")
+ results.put("Partition Columns",
table.asPartitionable.partitionSchema().map(
+ field => quoteIdentifier(field.name)).mkString(", "))
+ }
+
+ if (table.schema().nonEmpty) results.put("Schema",
table.schema().treeString)
+
+ results.map { case ((key, value)) =>
+ if (value.isEmpty) key else s"$key: $value"
+ }.mkString("", "\n", "")
+ }
+
+ private def extendedPartition(
+ identifier: Identifier,
+ partitionTable: SupportsPartitionManagement,
+ partitionSpec: Option[TablePartitionSpec]): String = {
+ val results = new mutable.LinkedHashMap[String, String]()
+
+ // "Partition Values"
+ val partitionSchema = partitionTable.partitionSchema()
+ val normalizedSpec = normalizePartitionSpec(
+ partitionSpec.get,
+ partitionSchema,
+ partitionTable.name(),
+ conf.resolver)
+ requireExactMatchedPartitionSpec(identifier.toString,
+ normalizedSpec, partitionSchema.fieldNames)
+
+ val partitionNames = normalizedSpec.keySet
+ val (names, ident) = (partitionSchema.map(_.name),
+ convertToPartIdent(normalizedSpec, partitionSchema))
+ val partitionIdentifiers =
partitionTable.listPartitionIdentifiers(names.toArray, ident)
+ partitionIdentifiers.length match {
+ case 0 =>
Review Comment:
The function `extendedPartition()` is invoked only for a non-empty partition
spec, as far as I can see — or not? Is there any test for this case?
##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowTablesExec.scala:
##########
@@ -53,4 +87,113 @@ case class ShowTablesExec(
case _ => false
}
}
+
+ private def extendedTable(identifier: Identifier, table: Table): String = {
+ val results = new mutable.LinkedHashMap[String, String]()
+
+ if (!identifier.namespace().isEmpty) {
+ results.put("Namespace", identifier.namespace().quoted)
+ }
+ results.put("Table", identifier.name())
+ val tableType = if
(table.properties().containsKey(TableCatalog.PROP_EXTERNAL)) {
+ CatalogTableType.EXTERNAL
+ } else {
+ CatalogTableType.MANAGED
+ }
+ results.put("Type", tableType.name)
+
+ CatalogV2Util.TABLE_RESERVED_PROPERTIES
+ .filterNot(_ == TableCatalog.PROP_EXTERNAL)
+ .foreach(propKey => {
+ if (table.properties.containsKey(propKey)) {
+ results.put(propKey.capitalize, table.properties.get(propKey))
+ }
+ })
+
+ val properties =
+ conf.redactOptions(table.properties.asScala.toMap).toList
+ .filter(kv => !CatalogV2Util.TABLE_RESERVED_PROPERTIES.contains(kv._1))
+ .sortBy(_._1).map {
+ case (key, value) => key + "=" + value
+ }.mkString("[", ",", "]")
+ if (table.properties().isEmpty) {
+ results.put("Table Properties", properties.mkString("[", ", ", "]"))
+ }
+
+ // Partition Provider & Partition Columns
+ // TODO check
+ if (table.isPartitionable &&
!table.asPartitionable.partitionSchema().isEmpty) {
+ results.put("Partition Provider", "Catalog")
+ results.put("Partition Columns",
table.asPartitionable.partitionSchema().map(
+ field => quoteIdentifier(field.name)).mkString(", "))
+ }
+
+ if (table.schema().nonEmpty) results.put("Schema",
table.schema().treeString)
+
+ results.map { case ((key, value)) =>
+ if (value.isEmpty) key else s"$key: $value"
+ }.mkString("", "\n", "")
+ }
+
+ private def extendedPartition(
+ identifier: Identifier,
+ partitionTable: SupportsPartitionManagement,
+ partitionSpec: Option[TablePartitionSpec]): String = {
+ val results = new mutable.LinkedHashMap[String, String]()
+
+ // "Partition Values"
+ val partitionSchema = partitionTable.partitionSchema()
+ val normalizedSpec = normalizePartitionSpec(
+ partitionSpec.get,
+ partitionSchema,
+ partitionTable.name(),
+ conf.resolver)
+ requireExactMatchedPartitionSpec(identifier.toString,
+ normalizedSpec, partitionSchema.fieldNames)
+
+ val partitionNames = normalizedSpec.keySet
+ val (names, ident) = (partitionSchema.map(_.name),
+ convertToPartIdent(normalizedSpec, partitionSchema))
+ val partitionIdentifiers =
partitionTable.listPartitionIdentifiers(names.toArray, ident)
+ partitionIdentifiers.length match {
+ case 0 =>
+ throw QueryExecutionErrors.notExistPartitionError(
+ identifier.toString, ident, partitionSchema)
+ case len if (len > 1) =>
Review Comment:
nit:
```suggestion
case len if len > 1 =>
```
##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowTablesExec.scala:
##########
@@ -53,4 +87,113 @@ case class ShowTablesExec(
case _ => false
}
}
+
+ private def extendedTable(identifier: Identifier, table: Table): String = {
+ val results = new mutable.LinkedHashMap[String, String]()
+
+ if (!identifier.namespace().isEmpty) {
+ results.put("Namespace", identifier.namespace().quoted)
+ }
+ results.put("Table", identifier.name())
+ val tableType = if
(table.properties().containsKey(TableCatalog.PROP_EXTERNAL)) {
+ CatalogTableType.EXTERNAL
+ } else {
+ CatalogTableType.MANAGED
+ }
+ results.put("Type", tableType.name)
+
+ CatalogV2Util.TABLE_RESERVED_PROPERTIES
+ .filterNot(_ == TableCatalog.PROP_EXTERNAL)
+ .foreach(propKey => {
+ if (table.properties.containsKey(propKey)) {
+ results.put(propKey.capitalize, table.properties.get(propKey))
+ }
+ })
+
+ val properties =
+ conf.redactOptions(table.properties.asScala.toMap).toList
+ .filter(kv => !CatalogV2Util.TABLE_RESERVED_PROPERTIES.contains(kv._1))
+ .sortBy(_._1).map {
+ case (key, value) => key + "=" + value
+ }.mkString("[", ",", "]")
+ if (table.properties().isEmpty) {
+ results.put("Table Properties", properties.mkString("[", ", ", "]"))
+ }
+
+ // Partition Provider & Partition Columns
+ // TODO check
+ if (table.isPartitionable &&
!table.asPartitionable.partitionSchema().isEmpty) {
+ results.put("Partition Provider", "Catalog")
+ results.put("Partition Columns",
table.asPartitionable.partitionSchema().map(
+ field => quoteIdentifier(field.name)).mkString(", "))
+ }
+
+ if (table.schema().nonEmpty) results.put("Schema",
table.schema().treeString)
+
+ results.map { case ((key, value)) =>
+ if (value.isEmpty) key else s"$key: $value"
+ }.mkString("", "\n", "")
+ }
+
+ private def extendedPartition(
+ identifier: Identifier,
+ partitionTable: SupportsPartitionManagement,
+ partitionSpec: Option[TablePartitionSpec]): String = {
+ val results = new mutable.LinkedHashMap[String, String]()
+
+ // "Partition Values"
+ val partitionSchema = partitionTable.partitionSchema()
+ val normalizedSpec = normalizePartitionSpec(
+ partitionSpec.get,
+ partitionSchema,
+ partitionTable.name(),
+ conf.resolver)
+ requireExactMatchedPartitionSpec(identifier.toString,
+ normalizedSpec, partitionSchema.fieldNames)
+
+ val partitionNames = normalizedSpec.keySet
Review Comment:
Where is it used?
##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowTablesExec.scala:
##########
@@ -53,4 +87,113 @@ case class ShowTablesExec(
case _ => false
}
}
+
+ private def extendedTable(identifier: Identifier, table: Table): String = {
+ val results = new mutable.LinkedHashMap[String, String]()
+
+ if (!identifier.namespace().isEmpty) {
+ results.put("Namespace", identifier.namespace().quoted)
+ }
+ results.put("Table", identifier.name())
+ val tableType = if
(table.properties().containsKey(TableCatalog.PROP_EXTERNAL)) {
+ CatalogTableType.EXTERNAL
+ } else {
+ CatalogTableType.MANAGED
+ }
+ results.put("Type", tableType.name)
+
+ CatalogV2Util.TABLE_RESERVED_PROPERTIES
+ .filterNot(_ == TableCatalog.PROP_EXTERNAL)
+ .foreach(propKey => {
+ if (table.properties.containsKey(propKey)) {
+ results.put(propKey.capitalize, table.properties.get(propKey))
+ }
+ })
+
+ val properties =
+ conf.redactOptions(table.properties.asScala.toMap).toList
+ .filter(kv => !CatalogV2Util.TABLE_RESERVED_PROPERTIES.contains(kv._1))
+ .sortBy(_._1).map {
+ case (key, value) => key + "=" + value
+ }.mkString("[", ",", "]")
+ if (table.properties().isEmpty) {
+ results.put("Table Properties", properties.mkString("[", ", ", "]"))
+ }
+
+ // Partition Provider & Partition Columns
+ // TODO check
+ if (table.isPartitionable &&
!table.asPartitionable.partitionSchema().isEmpty) {
+ results.put("Partition Provider", "Catalog")
+ results.put("Partition Columns",
table.asPartitionable.partitionSchema().map(
+ field => quoteIdentifier(field.name)).mkString(", "))
+ }
+
+ if (table.schema().nonEmpty) results.put("Schema",
table.schema().treeString)
+
+ results.map { case ((key, value)) =>
+ if (value.isEmpty) key else s"$key: $value"
+ }.mkString("", "\n", "")
+ }
+
+ private def extendedPartition(
+ identifier: Identifier,
+ partitionTable: SupportsPartitionManagement,
+ partitionSpec: Option[TablePartitionSpec]): String = {
+ val results = new mutable.LinkedHashMap[String, String]()
+
+ // "Partition Values"
+ val partitionSchema = partitionTable.partitionSchema()
+ val normalizedSpec = normalizePartitionSpec(
+ partitionSpec.get,
+ partitionSchema,
+ partitionTable.name(),
+ conf.resolver)
+ requireExactMatchedPartitionSpec(identifier.toString,
+ normalizedSpec, partitionSchema.fieldNames)
+
+ val partitionNames = normalizedSpec.keySet
+ val (names, ident) = (partitionSchema.map(_.name),
+ convertToPartIdent(normalizedSpec, partitionSchema))
+ val partitionIdentifiers =
partitionTable.listPartitionIdentifiers(names.toArray, ident)
+ partitionIdentifiers.length match {
+ case 0 =>
+ throw QueryExecutionErrors.notExistPartitionError(
+ identifier.toString, ident, partitionSchema)
+ case len if (len > 1) =>
+ throw
QueryExecutionErrors.showTableExtendedMultiPartitionUnsupportedError(
+ identifier.toString)
+ case _ => // do nothing
+ }
+ val partitionIdentifier = partitionIdentifiers.head
+ val len = partitionSchema.length
+ val partitions = new Array[String](len)
+ val timeZoneId = conf.sessionLocalTimeZone
+ var i = 0
+ while (i < len) {
Review Comment:
This loop:
```scala
var i = 0
while (i < len) {
i += 1
}
```
can be simplified to:
```scala
for (i <- 0 until len) {
}
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]