Github user jackylk commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2404#discussion_r198056881
--- Diff:
integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDataMapShowCommand.scala
---
@@ -66,11 +67,24 @@ case class CarbonDataMapShowCommand(tableIdentifier:
Option[TableIdentifier])
private def convertToRow(schemaList: util.List[DataMapSchema]) = {
if (schemaList != null && schemaList.size() > 0) {
- schemaList.asScala.map { s =>
- var table = "(NA)"
- val relationIdentifier = s.getRelationIdentifier
- table = relationIdentifier.getDatabaseName + "." +
relationIdentifier.getTableName
- Row(s.getDataMapName, s.getProviderName, table)
+ schemaList.asScala
+ .map { s =>
+ val relationIdentifier = s.getRelationIdentifier
+ val table = relationIdentifier.getDatabaseName + "." +
relationIdentifier.getTableName
+ // preaggregate datamap does not support user specified
property, therefore we return empty
+ val dmPropertieStr = if (s.getProviderName.equalsIgnoreCase(
+ DataMapClassProvider.PREAGGREGATE.getShortName)) {
--- End diff --
How about timeseries and MV datamap?
---