Github user jackylk commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/1865#discussion_r164617642
--- Diff:
integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
---
@@ -59,44 +59,37 @@ case class CarbonCreateDataMapCommand(
val details = TimeSeriesUtil
.getTimeSeriesGranularityDetails(dmproperties, dmClassName)
val updatedDmProperties = dmproperties -
TimeSeriesUtil.getGranularityKey(dmproperties)
- details.map { f =>
- CreatePreAggregateTableCommand(dataMapName,
- tableIdentifier,
- dmClassName,
- updatedDmProperties,
- queryString.get,
- Some(f._1))
- }.toSeq
+ CreatePreAggregateTableCommand(dataMapName,
+ tableIdentifier,
+ dmClassName,
+ updatedDmProperties,
+ queryString.get,
+ Some(details(0)._1))
} else {
- Seq(CreatePreAggregateTableCommand(
+ CreatePreAggregateTableCommand(
dataMapName,
tableIdentifier,
dmClassName,
dmproperties,
queryString.get
- ))
+ )
}
-
createPreAggregateTableCommands.flatMap(_.processMetadata(sparkSession))
+ createPreAggregateTableCommands.processMetadata(sparkSession)
} else {
throw new UnsupportedDataMapException(dmClassName)
}
- LOGGER.audit(s"DataMap $dataMapName successfully added to Table ${
tableIdentifier.table }")
+ LOGGER.audit(s"DataMap $dataMapName successfully added to Table
${tableIdentifier.table}")
Seq.empty
}
override def processData(sparkSession: SparkSession): Seq[Row] = {
- if (dmClassName.equalsIgnoreCase(PREAGGREGATE.getName) ||
- dmClassName.equalsIgnoreCase(TIMESERIES.getName)) {
- createPreAggregateTableCommands.flatMap(_.processData(sparkSession))
- } else {
- throw new UnsupportedDataMapException(dmClassName)
- }
+ createPreAggregateTableCommands.processData(sparkSession)
--- End diff --
I think it is better not to remove the validation of the dmClassName, since
we will refactor processData and processMeta and add a test framework for
them later. If we remove the validation now, we may forget to re-add this
check during the refactoring.
---