Github user kumarvishal09 commented on a diff in the pull request:

    https://github.com/apache/carbondata/pull/1865#discussion_r164381622
  
    --- Diff: 
integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
 ---
    @@ -79,32 +79,27 @@ case class CarbonCreateDataMapCommand(
           }
           
createPreAggregateTableCommands.flatMap(_.processMetadata(sparkSession))
         } else {
    -      val dataMapSchema = new DataMapSchema(dataMapName, dmClassName)
    -      dataMapSchema.setProperties(new java.util.HashMap[String, 
String](dmproperties.asJava))
    -      val dbName = 
CarbonEnv.getDatabaseName(tableIdentifier.database)(sparkSession)
    -      // upadting the parent table about dataschema
    -      PreAggregateUtil.updateMainTable(dbName, tableIdentifier.table, 
dataMapSchema, sparkSession)
    +      throw new UnsupportedDataMapException(dmClassName)
         }
         LOGGER.audit(s"DataMap $dataMapName successfully added to Table ${ 
tableIdentifier.table }")
         Seq.empty
       }
     
       override def processData(sparkSession: SparkSession): Seq[Row] = {
    -    if 
(dmClassName.equals("org.apache.carbondata.datamap.AggregateDataMapHandler") ||
    --- End diff --
    
    I think processMetadata will handle the exception if the class name 
mentioned in the CREATE DATAMAP statement is not valid. By the time the call 
reaches processData or undoMeta, validation has already passed, so there is no 
need to add class validation again.


---

Reply via email to