Github user jackylk commented on a diff in the pull request:

    https://github.com/apache/carbondata/pull/1672#discussion_r157339670
  
    --- Diff: integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala ---
    @@ -316,16 +332,35 @@ case class CarbonLoadDataCommand(
         } else {
           dataFrame
         }
    -    CarbonDataRDDFactory.loadCarbonData(sparkSession.sqlContext,
    -      carbonLoadModel,
    -      columnar,
    -      partitionStatus,
    -      server,
    -      isOverwriteTable,
    -      hadoopConf,
    -      loadDataFrame,
    -      updateModel,
    -      operationContext)
    +
    +    if (carbonTable.isStandardPartitionTable) {
    +      try {
    +        loadStandardPartition(sparkSession, carbonLoadModel, hadoopConf, loadDataFrame)
    +      } finally {
    +        server match {
    +          case Some(dictServer) =>
    +            try {
    +              dictServer.writeTableDictionary(carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
    +                .getCarbonTableIdentifier.getTableId)
    +            } catch {
    +              case _: Exception =>
    +                throw new Exception("Dataload failed due to error while writing dictionary file!")
    +            }
    +          case _ =>
    +        }
    +      }
    +    } else {
    +      CarbonDataRDDFactory.loadCarbonData(sparkSession.sqlContext,
    --- End diff ---
    
    Move `sparkSession.sqlContext` to the next line.
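    
    For illustration only, the reformatted call might look roughly like the sketch below; this simply applies the suggested style to the arguments already shown in the diff and is not the committed code:
    
    ```scala
    // Hypothetical reformatting: the first argument moves off the opening line
    // so every parameter of loadCarbonData sits on its own line.
    CarbonDataRDDFactory.loadCarbonData(
      sparkSession.sqlContext,
      carbonLoadModel,
      columnar,
      partitionStatus,
      server,
      isOverwriteTable,
      hadoopConf,
      loadDataFrame,
      updateModel,
      operationContext)
    ```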

