kevinjmh commented on a change in pull request #3867:
URL: https://github.com/apache/carbondata/pull/3867#discussion_r462058720



##########
File path: integration/spark/src/main/scala/org/apache/carbondata/view/MVRefresher.scala
##########
@@ -68,19 +68,19 @@ object MVRefresher {
     // Clean up the old invalid segment data before creating a new entry for new load.
     SegmentStatusManager.deleteLoadsAndUpdateMetadata(viewTable, false, null)
     val segmentStatusManager: SegmentStatusManager = new SegmentStatusManager(viewTableIdentifier)
-    // Acquire table status lock to handle concurrent dataloading
+    // Acquire table status lock to handle concurrent data loading
     val lock: ICarbonLock = segmentStatusManager.getTableStatusLock
     val segmentMapping: util.Map[String, util.List[String]] =
       new util.HashMap[String, util.List[String]]
     val viewManager = MVManagerInSpark.get(session)
     try if (lock.lockWithRetries) {
-      LOGGER.info("Acquired lock for mv " + viewIdentifier + " for table status updation")
+      LOGGER.info("Acquired lock for mv " + viewIdentifier + " for table status te")

Review comment:
       check this - the new message reads "table status te", which looks like an accidental edit; should it be "update"?
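
       For context, the surrounding code follows CarbonData's usual lock-with-retries shape around table status updates. The sketch below is illustrative only, not the actual MVRefresher body; it reuses the identifiers from the hunk above and adds the unlock that the real code performs in a finally block:

           // Illustrative sketch of the table status locking pattern;
           // identifiers mirror the hunk above, this is not the real code.
           val lock: ICarbonLock = segmentStatusManager.getTableStatusLock
           try {
             if (lock.lockWithRetries) {
               LOGGER.info("Acquired lock for mv " + viewIdentifier +
                 " for table status update")
               // ... read, update and rewrite the table status entries ...
             } else {
               // Another writer holds the lock; abort instead of risking
               // a corrupted table status file.
               throw new RuntimeException(
                 "Unable to acquire the table status lock for " + viewIdentifier)
             }
           } finally {
             // Always release, whether or not the update succeeded.
             lock.unlock()
           }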

##########
File path: integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/DeleteExecution.scala
##########
@@ -50,7 +50,7 @@ import org.apache.carbondata.events.{IndexServerLoadEvent, OperationContext, Ope
 import org.apache.carbondata.hadoop.api.{CarbonInputFormat, CarbonTableInputFormat}
 import org.apache.carbondata.processing.exception.MultipleMatchingException
 import org.apache.carbondata.processing.loading.FailureCauses
-import org.apache.carbondata.spark.DeleteDelataResultImpl
+import org.apache.carbondata.spark.DeleteDelateResultImpl

Review comment:
       delta - the name should be DeleteDeltaResultImpl, not DeleteDelateResultImpl

##########
File path: integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/DeleteExecution.scala
##########
@@ -266,9 +266,9 @@ object DeleteExecution {
                CarbonUpdateUtil.getRequiredFieldFromTID(TID, TupleIdEnum.BLOCK_ID) +
                 CarbonCommonConstants.FACT_FILE_EXT)
           }
-          val deleteDeletaPath = CarbonUpdateUtil
+          val deleteDeletePath = CarbonUpdateUtil

Review comment:
       ditto - should be deleteDeltaPath
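
       For background on the naming: CarbonData marks deleted rows by writing separate "delete delta" files next to the block they mask instead of rewriting the fact file, hence "delta", not "delete". The helper below is hypothetical and only illustrates the shape of such a path; the real code derives it via CarbonUpdateUtil and the tuple id, and the directory layout and extension here are assumptions:

           // Hypothetical helper, for illustration only: shows roughly what
           // a delete delta path looks like. The real path is produced by
           // CarbonUpdateUtil from the tuple id.
           def buildDeleteDeltaPath(tablePath: String, segmentId: String,
               blockId: String, timestamp: Long): String = {
             // A delete delta file sits beside the fact file it masks and is
             // timestamped so successive delete operations can coexist.
             s"$tablePath/Fact/Part0/Segment_$segmentId/$blockId-$timestamp.deletedelta"
           }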

##########
File path: integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/optimizer/CarbonSecondaryIndexOptimizer.scala
##########
@@ -693,10 +693,10 @@ class CarbonSecondaryIndexOptimizer(sparkSession: SparkSession) {
       case sort@Sort(order, global, plan) =>
         addProjection = true
         (sort, true)
-      case filter@Filter(condition, logicalRelation@MatchIndexableRelation(indexableRelation))
+      case filter@Filter(condition, logicalRelation@MatchIndexTableRelation(indexTableRelation))

Review comment:
       This rename changes the meaning: the extractor actually matches a CarbonDatasourceHadoopRelation, not only index table relations, so MatchIndexableRelation is the accurate name.
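
       To make the objection concrete: the matcher is a custom extractor over the logical plan, and what it extracts is the underlying CarbonDatasourceHadoopRelation of any matched LogicalRelation, index table or not. A simplified sketch, with the real object's extra guards omitted:

           import org.apache.spark.sql.CarbonDatasourceHadoopRelation
           import org.apache.spark.sql.execution.datasources.LogicalRelation

           // Simplified sketch of the extractor the comment refers to; the
           // real object lives in the optimizer and may carry extra checks.
           object MatchIndexableRelation {
             def unapply(plan: LogicalRelation): Option[CarbonDatasourceHadoopRelation] =
               plan.relation match {
                 // Matches any CarbonDatasourceHadoopRelation, which is why
                 // renaming it to MatchIndexTableRelation would misdescribe
                 // what it accepts.
                 case carbon: CarbonDatasourceHadoopRelation => Some(carbon)
                 case _ => None
               }
           }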



