nihal0107 commented on a change in pull request #4071:
URL: https://github.com/apache/carbondata/pull/4071#discussion_r569392411



##########
File path: index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/mergedata/CarbonDataFileMergeTestCaseOnSI.scala
##########
@@ -338,8 +276,48 @@ class CarbonDataFileMergeTestCaseOnSI
       .queryExecution.sparkPlan
     assert(getDataFileCount("nonindexmerge_index1", "0") == 100)
     assert(getDataFileCount("nonindexmerge_index1", "1") == 100)
-    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_SI_SEGMENT_MERGE,
-      CarbonCommonConstants.CARBON_SI_SEGMENT_MERGE_DEFAULT)
+
+    // exception is thrown by compaction executor
+    val mock3: MockUp[CarbonCompactionExecutor] = new MockUp[CarbonCompactionExecutor]() {
+      @Mock
+      def processTableBlocks(configuration: Configuration, filterExpr: Expression):
+      util.Map[String, util.List[RawResultIterator]] = {
+        throw new IOException("An exception occurred while compaction executor.")
+      }
+    }
+    val exception2 = intercept[Exception] {
+      sql("REFRESH INDEX nonindexmerge_index1 ON TABLE nonindexmerge").collect()
+    }
+    mock3.tearDown()
+    assert(exception2.getMessage.contains("Merge data files Failure in Merger Rdd."))
+    df1 = sql("""Select * from nonindexmerge where name='n16000'""")
+        .queryExecution.sparkPlan
+    assert(getDataFileCount("nonindexmerge_index1", "0") == 100)
+    assert(getDataFileCount("nonindexmerge_index1", "1") == 100)
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants

Review comment:
       done
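
For context, a minimal sketch of the JMockit MockUp pattern the new test code uses (the same install/tear-down cycle as mock3 above). FlakyService is a hypothetical stand-in for CarbonCompactionExecutor, and the sketch assumes a JMockit version where MockUp.tearDown() is callable from the test, as the mock3.tearDown() call in the diff implies:

```scala
import java.io.IOException
import mockit.{Mock, MockUp}

// Hypothetical class standing in for CarbonCompactionExecutor.
class FlakyService {
  def process(): String = "ok"
}

object MockUpSketch {
  def main(args: Array[String]): Unit = {
    // Installing the MockUp redefines process() for every instance
    // of FlakyService until tearDown() is called.
    val mock = new MockUp[FlakyService]() {
      @Mock
      def process(): String = {
        throw new IOException("simulated failure")
      }
    }
    try {
      new FlakyService().process() // now throws the mocked IOException
    } catch {
      case e: IOException => println(s"caught: ${e.getMessage}")
    } finally {
      // Restore the real implementation, as the test does with mock3.tearDown().
      mock.tearDown()
    }
  }
}
```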

##########
File path: index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestCreateIndexWithLoadAndCompaction.scala
##########
@@ -283,19 +291,35 @@ class TestCreateIndexWithLoadAndCompaction extends QueryTest with BeforeAndAfter
     }
     sql("ALTER TABLE table1 COMPACT 'CUSTOM' WHERE SEGMENT.ID IN (1,2,3)")
 
-    val segments = sql("SHOW SEGMENTS FOR TABLE idx1")
-    val segInfos = segments.collect().map { each =>
-      ((each.toSeq) (0).toString, (each.toSeq) (1).toString)
-    }
-    assert(segInfos.length == 6)
+    val segInfos = checkSegmentList(6)
     assert(segInfos.contains(("0", "Success")))
     assert(segInfos.contains(("1", "Compacted")))
     assert(segInfos.contains(("2", "Compacted")))
     assert(segInfos.contains(("3", "Compacted")))
     assert(segInfos.contains(("1.1", "Success")))
     assert(segInfos.contains(("4", "Success")))
     checkAnswer(sql("select * from table1 where c3='b2'"), Seq(Row(3, "a2", "b2")))
-    sql("drop table if exists table1")
+
+    // after clean files
+    val mock = mockreadSegmentList()
+    sql("CLEAN FILES FOR TABLE table1 options('force'='true')")
+    mock.tearDown()
+    val details = SegmentStatusManager.readLoadMetadata(CarbonEnv
+        .getCarbonTable(Some("default"), "idx1")(sqlContext.sparkSession).getMetadataPath)
+    assert(SegmentStatusManager.countInvisibleSegments(details, 4) == 1)
+    checkSegmentList(4)
+    CarbonProperties.getInstance()

Review comment:
       done
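
The refactor replaces the inline SHOW SEGMENTS collection with a checkSegmentList helper. A plausible sketch of that helper, reconstructed from the deleted lines above; the exact signature in the PR may differ (for instance, the table name may well be a parameter):

```scala
// Sketch only: mirrors the removed code, assuming SHOW SEGMENTS returns
// the segment id in column 0 and the segment status in column 1.
def checkSegmentList(expectedCount: Int): Array[(String, String)] = {
  val segments = sql("SHOW SEGMENTS FOR TABLE idx1")
  val segInfos = segments.collect().map { each =>
    (each.toSeq(0).toString, each.toSeq(1).toString)
  }
  assert(segInfos.length == expectedCount)
  segInfos
}
```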



