QiangCai commented on a change in pull request #3950:
URL: https://github.com/apache/carbondata/pull/3950#discussion_r504466643



##########
File path: 
integration/spark/src/test/scala/org/apache/carbondata/index/bloom/BloomCoarseGrainIndexFunctionSuite.scala
##########
@@ -803,27 +840,38 @@ class BloomCoarseGrainIndexFunctionSuite  extends 
QueryTest with BeforeAndAfterA
     sql(s"INSERT INTO $bloomSampleTable SELECT 'c1v2', 2, 'c3v2'")
 
     // two segments both has index files
-    val carbonTable = CarbonEnv.getCarbonTable(Option("default"), 
bloomSampleTable)(SparkTestQueryExecutor.spark)
+    val carbonTable = CarbonEnv.getCarbonTable(Option("default"), 
bloomSampleTable)(
+      SparkTestQueryExecutor.spark)
     import scala.collection.JavaConverters._
     (0 to 1).foreach { segId =>
-      val indexPath = 
CarbonTablePath.getIndexesStorePath(carbonTable.getTablePath, segId.toString, 
indexName)
-      assert(FileUtils.listFiles(FileUtils.getFile(indexPath), 
Array("bloomindexmerge"), true).asScala.nonEmpty)
+      val indexPath = 
CarbonTablePath.getIndexesStorePath(carbonTable.getTablePath,

Review comment:
       Changed in all places.

##########
File path: 
integration/spark/src/test/scala/org/apache/carbondata/index/bloom/BloomCoarseGrainIndexSuite.scala
##########
@@ -82,16 +82,24 @@ class BloomCoarseGrainIndexSuite extends QueryTest with 
BeforeAndAfterAll with B
       sql(s"select * from $bloomSampleTable where id = 1 and city='city_1'", 
indexName, shouldHit),
       sql(s"select * from $normalTable where id = 1 and city='city_1'"))
     checkAnswer(
-      sql(s"select * from $bloomSampleTable where id = 999 and 
city='city_999'", indexName, shouldHit),
+      sql(s"select * from $bloomSampleTable where id = 999 and 
city='city_999'",
+        indexName,
+        shouldHit),

Review comment:
       Changed in all places.

##########
File path: 
integration/spark/src/test/scala/org/apache/carbondata/index/lucene/LuceneFineGrainIndexSuite.scala
##########
@@ -186,12 +191,21 @@ class LuceneFineGrainIndexSuite extends QueryTest with 
BeforeAndAfterAll {
 
     sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE index_test1 
OPTIONS('header'='false')")
 
-    checkAnswer(sql("SELECT * FROM index_test1 WHERE TEXT_MATCH('name:n10')"), 
sql(s"select * from index_test1 where name='n10'"))
-
-    var carbonTable = CarbonEnv.getCarbonTable(Some("lucene"), 
"index_test1")(sqlContext.sparkSession)
-    var indexes = 
carbonTable.getIndexMetadata.getIndexesMap.get(IndexType.LUCENE.getIndexProviderName)
-      .asScala.filter(p => 
p._2.get(CarbonCommonConstants.INDEX_STATUS).equalsIgnoreCase(IndexStatus.ENABLED.name()))
-    assert(indexes.exists(p => p._1.equals("dm12") && 
p._2.get(CarbonCommonConstants.INDEX_STATUS) == IndexStatus.ENABLED.name()))
+    checkAnswer(sql("SELECT * FROM index_test1 WHERE TEXT_MATCH('name:n10')"),
+      sql(s"select * from index_test1 where name='n10'"))
+
+    var carbonTable = CarbonEnv.getCarbonTable(Some("lucene"), 
"index_test1")(sqlContext
+      .sparkSession)
+    val indexes = carbonTable.getIndexMetadata

Review comment:
       done

##########
File path: 
integration/spark/src/test/scala/org/apache/carbondata/index/lucene/LuceneFineGrainIndexSuite.scala
##########
@@ -863,8 +891,13 @@ class LuceneFineGrainIndexSuite extends QueryTest with 
BeforeAndAfterAll {
          | ON index_test_table (name)
          | AS 'bloomfilter'
       """.stripMargin)
-    sql("show indexes on table index_test_table").show(false)
-    checkExistence(sql("show indexes on table index_test_table"), true, "dm", 
"dm1", "lucene", "bloomfilter")
+    sql("show indexes on table index_test_table").collect()
+    checkExistence(sql("show indexes on table index_test_table"),

Review comment:
       done

##########
File path: 
integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/aggquery/IntegerDataTypeTestCase.scala
##########
@@ -143,9 +147,52 @@ class IntegerDataTypeTestCase extends QueryTest with 
BeforeAndAfterAll {
 
     val ff = BigInt(2147484000L)
     checkAnswer(
-      sql("select 
begin_time,begin_time1,begin_time2,begin_time3,begin_time4,begin_time5,begin_time6,begin_time7,begin_time8,begin_time9,begin_time10,begin_time11,begin_time12,begin_time13,begin_time14,begin_time15,begin_time16,begin_time17,begin_time18,begin_time19,begin_time20
 from all_encoding_table"),
-      
Seq(Row(1497376581,10000,8388600,125,1497376581,8386600,10000,100,125,1497376581,1497423738,2139095000,1497376581,1497423738,32000,123.4,11.1,3200.1,214744460.2,1497376581,1497376581),
-        
Row(1497408581,32000,45000,25,10000,55000,32000,75,35,1497423838,1497423838,ff,1497423838,1497423838,31900,838860.7,12.3,127.1,214748360.2,1497408581,1497408581))
+      sql("select 
begin_time,begin_time1,begin_time2,begin_time3,begin_time4,begin_time5," +

Review comment:
       done




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Reply via email to