QiangCai commented on a change in pull request #3950: URL: https://github.com/apache/carbondata/pull/3950#discussion_r504464757
########## File path: index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestIndexModelWithUnsafeColumnPage.scala ########## @@ -35,8 +35,8 @@ class TestIndexModelWithUnsafeColumnPage extends QueryTest with BeforeAndAfterAl } test("Test secondry index data count") { - checkAnswer(sql("select count(*) from testSecondryIndex_IndexTable") - ,Seq(Row(1))) + checkAnswer(sql("select count(*) from testSecondryIndex_IndexTable"), Review comment: change for all places ########## File path: index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestNIQueryWithIndex.scala ########## @@ -163,8 +163,11 @@ class TestNIQueryWithIndex extends QueryTest with BeforeAndAfterAll{ // Query has EqualTo - So SI = Yes assert(checkSIColumnsSize(ch24, 1)) // EqualTo - }finally{ - sql(s"set carbon.si.lookup.partialstring=${CarbonCommonConstants.ENABLE_SI_LOOKUP_PARTIALSTRING_DEFAULT}") + } finally { + sql(s"set carbon.si.lookup.partialstring=${ + CarbonCommonConstants Review comment: change for all places ########## File path: index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestNIQueryWithIndex.scala ########## @@ -188,14 +191,17 @@ class TestNIQueryWithIndex extends QueryTest with BeforeAndAfterAll{ val ch15 = sql("select count(*) from seccust where c_phone='25-989-741-2988' and c_mktsegment like 'BUI%LDING'") // equals on c_phone of I1, I2 & (length & startsWith & endswith) on c_mktsegment of I2 so SI - Yes - assert(checkSIColumnsSize(ch15, 3)) //size = EqualTo on c_phone, length, StartsWith + assert(checkSIColumnsSize(ch15, 3)) // size = EqualTo on c_phone, length, StartsWith val ch16 = sql("select * from seccust where c_phone='25-989-741-2988'") // Query has EqualTo so SI - Yes assert(checkSIColumnsSize(ch16, 1)) // size = EqualTo - } finally{ - sql(s"set carbon.si.lookup.partialstring=${CarbonCommonConstants.ENABLE_SI_LOOKUP_PARTIALSTRING_DEFAULT}") + } finally { + 
sql(s"set carbon.si.lookup.partialstring=${ + CarbonCommonConstants + .ENABLE_SI_LOOKUP_PARTIALSTRING_DEFAULT Review comment: change for all places ########## File path: index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestSIWithSecondryIndex.scala ########## @@ -128,25 +129,32 @@ class TestSIWithSecondryIndex extends QueryTest with BeforeAndAfterAll { test("test create secondary index global sort on partition table") { sql("drop table if exists partition_carbon_table") - sql("create table partition_carbon_table (name string, id string, country string) PARTITIONED BY(dateofjoin " + + sql("create table partition_carbon_table (" + + "name string, id string, country string) PARTITIONED BY(dateofjoin " + "string) stored as carbondata") // create SI before the inserting the data - sql("create index partition_carbon_table_index on table partition_carbon_table(id, country) as 'carbondata' properties" + + sql("create index partition_carbon_table_index on table partition_carbon_table(" + + "id, country) as 'carbondata' properties" + "('sort_scope'='global_sort', 'Global_sort_partitions'='3')") sql("insert into partition_carbon_table select 'xx', '2', 'china', '2020' " + "union all select 'xx', '1', 'india', '2021'") checkAnswerWithoutSort(sql("select id, country from partition_carbon_table_index"), Seq(Row("1", "india"), Row("2", "china"))) // check for valid sort_scope - checkExistence(sql("describe formatted partition_carbon_table_index"), true, "Sort Scope global_sort") + checkExistence(sql("describe formatted partition_carbon_table_index"), + true, + "Sort Scope global_sort") Review comment: done ########## File path: index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestSIWithSecondryIndex.scala ########## @@ -296,14 +308,18 @@ class TestSIWithSecondryIndex extends QueryTest with BeforeAndAfterAll { sql(s"""ALTER TABLE default.index1 SET |SERDEPROPERTIES ('isSITableEnabled' = 
'false')""".stripMargin) - sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO " + - "TABLE uniqdata OPTIONS('DELIMITER'=',', 'BAD_RECORDS_LOGGER_ENABLE'='FALSE', 'BAD_RECORDS_ACTION'='FORCE')") + sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO TABLE uniqdata " + "OPTIONS('DELIMITER'=',','BAD_RECORDS_LOGGER_ENABLE'='FALSE','BAD_RECORDS_ACTION'='FORCE')") val count1 = sql("select * from uniqdata where workgroupcategoryname = 'developer'").count() - val df1 = sql("select * from uniqdata where workgroupcategoryname = 'developer'").queryExecution.sparkPlan + val df1 = sql("select * from uniqdata where workgroupcategoryname = 'developer'") + .queryExecution + .sparkPlan sql(s"""ALTER TABLE default.index1 SET |SERDEPROPERTIES ('isSITableEnabled' = 'false')""".stripMargin) val count2 = sql("select * from uniqdata where workgroupcategoryname = 'developer'").count() - val df2 = sql("select * from uniqdata where workgroupcategoryname = 'developer'").queryExecution.sparkPlan + val df2 = sql("select * from uniqdata where workgroupcategoryname = 'developer'") + .queryExecution + .sparkPlan Review comment: Please apply this same formatting change in all the other places where this pattern occurs. ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org