This is an automated email from the ASF dual-hosted git repository.
qiangcai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git
The following commit(s) were added to refs/heads/master by this push:
new bc65e0c [HOTFIX] Fix random failure issue due to indexMerge
bc65e0c is described below
commit bc65e0c60207a3e58a1d41cd37601735b9ee4f59
Author: Indhumathi27 <[email protected]>
AuthorDate: Tue Oct 20 19:31:50 2020 +0530
[HOTFIX] Fix random failure issue due to indexMerge
Why is this PR needed?
CarbonIndexFileMerge test cases fail randomly in CI because merge-related CarbonProperties overridden by other test suites are not restored to their defaults
What changes were proposed in this PR?
Reset merge-related and other overridden CarbonProperties to their defaults in the afterAll function of the affected test suites (see the sketch below)
Does this PR introduce any user interface change?
No
Is any new testcase added?
No
This closes #3989
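
For illustration, a minimal sketch of the reset-to-default pattern this change applies. The suite and table names are hypothetical, and the import paths for CarbonProperties and CarbonCommonConstants are assumed rather than taken from this diff:

// Hypothetical suite illustrating the pattern: override a merge property for this
// suite's own tests, then restore the default in afterAll so later suites are not
// affected by the order in which CI executes them.
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll

import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties

class ExampleIndexMergeTestCase extends QueryTest with BeforeAndAfterAll {

  override def beforeAll(): Unit = {
    // Suite-specific override, applied globally through the CarbonProperties singleton.
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT, "false")
  }

  override def afterAll(): Unit = {
    sql("DROP TABLE IF EXISTS example_table")
    // Reset to the documented default instead of leaving the override behind.
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT,
        CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT_DEFAULT)
  }
}

Resetting to the default value (rather than only removing the override) makes the post-suite state explicit; the same change is applied to StandardPartitionTableLoadingTestCase below.
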
---
.../spark/testsuite/mergedata/CarbonDataFileMergeTestCaseOnSI.scala | 5 ++++-
.../testsuite/mergeindex/CarbonIndexFileMergeTestCaseWithSI.scala | 3 ++-
.../integration/spark/testsuite/binary/TestBinaryDataType.scala | 5 +++++
.../spark/testsuite/booleantype/BooleanDataTypesParameterTest.scala | 3 +++
.../standardpartition/StandardPartitionTableLoadingTestCase.scala | 6 ++++--
.../standardpartition/StandardPartitionTableQueryTestCase.scala | 1 -
6 files changed, 18 insertions(+), 5 deletions(-)
diff --git a/index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/mergedata/CarbonDataFileMergeTestCaseOnSI.scala b/index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/mergedata/CarbonDataFileMergeTestCaseOnSI.scala
index c4020c6..4f1a40e 100644
--- a/index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/mergedata/CarbonDataFileMergeTestCaseOnSI.scala
+++ b/index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/mergedata/CarbonDataFileMergeTestCaseOnSI.scala
@@ -63,9 +63,12 @@ class CarbonDataFileMergeTestCaseOnSI
sql("use default")
sql("drop database if exists dataFileMerge cascade")
CarbonProperties.getInstance()
- .addProperty(CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT, "true")
+ .addProperty(CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT,
+ CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT_DEFAULT)
.addProperty(CarbonCommonConstants.COMPACTION_SEGMENT_LEVEL_THRESHOLD,
CarbonCommonConstants.DEFAULT_SEGMENT_LEVEL_THRESHOLD)
+ .addProperty(CarbonCommonConstants.CARBON_SI_SEGMENT_MERGE,
+ CarbonCommonConstants.CARBON_SI_SEGMENT_MERGE_DEFAULT)
}
test("Verify correctness of data file merge") {
diff --git a/index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/mergeindex/CarbonIndexFileMergeTestCaseWithSI.scala b/index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/mergeindex/CarbonIndexFileMergeTestCaseWithSI.scala
index 79faa82..40f25d1 100644
--- a/index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/mergeindex/CarbonIndexFileMergeTestCaseWithSI.scala
+++ b/index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/mergeindex/CarbonIndexFileMergeTestCaseWithSI.scala
@@ -59,7 +59,8 @@ class CarbonIndexFileMergeTestCaseWithSI
sql("DROP INDEX IF EXISTS nonindexmerge_index4 on nonindexmerge")
sql("DROP INDEX IF EXISTS indexmerge_index on indexmerge")
CarbonProperties.getInstance()
- .addProperty(CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT, "true")
+ .addProperty(CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT,
+ CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT_DEFAULT)
.addProperty(CarbonCommonConstants.COMPACTION_SEGMENT_LEVEL_THRESHOLD,
CarbonCommonConstants.DEFAULT_SEGMENT_LEVEL_THRESHOLD)
.addProperty(CarbonCommonConstants.CARBON_SI_SEGMENT_MERGE,
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/binary/TestBinaryDataType.scala b/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/binary/TestBinaryDataType.scala
index 4ea7820..2f37d2d 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/binary/TestBinaryDataType.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/binary/TestBinaryDataType.scala
@@ -1755,6 +1755,11 @@ class TestBinaryDataType extends QueryTest with BeforeAndAfterAll {
}
override def afterAll: Unit = {
+ CarbonProperties.getInstance()
+ .addProperty(CarbonCommonConstants.CARBON_ENABLE_BAD_RECORD_HANDLING_FOR_INSERT,
+ CarbonCommonConstants.CARBON_ENABLE_BAD_RECORD_HANDLING_FOR_INSERT_DEFAULT)
+ .addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE,
+ CarbonCommonConstants.DEFAULT_ENABLE_AUTO_LOAD_MERGE)
sqlContext.sparkSession.conf.unset("hive.exec.dynamic.partition.mode")
sql("DROP TABLE IF EXISTS binaryTable")
sql("DROP TABLE IF EXISTS hiveTable")
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/booleantype/BooleanDataTypesParameterTest.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/booleantype/BooleanDataTypesParameterTest.scala
index 6ff4146..8c54d5b 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/booleantype/BooleanDataTypesParameterTest.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/booleantype/BooleanDataTypesParameterTest.scala
@@ -52,6 +52,9 @@ class BooleanDataTypesParameterTest
}
override def afterAll(): Unit = {
+ CarbonProperties.getInstance()
+ .addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE,
+ CarbonCommonConstants.DEFAULT_ENABLE_AUTO_LOAD_MERGE)
sql("drop table if exists boolean_one_column")
sql("drop table if exists boolean_table")
}
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableLoadingTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableLoadingTestCase.scala
index ab6c385..128274e 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableLoadingTestCase.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableLoadingTestCase.scala
@@ -589,7 +589,8 @@ class StandardPartitionTableLoadingTestCase extends QueryTest with BeforeAndAfte
checkAnswer(sql("SELECT COUNT(*) FROM new_par"), Seq(Row(4)))
} finally {
CarbonProperties.getInstance()
- .removeProperty(CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT)
+ .addProperty(CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT,
+ CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT_DEFAULT)
}
}
@@ -707,7 +708,8 @@ class StandardPartitionTableLoadingTestCase extends QueryTest with BeforeAndAfte
override def afterAll: Unit = {
- CarbonProperties.getInstance().addProperty("carbon.read.partition.hive.direct", "true")
+ CarbonProperties.getInstance().addProperty("carbon.read.partition.hive.direct",
+ CarbonCommonConstants.CARBON_READ_PARTITION_HIVE_DIRECT_DEFAULT)
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableQueryTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableQueryTestCase.scala
index 14ac920..20d14a6 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableQueryTestCase.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableQueryTestCase.scala
@@ -21,7 +21,6 @@ import org.apache.spark.sql.{CarbonEnv, DataFrame, Row}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.strategy.CarbonDataSourceScan
import org.apache.spark.sql.test.util.QueryTest
-import org.apache.spark.util.SparkUtil
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException