marchpure commented on a change in pull request #3793:
URL: https://github.com/apache/carbondata/pull/3793#discussion_r442741921
##########
File path:
integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/CarbonMergeDataSetCommand.scala
##########
@@ -269,11 +271,10 @@ case class CarbonMergeDataSetCommand(
new SparkCarbonFileFormat().prepareWrite(sparkSession, job,
Map(), schema)
val config = SparkSQLUtil.broadCastHadoopConf(sparkSession.sparkContext,
job.getConfiguration)
-
(frame.rdd.coalesce(DistributionUtil.getConfiguredExecutors(sparkSession.sparkContext)).
- mapPartitionsWithIndex { case (index, iter) =>
+ (frame.rdd.mapPartitionsWithIndex { case (index, iter) =>
CarbonProperties.getInstance().addProperty(CarbonLoadOptionConstants
.ENABLE_CARBON_LOAD_DIRECT_WRITE_TO_STORE_PATH, "true")
- val confB = config.value.value
+ val confB = new Configuration(config.value.value)
Review comment:
In concurrent scenarios, the shared Hadoop conf can be tampered with by
other tasks, leading to exceptions. Creating a new Configuration copy
solves this issue.
##########
File path:
integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/merge/MergeTestCase.scala
##########
@@ -547,6 +558,7 @@ class MergeTestCase extends QueryTest with
BeforeAndAfterAll {
CarbonMergeDataSetCommand(dwSelframe,
odsframe,
MergeDataSetMatches(col("A.id").equalTo(col("B.id")),
matches.toList)).run(sqlContext.sparkSession)
+
Review comment:
modified
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]