akashrn5 commented on a change in pull request #3793:
URL: https://github.com/apache/carbondata/pull/3793#discussion_r442662060
##########
File path: integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/merge/MergeTestCase.scala
##########
@@ -702,10 +716,25 @@ class MergeTestCase extends QueryTest with BeforeAndAfterAll {
insertExpr(insertMap).
whenMatched("B.deleted=true").
delete().execute()
+ assert(getDeleteDeltaFileCount("target") == 1)
checkAnswer(sql("select count(*) from target"), Seq(Row(3)))
checkAnswer(sql("select * from target order by key"), Seq(Row("c", "200"),
Row("d", "3"), Row("e", "100")))
}
+ private def getDeleteDeltaFileCount(tableName: String): Int = {
+ val table = CarbonEnv.getCarbonTable(None, tableName)(sqlContext.sparkSession)
+ val path = table.getTablePath
Review comment:
Take the path only up to the segment directory, i.e. up to `Part0`, instead of listing from the table root.
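
A minimal sketch of the suggested change, assuming the non-partitioned CarbonData table layout `tablePath/Fact/Part0/Segment_x` (the `Fact/Part0` path components are an assumption based on that layout, not something stated in this PR):

```scala
// Hypothetical variant of the test helper: list delete-delta files only
// under the assumed segment parent directory instead of the whole table path,
// so unrelated files elsewhere under the table dir are never counted.
private def getDeleteDeltaFileCount(tableName: String): Int = {
  val table = CarbonEnv.getCarbonTable(None, tableName)(sqlContext.sparkSession)
  // "Fact/Part0" is the assumed segment parent dir in the non-partitioned layout
  val segmentDir = table.getTablePath + "/Fact/Part0"
  FileFactory.getCarbonFile(segmentDir).listFiles(true, new CarbonFileFilter {
    override def accept(file: CarbonFile): Boolean =
      file.getName.endsWith(CarbonCommonConstants.DELETE_DELTA_FILE_EXT)
  }).size
}
```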
##########
File path: integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/CarbonMergeDataSetCommand.scala
##########
@@ -51,6 +52,7 @@ import org.apache.carbondata.core.mutate.CarbonUpdateUtil
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.processing.loading.FailureCauses
+
Review comment:
Revert this (stray added blank line).
##########
File path: integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/CarbonMergeDataSetCommand.scala
##########
@@ -269,11 +271,10 @@ case class CarbonMergeDataSetCommand(
new SparkCarbonFileFormat().prepareWrite(sparkSession, job,
Map(), schema)
val config = SparkSQLUtil.broadCastHadoopConf(sparkSession.sparkContext,
job.getConfiguration)
- (frame.rdd.coalesce(DistributionUtil.getConfiguredExecutors(sparkSession.sparkContext)).
- mapPartitionsWithIndex { case (index, iter) =>
+ (frame.rdd.mapPartitionsWithIndex { case (index, iter) =>
CarbonProperties.getInstance().addProperty(CarbonLoadOptionConstants
.ENABLE_CARBON_LOAD_DIRECT_WRITE_TO_STORE_PATH, "true")
- val confB = config.value.value
+ val confB = new Configuration(config.value.value)
Review comment:
Why is this change required? `confB` is already of type `Configuration`.
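
For context, one common reason for such a copy (an assumption about the author's intent, not confirmed in this thread): the broadcast `Configuration` is a single instance shared by every task in an executor JVM, so a task that mutates it needs a private copy. Hadoop's `Configuration` copy constructor gives a cheap defensive clone; a sketch of the partition body, where `config` and `index` come from the surrounding `mapPartitionsWithIndex` code:

```scala
import org.apache.hadoop.conf.Configuration

// All tasks on this executor deserialize the broadcast to the same instance.
val shared: Configuration = config.value.value

// Per-task defensive copy: any mutation stays local to this task instead of
// racing with sibling tasks that read the shared instance concurrently.
val confB = new Configuration(shared)
confB.set("example.task.id", index.toString) // illustrative key only
```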
##########
File path: integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/merge/MergeTestCase.scala
##########
@@ -702,10 +716,25 @@ class MergeTestCase extends QueryTest with BeforeAndAfterAll {
insertExpr(insertMap).
whenMatched("B.deleted=true").
delete().execute()
+ assert(getDeleteDeltaFileCount("target") == 1)
checkAnswer(sql("select count(*) from target"), Seq(Row(3)))
checkAnswer(sql("select * from target order by key"), Seq(Row("c", "200"),
Row("d", "3"), Row("e", "100")))
}
+ private def getDeleteDeltaFileCount(tableName: String): Int = {
+ val table = CarbonEnv.getCarbonTable(None, tableName)(sqlContext.sparkSession)
+ val path = table.getTablePath
+ val deleteDeltaFiles = FileFactory.getCarbonFile(path).listFiles(true, new CarbonFileFilter {
+ override def accept(file: CarbonFile): Boolean = file.getName.endsWith(CarbonCommonConstants
+ .DELETE_DELTA_FILE_EXT)
+ })
+ if (deleteDeltaFiles != null) {
Review comment:
No need for the null check; `listFiles` returns an array, so directly return `deleteDeltaFiles.size`.
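
Under that suggestion the helper collapses to the following sketch (assuming, as the reviewer states, that `listFiles` returns a possibly empty array rather than null):

```scala
private def getDeleteDeltaFileCount(tableName: String): Int = {
  val table = CarbonEnv.getCarbonTable(None, tableName)(sqlContext.sparkSession)
  val deleteDeltaFiles = FileFactory.getCarbonFile(table.getTablePath)
    .listFiles(true, new CarbonFileFilter {
      override def accept(file: CarbonFile): Boolean =
        file.getName.endsWith(CarbonCommonConstants.DELETE_DELTA_FILE_EXT)
    })
  // An empty array simply yields 0, so no null check is needed.
  deleteDeltaFiles.size
}
```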
##########
File path: integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/merge/MergeTestCase.scala
##########
@@ -547,6 +558,7 @@ class MergeTestCase extends QueryTest with BeforeAndAfterAll {
CarbonMergeDataSetCommand(dwSelframe,
odsframe,
MergeDataSetMatches(col("A.id").equalTo(col("B.id")),
matches.toList)).run(sqlContext.sparkSession)
+
Review comment:
Revert this change (unrelated added blank line).
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
users@infra.apache.org