turboFei commented on a change in pull request #25863:
[SPARK-28945][SPARK-29037][CORE][SQL] Fix the issue that Spark gives duplicate
results and support concurrent file source write operations to different
partitions in the same table.
URL: https://github.com/apache/spark/pull/25863#discussion_r328203055
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/InsertIntoHadoopFsRelationCommand.scala
##########
@@ -263,4 +280,108 @@ case class InsertIntoHadoopFsRelationCommand(
}
}.toMap
}
+
+ /**
+ * Detect conflicts when several InsertIntoHadoopFsRelation operations
+ * write concurrently.
+ */
+ private def detectConflict(
+ fs: FileSystem,
+ path: Path,
+ staticPartitionKVs: Seq[(String, String)]): Unit = {
+ for (i <- 0 until partitionColumns.size) {
+ Some(".spark-staging-" + i)
+ .map(stagingPath => new Path(path, stagingPath))
+ .foreach { stagingDir =>
+ if (fs.exists(stagingDir)) {
+ val subPath = HadoopMapReduceCommitProtocol.getEscapedStaticPartitionPath(
+ staticPartitionKVs.slice(0, i))
+ val checkedPath = if (!subPath.isEmpty) {
+ new Path(stagingDir, subPath)
+ } else {
+ stagingDir
+ }
+ if (fs.exists(checkedPath)) {
+ throwConflictException(fs, stagingDir, i, staticPartitionKVs)
+ }
+ }
+ }
+ }
+ }
+
+ private def throwConflictException(
+ fs: FileSystem,
+ stagingDir: Path,
+ depth: Int,
+ staticPartitionKVs: Seq[(String, String)]): Unit = {
+ val conflictedPaths = ListBuffer[Path]()
+ val currentPath = if (depth == staticPartitionKVs.size || staticPartitionKVs.size == 0) {
+ stagingDir
+ } else {
+ new Path(stagingDir, HadoopMapReduceCommitProtocol.getEscapedStaticPartitionPath(
+ staticPartitionKVs.slice(0, staticPartitionKVs.size - depth)))
+ }
+
+ findConflictedStagingOutputPaths(fs, currentPath, depth, conflictedPaths)
+
+ val pathsInfo = conflictedPaths.toList
+ .map { path =>
+ try {
+ val files = fs.listStatus(path)
+ val appId = if (files.size > 0) {
+ files.apply(0).getPath.getName
+ } else {
+ "Not Found"
+ }
+
+ val absolutePath = path.toUri.getRawPath
+ val relativePath = absolutePath.substring(absolutePath.lastIndexOf(stagingDir.getName))
+ (relativePath, appId, new Date(fs.getFileStatus(path).getModificationTime))
+ } catch {
+ case e: Exception => logWarning("Exception occurred", e)
+ ("Not found due to exception", "Not Found", null)
+ }
+ }
+
+ throw new InsertFileSourceConflictException(
+ s"""
+ | Conflict detected: other conflicting output path(s) exist under the table path
+ | ($outputPath).
+ | Their relative paths, appIds and last modification times are shown below:
+ | ${pathsInfo}.
+ | There are two possibilities:
+ | 1. Another InsertDataSource operation is executing; you need to wait for it to
+ | complete.
+ | 2. The directory belongs to a killed application and was not cleaned up gracefully.
+ |
+ | Please check the last modification time and use the given appId to determine
+ | whether the corresponding application is still running.
+ | If it is not, manually delete the corresponding path (non-recursively).
+ |""".stripMargin)
+ }
+
+ /**
+ * Find the staging output paths that conflict with the current
+ * InsertIntoHadoopFsRelation operation.
+ */
+ private def findConflictedStagingOutputPaths(
+ fs: FileSystem,
+ path: Path,
+ depth: Int,
+ paths: ListBuffer[Path]): Unit = {
+ try {
+ if (fs.exists(path)) {
+ if (depth == 0) {
+ paths += path
+ } else {
+ for (file <- fs.listStatus(path)) {
+ findConflictedStagingOutputPaths(fs, file.getPath, depth - 1, paths)
+ }
+ }
+ }
+ } catch {
+ case e: Exception =>
+ logWarning("Exception occurred when finding conflicted staging output
paths.", e)
+ }
Review comment:
ditto
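
For readers following the diff, here is a minimal, self-contained sketch of the staging-path prefix check that detectConflict performs. It is not the PR's code: StagingConflictSketch, partitionSubPath and hasConflict are hypothetical names, partitionSubPath is a simplified stand-in for HadoopMapReduceCommitProtocol.getEscapedStaticPartitionPath (it does no escaping), and the example runs against a local Hadoop FileSystem under an arbitrary /tmp path.

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

object StagingConflictSketch {

  // Hypothetical stand-in for HadoopMapReduceCommitProtocol.getEscapedStaticPartitionPath:
  // joins static partition key/value pairs into a relative path such as "p1=a/p2=b"
  // (the real helper also escapes special characters).
  private def partitionSubPath(kvs: Seq[(String, String)]): String =
    kvs.map { case (k, v) => s"$k=$v" }.mkString("/")

  // Mirrors the loop in detectConflict: for each depth i, look for a
  // ".spark-staging-<i>" directory under the table path and, below it, the
  // subpath built from the first i static partition key/value pairs.
  def hasConflict(
      fs: FileSystem,
      tablePath: Path,
      numPartitionColumns: Int,
      staticPartitionKVs: Seq[(String, String)]): Boolean = {
    (0 until numPartitionColumns).exists { i =>
      val stagingDir = new Path(tablePath, ".spark-staging-" + i)
      if (!fs.exists(stagingDir)) {
        false
      } else {
        val subPath = partitionSubPath(staticPartitionKVs.take(i))
        val checkedPath = if (subPath.nonEmpty) new Path(stagingDir, subPath) else stagingDir
        fs.exists(checkedPath)
      }
    }
  }

  def main(args: Array[String]): Unit = {
    val fs = FileSystem.getLocal(new Configuration())
    val table = new Path("/tmp/conflict-sketch/table")
    // Simulate another insert that writes into static partition p1=a of a table
    // with two partition columns: it stages its output under .spark-staging-1/p1=a.
    fs.mkdirs(new Path(table, ".spark-staging-1/p1=a"))

    // An insert whose static prefix overlaps (p1=a) is reported as a conflict ...
    println(hasConflict(fs, table, 2, Seq("p1" -> "a")))  // true
    // ... while an insert into a different static partition (p1=b) is not.
    println(hasConflict(fs, table, 2, Seq("p1" -> "b")))  // false
  }
}

The prefix check is what allows concurrent writes to different partitions: at depth i only the first i static key/value pairs are consulted, so two inserts whose static-partition prefixes diverge never see each other's .spark-staging-<i> subdirectories, while overlapping prefixes are reported as a conflict.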