advancedxy commented on a change in pull request #27100:
[SPARK-29037][CORE][SQL] Fix the issue that Spark gives duplicate results, and
support concurrent file source write operations writing to different partitions
in the same table.
URL: https://github.com/apache/spark/pull/27100#discussion_r364051130
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/InsertIntoHadoopFsRelationCommand.scala
##########
@@ -269,4 +323,214 @@ case class InsertIntoHadoopFsRelationCommand(
}
}.toMap
}
+
+  /**
+   * Check whether the current committer supports several InsertIntoHadoopFsRelation
+   * operations writing to different partitions of the same table concurrently. If it does,
+   * detect whether any operations conflict by writing to the same partition of the same
+   * table, or by writing to a non-partitioned table.
+   */
+  private def detectConflict(
+      commitProtocol: FileCommitProtocol,
+      fs: FileSystem,
+      staticPartitionKVs: Seq[(String, String)]): Unit = {
+
+    val supportConcurrent = commitProtocol.isInstanceOf[HadoopMapReduceCommitProtocol] &&
+      commitProtocol.asInstanceOf[HadoopMapReduceCommitProtocol].supportConcurrent
+    if (supportConcurrent && fs.isDirectory(outputPath)) {
+      val stagingDirName = ".spark-staging-" + staticPartitionKVs.size
+      concurrentStagingDir = new Path(outputPath, stagingDirName)
+      val stagingPartitionPathToCheck = new Path(outputPath, buildPath(stagingDirName,
+        getEscapedStaticPartitionPath(staticPartitionKVs)))
+
+      if (tryLock(stagingPartitionPathToCheck, fs)) {
+        concurrentStagingPartitionDir = stagingPartitionPathToCheck
+        fs.mkdirs(new Path(concurrentStagingPartitionDir, SparkEnv.get.conf.getAppId))
+      } else {
+        throwConflictedException(stagingPartitionPathToCheck, concurrentStagingDir, fs)
+      }
+
+      // Check whether there are conflicting insert operations with a different number of
+      // specified static partition key-values.
+      for (i <- 0 to partitionColumns.size) {
+        if (i != staticPartitionKVs.size) {
+          val stagingPath = new Path(outputPath, ".spark-staging-" + i)
+          val subPartitions = staticPartitionKVs.slice(0, i)
+          detectConflictPath(fs, stagingPath, subPartitions.size, i, staticPartitionKVs)
+        }
+      }
+    }
+  }
+
+  private def detectConflictPath(
+      fs: FileSystem,
+      stagingPath: Path,
+      kvSize: Int,
+      depth: Int,
+      staticPartitionKVs: Seq[(String, String)]): Unit = {
+    val currentPath = if (kvSize == 0) {
+      stagingPath
+    } else {
+      new Path(outputPath, buildPath(stagingPath.getName, getEscapedStaticPartitionPath(
+        staticPartitionKVs.slice(0, kvSize))))
+    }
+
+    findConflictedStagingPartitionPaths(fs, currentPath, depth - kvSize)
+      .foreach { stagingPartitionPath =>
+        if (tryLock(stagingPartitionPath, fs)) {
+          logInfo(
+            s"""
+               | Acquired the lock of the conflicting staging partition $stagingPartitionPath
+               | successfully. It should belong to a completed application whose staging
+               | partition path has not been cleaned up yet; cleaning it up now.
+               |""".stripMargin)
+          unlock(stagingPartitionPath, stagingPath, fs)
+        } else {
+          throwConflictedException(stagingPartitionPath, stagingPath, fs)
+        }
+      }
+  }
+
+  private def throwConflictedException(
+      stagingPartitionPath: Path,
+      stagingPath: Path,
+      fs: FileSystem): Unit = {
+    val absolutePath = stagingPartitionPath.toUri.getPath
+    val relativePath = absolutePath.substring(absolutePath.lastIndexOf(stagingPath.getName))
+    var modificationTime: Date = null
+    val appId = Try {
+      modificationTime = new Date(fs.getFileStatus(stagingPartitionPath).getModificationTime)
+      fs.listStatus(stagingPartitionPath).filter { status =>
+        fs.isDirectory(status.getPath)
+      }.apply(0).getPath.getName
+    } match {
+      case Success(appDirName) => appDirName
+      case Failure(e) =>
+        logWarning(
+          s"""
+             | Exception occurred when getting the appId dir name under stagingPartitionDir:
+             | $stagingPartitionPath""".stripMargin, e)
+        "NOT FOUND"
+    }
+
+    // Unlock the concurrentStagingPartitionDir, which may have been created.
+    unlock(concurrentStagingPartitionDir, concurrentStagingDir, fs)
+    throw new InsertFileSourceConflictException(
+      s"""
+         | CONFLICT!!! There is a conflicting output path under the table path:
+         | ($outputPath).
+         | Detailed information (conflicting path, appId, last modification time):
+         | $relativePath, $appId, $modificationTime.
+         |
+         | There are two possibilities:
+         | 1. The path is being written by another InsertDataSource operation and you need to
+         |    wait for it to complete.
+         | 2. The path belongs to a failed application that didn't have a chance to clean it
+         |    up gracefully.
+         |
+         | Please check with the provided appId and last modification time whether another
+         | application is still writing to the path.
+         | If not, it is safe to delete it and retry your application.
+         |""".stripMargin)
+  }
+
+  /**
+   * Find the staging partition paths that conflict with the current
+   * InsertIntoHadoopFsRelation operation.
+   */
+  private def findConflictedStagingPartitionPaths(
+      fs: FileSystem,
+      path: Path,
+      depth: Int): Seq[Path] = {
+    val paths = ListBuffer[Path]()
+    try {
+      if (fs.exists(path)) {
+        if (depth == 0) {
+          paths += path
+        } else {
+          for (file <- fs.listStatus(path)) {
+            paths ++= findConflictedStagingPartitionPaths(fs, file.getPath, depth - 1)
+          }
+        }
+      }
+    } catch {
+      case e: Exception =>
+        logError("Exception occurred when finding conflicted staging partition paths.")
+        throw e
+    }
+    paths
+  }
+
+  /**
+   * Try to acquire the lock of a partition. If the file system is not a
+   * [[DistributedFileSystem]], check whether the lock file exists: if it does, the partition
+   * lock cannot be acquired; otherwise, create the lock file and acquire the partition lock.
+   * If the file system is a [[DistributedFileSystem]], try to append to the lock file when it
+   * exists: if the append succeeds, the partition lock is acquired; if the lock file does not
+   * exist, create it and acquire the partition lock.
+   */
+  private def tryLock(
+      stagingPartitionPath: Path,
+      fs: FileSystem): Boolean = {
+    try {
+      if (!fs.exists(stagingPartitionPath)) {
+        fs.mkdirs(stagingPartitionPath)
+      }
+      val partitionLock = new Path(stagingPartitionPath, getLockName)
+      if (!fs.isInstanceOf[DistributedFileSystem]) {
+        val existed = fs.exists(partitionLock)
+        if (!existed) {
+          fs.create(partitionLock).close()
+        }
+        !existed
+      } else {
+
+        def pickLock: Boolean = Try {
+          if (fs.exists(partitionLock)) {
+            fs.append(partitionLock)
+          } else {
+            fs.create(partitionLock)
+          }
+        } match {
+          case Success(_) => true
+          case _ => false
+        }
+
+        if (pickLock) {
+          true
+        } else {
+          Thread.sleep(HdfsConstants.LEASE_SOFTLIMIT_PERIOD)
Review comment:
This is quite some time (IIRC, 60 secs); should we add an option to turn off
this wait?
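
For example, a minimal sketch of what I have in mind. The conf key, its default,
and the single retry after the sleep are hypothetical, not something this PR
defines:

```scala
// Hypothetical SQLConf entry (name and default are made up for illustration),
// declared alongside the other entries in org.apache.spark.sql.internal.SQLConf:
val INSERT_CONFLICT_LOCK_WAIT_ENABLED =
  buildConf("spark.sql.sources.insert.conflictLockWait.enabled")
    .doc("When true, sleep for HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60s by default) " +
      "to let the HDFS lease on a staging partition lock file expire before retrying. " +
      "When false, fail fast without waiting.")
    .booleanConf
    .createWithDefault(true)

// tryLock could then gate the sleep on the conf, assuming it retries pickLock
// once after waiting out the lease soft limit:
if (pickLock) {
  true
} else if (SQLConf.get.getConf(SQLConf.INSERT_CONFLICT_LOCK_WAIT_ENABLED)) {
  Thread.sleep(HdfsConstants.LEASE_SOFTLIMIT_PERIOD)
  pickLock
} else {
  false
}
```

That way, jobs that would rather fail fast on a suspected conflict can opt out
of the 60-second wait.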
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]