Github user xuanyuanking commented on a diff in the pull request:
https://github.com/apache/spark/pull/17702#discussion_r156658524
--- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala ---
@@ -668,4 +672,31 @@ object DataSource extends Logging {
}
globPath
}
+
+  /**
+   * Returns all paths represented by the wildcard string.
+   * Follows [[InMemoryFileIndex]].bulkListLeafFiles and reuses the conf.
+   */
+  private def getGlobbedPaths(
+      sparkSession: SparkSession,
+      fs: FileSystem,
+      hadoopConf: SerializableConfiguration,
+      qualified: Path): Seq[Path] = {
+    val paths = SparkHadoopUtil.get.expandGlobPath(fs, qualified)
+    if (paths.size <= sparkSession.sessionState.conf.parallelPartitionDiscoveryThreshold) {
+      SparkHadoopUtil.get.globPathIfNecessary(fs, qualified)
+    } else {
+      val parallelPartitionDiscoveryParallelism =
+        sparkSession.sessionState.conf.parallelPartitionDiscoveryParallelism
+      val numParallelism = Math.min(paths.size, parallelPartitionDiscoveryParallelism)
+      val expanded = sparkSession.sparkContext
--- End diff --
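For reference, the threshold and parallelism read in the branch above are exposed as SQL session configs. A minimal sketch of setting them, assuming the standard keys and that the defaults shown (32 and 10000) apply to this Spark version:

  // Illustrative only: max globbed paths expanded sequentially, and the
  // cap on parallelism when expansion is distributed. Values are assumed defaults.
  spark.conf.set("spark.sql.sources.parallelPartitionDiscovery.threshold", "32")
  spark.conf.set("spark.sql.sources.parallelPartitionDiscovery.parallelism", "10000")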
Thanks for your suggestion and detailed explanation. I'll reimplement this
using a local thread pool.
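For what it's worth, a local-thread-pool variant might look roughly like the sketch below. This is illustrative only, not the code that eventually landed: the helper name expandGlobsLocally, the pool sizing, and resolving each pattern with fs.globStatus are all assumptions.

  import java.util.concurrent.Executors

  import scala.concurrent.{Await, ExecutionContext, Future}
  import scala.concurrent.duration.Duration

  import org.apache.hadoop.fs.{FileSystem, Path}

  // Resolve each glob pattern concurrently on a fixed-size local pool,
  // so no Spark job is launched just to list files from the driver.
  def expandGlobsLocally(fs: FileSystem, paths: Seq[Path], numThreads: Int): Seq[Path] = {
    val pool = Executors.newFixedThreadPool(numThreads)
    implicit val ec: ExecutionContext = ExecutionContext.fromExecutorService(pool)
    try {
      val futures = paths.map { path =>
        // globStatus returns null when nothing matches the pattern.
        Future { Option(fs.globStatus(path)).toSeq.flatten.map(_.getPath) }
      }
      Await.result(Future.sequence(futures), Duration.Inf).flatten
    } finally {
      pool.shutdown()
    }
  }

The upside over the sparkContext-based branch in the diff above is that glob expansion stays on the driver and does not occupy cluster scheduling resources.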