github-actions[bot] closed pull request #46678: [SPARK-47008][CORE] Added
Hadoop's FileSystem `hasPathCapability` check to avoid FileNotFoundException(s)
when using S3 Express One Zone Storage.
URL: https://github.com/apache/spark/pull/46678
--
This is an automated message from the Apache Git Service.
github-actions[bot] commented on PR #46678:
URL: https://github.com/apache/spark/pull/46678#issuecomment-2336493072
We're closing this PR because it hasn't been updated in a while. This isn't
a judgement on the merit of the PR in any way. It's just a way of keeping the
PR queue manageable.
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1621412534
##
core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala:
##
@@ -219,8 +220,20 @@ private[spark] class SparkHadoopUtil extends Logging {
*/
def list
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1621060315
##
core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala:
##
@@ -218,12 +218,17 @@ private[spark] class SparkHadoopUtil extends Logging {
* that file.
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1621057278
##
core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala:
##
@@ -219,8 +220,20 @@ private[spark] class SparkHadoopUtil extends Logging {
*/
def list
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1621057278
##
core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala:
##
@@ -219,8 +220,20 @@ private[spark] class SparkHadoopUtil extends Logging {
*/
def list
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1621056913
##
core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala:
##
@@ -218,12 +218,17 @@ private[spark] class SparkHadoopUtil extends Logging {
* that file.
sunchao commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1621002684
##
core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala:
##
@@ -219,8 +220,20 @@ private[spark] class SparkHadoopUtil extends Logging {
*/
def listL
sunchao commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1621000587
##
core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala:
##
@@ -218,12 +218,17 @@ private[spark] class SparkHadoopUtil extends Logging {
* that file.
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1620390870
##
core/src/main/scala/org/apache/spark/util/Utils.scala:
##
@@ -687,8 +687,22 @@ private[spark] object Utils
in.close()
}
} else {
- fs.list
LuciferYang commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1620200164
##
core/src/main/scala/org/apache/spark/util/Utils.scala:
##
@@ -687,8 +687,22 @@ private[spark] object Utils
in.close()
}
} else {
- fs.l
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1619216349
##
core/src/main/scala/org/apache/spark/util/Utils.scala:
##
@@ -687,8 +687,22 @@ private[spark] object Utils
in.close()
}
} else {
- fs.list
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1619272328
##
core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala:
##
@@ -219,8 +220,20 @@ private[spark] class SparkHadoopUtil extends Logging {
*/
def list
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1619272328
##
core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala:
##
@@ -219,8 +220,20 @@ private[spark] class SparkHadoopUtil extends Logging {
*/
def list
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1619231278
##
sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileOperator.scala:
##
@@ -125,16 +125,4 @@ private[hive] object OrcFileOperator extends Logging {
p
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1619216349
##
core/src/main/scala/org/apache/spark/util/Utils.scala:
##
@@ -687,8 +687,22 @@ private[spark] object Utils
in.close()
}
} else {
- fs.list
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1619210351
##
core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala:
##
@@ -218,12 +218,17 @@ private[spark] class SparkHadoopUtil extends Logging {
* that file.
LuciferYang commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1618295996
##
core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala:
##
@@ -218,12 +218,17 @@ private[spark] class SparkHadoopUtil extends Logging {
* that fil
sunchao commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1618208196
##
core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala:
##
@@ -218,12 +218,17 @@ private[spark] class SparkHadoopUtil extends Logging {
* that file.
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1616174627
##
sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileOperator.scala:
##
@@ -125,16 +125,4 @@ private[hive] object OrcFileOperator extends Logging {
p
LuciferYang commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1615515548
##
sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileOperator.scala:
##
@@ -125,16 +125,4 @@ private[hive] object OrcFileOperator extends Logging {
LuciferYang commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1615515548
##
sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileOperator.scala:
##
@@ -125,16 +125,4 @@ private[hive] object OrcFileOperator extends Logging {
leovegas commented on PR #46678:
URL: https://github.com/apache/spark/pull/46678#issuecomment-2131495536
> Yea. Looks like it makes sense to add the check here as well
>
>
https://github.com/apache/spark/blob/e8f58a9c4a641b830c5304b34b876e0cd5d3ed8e/core/src/main/scala/org/apache/spar
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1613994258
##
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcUtils.scala:
##
@@ -62,10 +62,10 @@ object OrcUtils extends Logging {
val CATALYST_TYPE
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1613992518
##
sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileOperator.scala:
##
@@ -125,16 +125,4 @@ private[hive] object OrcFileOperator extends Logging {
p
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1613015537
##
core/src/main/scala/org/apache/spark/util/Utils.scala:
##
@@ -687,8 +687,22 @@ private[spark] object Utils
in.close()
}
} else {
- fs.list
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1613014972
##
core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala:
##
@@ -219,8 +220,20 @@ private[spark] class SparkHadoopUtil extends Logging {
*/
def list
panbingkun commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1612592408
##
core/src/main/scala/org/apache/spark/util/Utils.scala:
##
@@ -687,8 +687,22 @@ private[spark] object Utils
in.close()
}
} else {
- fs.li
steveloughran commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1611760359
##
core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala:
##
@@ -355,6 +368,9 @@ private[spark] object SparkHadoopUtil extends Logging {
*/
pri
leovegas commented on PR #46678:
URL: https://github.com/apache/spark/pull/46678#issuecomment-2126738308
Yea. Looks like it makes sense to add the check here as well
https://github.com/apache/spark/blob/e8f58a9c4a641b830c5304b34b876e0cd5d3ed8e/core/src/main/scala/org/apache/spark/util/Had
LuciferYang commented on PR #46678:
URL: https://github.com/apache/spark/pull/46678#issuecomment-2126302404
There are many places in the Spark code where `fs.listStatus(path)` is
called. Why is it only necessary to probe in these two places and not
elsewhere? For example:
https://gi
LuciferYang commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1610993533
##
core/src/main/scala/org/apache/spark/util/Utils.scala:
##
@@ -687,8 +687,22 @@ private[spark] object Utils
in.close()
}
} else {
- fs.l
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1607884919
##
core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala:
##
@@ -87,8 +88,8 @@ private[spark] class SparkHadoopUtil extends Logging {
}
/**
- * A
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1607934126
##
core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala:
##
@@ -87,8 +88,8 @@ private[spark] class SparkHadoopUtil extends Logging {
}
/**
- * A
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1607884919
##
core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala:
##
@@ -87,8 +88,8 @@ private[spark] class SparkHadoopUtil extends Logging {
}
/**
- * A
leovegas commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1607882680
##
core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala:
##
@@ -54,9 +55,9 @@ private[spark] class SparkHadoopUtil extends Logging {
* Runs the given
LuciferYang commented on code in PR #46678:
URL: https://github.com/apache/spark/pull/46678#discussion_r1607774195
##
core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala:
##
@@ -54,9 +55,9 @@ private[spark] class SparkHadoopUtil extends Logging {
* Runs the giv
leovegas opened a new pull request, #46678:
URL: https://github.com/apache/spark/pull/46678
### What changes were proposed in this pull request?
Umbrella [SPARK-44111 Prepare Apache Spark
4.0.0](https://issues.apache.org/jira/browse/SPARK-44111)
Sub-task [SPARK-47008](h
38 matches
Mail list logo