This is an automated email from the ASF dual-hosted git repository.
ulyssesyou pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kyuubi.git
The following commit(s) were added to refs/heads/master by this push:
new 7feb53566 [KYUUBI #5028] Update session hadoop conf to catalog hadoop conf
7feb53566 is described below
commit 7feb5356681f0ce87a0da430ab91b3398ffccbfc
Author: zhaomin <[email protected]>
AuthorDate: Thu Jul 6 18:25:12 2023 +0800
[KYUUBI #5028] Update session hadoop conf to catalog hadoop conf
### _Why are the changes needed?_
Use the catalog's Hadoop configuration instead of the session Hadoop configuration when building the Hive file index and creating the scan's reader factory, so that catalog-specific Hadoop settings take effect when reading Hive tables through the connector.
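As a rough illustration (not the actual HiveTableCatalog code; the class and option names below are made up for the sketch), a catalog-level Hadoop configuration can be thought of as the session configuration overlaid with options that belong to that catalog only:
```scala
import org.apache.hadoop.conf.Configuration
import org.apache.spark.sql.SparkSession

// Hypothetical sketch only: ExampleCatalog and catalogOptions are made-up names.
// The real HiveTableCatalog.hadoopConfiguration() may be built differently.
class ExampleCatalog(spark: SparkSession, catalogOptions: Map[String, String]) {
  def hadoopConfiguration(): Configuration = {
    // Start from the session-level Hadoop conf ...
    val conf = spark.sessionState.newHadoopConf()
    // ... and overlay settings supplied for this catalog only, e.g. a
    // catalog-specific metastore URI or filesystem properties.
    catalogOptions.foreach { case (k, v) => conf.set(k, v) }
    conf
  }
}
```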
### _How was this patch tested?_
- [ ] Add some test cases that check the changes thoroughly including negative and positive cases if possible
- [ ] Add screenshots for manual tests if appropriate
- [x] [Run test](https://kyuubi.readthedocs.io/en/master/contributing/code/testing.html#running-tests) locally before making a pull request
Closes #5028 from zhaomin1423/fix_hive_connector.
Closes #5028
d9c7e9c8a [zhaomin] Update session hadoop conf to catalog hadoop conf
Authored-by: zhaomin <[email protected]>
Signed-off-by: ulyssesyou <[email protected]>
---
.../kyuubi/spark/connector/hive/read/HiveFileIndex.scala | 12 ++++++++----
.../apache/kyuubi/spark/connector/hive/read/HiveScan.scala | 2 +-
2 files changed, 9 insertions(+), 5 deletions(-)
diff --git a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveFileIndex.scala b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveFileIndex.scala
index 937a9557d..a399cc302 100644
--- a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveFileIndex.scala
+++ b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveFileIndex.scala
@@ -21,6 +21,7 @@ import java.net.URI
import scala.collection.mutable
+import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.hadoop.hive.ql.metadata.{Partition => HivePartition, Table}
import org.apache.spark.sql.SparkSession
@@ -37,7 +38,7 @@ import org.apache.kyuubi.spark.connector.hive.{HiveTableCatalog, KyuubiHiveConne
class HiveCatalogFileIndex(
sparkSession: SparkSession,
val catalogTable: CatalogTable,
- hiveCatalog: HiveTableCatalog,
+ val hiveCatalog: HiveTableCatalog,
override val sizeInBytes: Long)
extends PartitioningAwareFileIndex(
sparkSession,
@@ -99,14 +100,16 @@ class HiveCatalogFileIndex(
userSpecifiedSchema = Some(partitionSpec.partitionColumns),
fileStatusCache = fileStatusCache,
userSpecifiedPartitionSpec = Some(partitionSpec),
- metadataOpsTimeNs = Some(timeNs))
+ metadataOpsTimeNs = Some(timeNs),
+ hadoopConf = hiveCatalog.hadoopConfiguration())
} else {
new HiveInMemoryFileIndex(
sparkSession = sparkSession,
rootPathsSpecified = rootPaths,
parameters = table.properties,
userSpecifiedSchema = None,
- fileStatusCache = fileStatusCache)
+ fileStatusCache = fileStatusCache,
+ hadoopConf = hiveCatalog.hadoopConfiguration())
}
}
@@ -142,7 +145,8 @@ class HiveInMemoryFileIndex(
partPathToBindHivePart: Map[PartitionPath, HivePartition] = Map.empty,
fileStatusCache: FileStatusCache = NoopCache,
userSpecifiedPartitionSpec: Option[PartitionSpec] = None,
- override val metadataOpsTimeNs: Option[Long] = None)
+ override val metadataOpsTimeNs: Option[Long] = None,
+ override protected val hadoopConf: Configuration)
extends InMemoryFileIndex(
sparkSession,
rootPathsSpecified,
diff --git a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveScan.scala b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveScan.scala
index e71e428b7..5895ecf03 100644
--- a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveScan.scala
+++ b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveScan.scala
@@ -55,7 +55,7 @@ case class HiveScan(
private val partFileToHivePartMap: mutable.Map[PartitionedFile, HivePartition] = mutable.Map()
override def createReaderFactory(): PartitionReaderFactory = {
- val hiveConf = sparkSession.sessionState.newHadoopConf()
+ val hiveConf = fileIndex.hiveCatalog.hadoopConfiguration()
addCatalogTableConfToConf(hiveConf, catalogTable)
val table = HiveClientImpl.toHiveTable(catalogTable)
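For context, a hedged sketch of the behavioural difference (buildCatalogConf and the overridden key below are hypothetical stand-ins, not Kyuubi APIs): before this change HiveScan derived its Hadoop conf from the session, so a setting present only in the catalog's configuration never reached the reader factory; after the change it does.
```scala
import org.apache.hadoop.conf.Configuration
import org.apache.spark.sql.SparkSession

// Hypothetical stand-in for HiveTableCatalog.hadoopConfiguration(); the key and value are made up.
def buildCatalogConf(spark: SparkSession): Configuration = {
  val conf = spark.sessionState.newHadoopConf()
  conf.set("fs.defaultFS", "hdfs://catalog-cluster:8020") // catalog-only override
  conf
}

def demo(spark: SparkSession): Unit = {
  val sessionConf = spark.sessionState.newHadoopConf() // what HiveScan read before
  val catalogConf = buildCatalogConf(spark)            // what HiveScan reads now
  // The catalog-only override is visible only through the catalog conf.
  assert(catalogConf.get("fs.defaultFS") == "hdfs://catalog-cluster:8020")
  assert(sessionConf.get("fs.defaultFS") != "hdfs://catalog-cluster:8020")
}
```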