Github user gatorsmile commented on a diff in the pull request:
https://github.com/apache/spark/pull/19841#discussion_r154490878
--- Diff:
sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
---
@@ -104,147 +105,153 @@ case class InsertIntoHiveTable(
     val partitionColumns = fileSinkConf.getTableInfo.getProperties.getProperty("partition_columns")
     val partitionColumnNames = Option(partitionColumns).map(_.split("/")).getOrElse(Array.empty)
-    // By this time, the partition map must match the table's partition columns
-    if (partitionColumnNames.toSet != partition.keySet) {
-      throw new SparkException(
-        s"""Requested partitioning does not match the ${table.identifier.table} table:
-           |Requested partitions: ${partition.keys.mkString(",")}
-           |Table partitions: ${table.partitionColumnNames.mkString(",")}""".stripMargin)
-    }
-
-    // Validate partition spec if there exist any dynamic partitions
-    if (numDynamicPartitions > 0) {
-      // Report error if dynamic partitioning is not enabled
-      if (!hadoopConf.get("hive.exec.dynamic.partition", "true").toBoolean) {
-        throw new SparkException(ErrorMsg.DYNAMIC_PARTITION_DISABLED.getMsg)
+    def processInsert = {
--- End diff --
Generally, we do not like nested methods.
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]