This is an automated email from the ASF dual-hosted git repository.

srowen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
     new 02bbe97  [MINOR] Remove unnecessary gets when getting a value from map.
02bbe97 is described below

commit 02bbe977abaf7006b845a7e99d612b0235aa0025
Author:     liuxian <liu.xi...@zte.com.cn>
AuthorDate: Fri Mar 1 11:48:07 2019 -0600

    [MINOR] Remove unnecessary gets when getting a value from map.

    ## What changes were proposed in this pull request?

    Redundant `get` when getting a value from `Map` given a key.

    ## How was this patch tested?

    N/A

    Closes #23901 from 10110346/removegetfrommap.

    Authored-by: liuxian <liu.xi...@zte.com.cn>
    Signed-off-by: Sean Owen <sean.o...@databricks.com>
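For context, the cleanup below reduces to two patterns on Scala's Map. A minimal standalone sketch, where the map `m` and its keys are invented purely for illustration:

    val m = Map("k" -> 1)

    // Key known to be present: apply() replaces the get(...).get round-trip.
    // Both forms throw NoSuchElementException if the key is absent.
    m.get("k").get            // before
    m("k")                    // after

    // Key that may be absent, with a fallback: getOrElse takes the key and
    // default directly, skipping the intermediate Option.
    m.get("x").getOrElse(0)   // before
    m.getOrElse("x", 0)       // after

Note that getOrElse's default parameter is by-name, which is why several hunks below can pass a `throw` expression as the fallback without it being evaluated eagerly.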
---
 .../apache/spark/deploy/SparkSubmitArguments.scala |  2 +-
 .../org/apache/spark/deploy/master/Master.scala    |  2 +-
 .../org/apache/spark/deploy/worker/Worker.scala    |  2 +-
 .../org/apache/spark/ui/ConsoleProgressBar.scala   |  6 +-----
 .../org/apache/spark/ui/storage/RDDPage.scala      |  2 +-
 .../sort/BypassMergeSortShuffleWriterSuite.scala   |  2 +-
 .../spark/util/TimeStampedHashMapSuite.scala       | 12 +++++------
 .../spark/sql/kafka010/KafkaMicroBatchStream.scala |  2 +-
 .../apache/spark/sql/kafka010/KafkaRelation.scala  |  9 ++++----
 .../apache/spark/sql/kafka010/KafkaSource.scala    | 14 ++++++-------
 .../apache/spark/ml/feature/Word2VecSuite.scala    |  4 ++--
 .../org/apache/spark/sql/types/Metadata.scala      |  2 +-
 .../catalyst/expressions/CodeGenerationSuite.scala |  4 ++--
 .../spark/sql/execution/stat/StatFunctions.scala   |  2 +-
 .../spark/sql/execution/ui/SparkPlanGraph.scala    |  2 +-
 .../execution/ui/SQLAppStatusListenerSuite.scala   |  2 +-
 .../spark/sql/sources/v2/DataSourceV2Suite.scala   |  2 +-
 .../sql/streaming/StreamingAggregationSuite.scala  | 24 +++++++++++-----------
 .../spark/sql/hive/client/HiveClientImpl.scala     |  6 ++----
 19 files changed, 46 insertions(+), 55 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala
index e23d1f8..f8c5330 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala
@@ -512,7 +512,7 @@ private[deploy] class SparkSubmitArguments(args: Seq[String], env: Map[String, S
     if (unknownParam != null) {
       logInfo("Unknown/unsupported param " + unknownParam)
     }
-    val command = sys.env.get("_SPARK_CMD_USAGE").getOrElse(
+    val command = sys.env.getOrElse("_SPARK_CMD_USAGE",
       """Usage: spark-submit [options] <app jar | python file | R file> [app arguments]
         |Usage: spark-submit --kill [submission ID] --master [spark://...]
         |Usage: spark-submit --status [submission ID] --master [spark://...]
diff --git a/core/src/main/scala/org/apache/spark/deploy/master/Master.scala b/core/src/main/scala/org/apache/spark/deploy/master/Master.scala
index 3dd804b..a5aecd2 100644
--- a/core/src/main/scala/org/apache/spark/deploy/master/Master.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/master/Master.scala
@@ -370,7 +370,7 @@ private[deploy] class Master(
       val validExecutors = executors.filter(exec => idToApp.get(exec.appId).isDefined)
       for (exec <- validExecutors) {
-        val app = idToApp.get(exec.appId).get
+        val app = idToApp(exec.appId)
         val execInfo = app.addExecutor(worker, exec.cores, Some(exec.execId))
         worker.addExecutor(execInfo)
         execInfo.copyState(exec)
diff --git a/core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala b/core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala
index 5f7ca5c..49cda0f 100755
--- a/core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala
@@ -130,7 +130,7 @@ private[deploy] class Worker(
       assert(sys.props.contains("spark.test.home"), "spark.test.home is not set!")
       new File(sys.props("spark.test.home"))
     } else {
-      new File(sys.env.get("SPARK_HOME").getOrElse("."))
+      new File(sys.env.getOrElse("SPARK_HOME", "."))
     }

   var workDir: File = null
diff --git a/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala b/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala
index f0ae26e..f36b31c 100644
--- a/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala
+++ b/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala
@@ -39,11 +39,7 @@ private[spark] class ConsoleProgressBar(sc: SparkContext) extends Logging {
   private val firstDelayMSec = 500L

   // The width of terminal
-  private val TerminalWidth = if (!sys.env.getOrElse("COLUMNS", "").isEmpty) {
-    sys.env.get("COLUMNS").get.toInt
-  } else {
-    80
-  }
+  private val TerminalWidth = sys.env.getOrElse("COLUMNS", "80").toInt

   private var lastFinishTime = 0L
   private var lastUpdateTime = 0L
diff --git a/core/src/main/scala/org/apache/spark/ui/storage/RDDPage.scala b/core/src/main/scala/org/apache/spark/ui/storage/RDDPage.scala
index dde441a..4bd3bbd 100644
--- a/core/src/main/scala/org/apache/spark/ui/storage/RDDPage.scala
+++ b/core/src/main/scala/org/apache/spark/ui/storage/RDDPage.scala
@@ -189,7 +189,7 @@ private[ui] class BlockDataSource(
       rddPartition.memoryUsed,
       rddPartition.diskUsed,
       rddPartition.executors
-        .map { id => executorIdToAddress.get(id).getOrElse(id) }
+        .map { id => executorIdToAddress.getOrElse(id, id) }
         .sorted
         .mkString(" "))
   }
diff --git a/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala b/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala
index 7f956c2..72a1a4f 100644
--- a/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala
+++ b/core/src/test/scala/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriterSuite.scala
@@ -115,7 +115,7 @@ class BypassMergeSortShuffleWriterSuite extends SparkFunSuite with BeforeAndAfte
     when(diskBlockManager.getFile(any[BlockId])).thenAnswer(
       new Answer[File] {
         override def answer(invocation: InvocationOnMock): File = {
-          blockIdToFileMap.get(invocation.getArguments.head.asInstanceOf[BlockId]).get
+          blockIdToFileMap(invocation.getArguments.head.asInstanceOf[BlockId])
         }
       })
   }
diff --git a/core/src/test/scala/org/apache/spark/util/TimeStampedHashMapSuite.scala b/core/src/test/scala/org/apache/spark/util/TimeStampedHashMapSuite.scala
index dcae789..77a92e7 100644
--- a/core/src/test/scala/org/apache/spark/util/TimeStampedHashMapSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/TimeStampedHashMapSuite.scala
@@ -73,14 +73,14 @@ class TimeStampedHashMapSuite extends SparkFunSuite {
     // put, get, and apply
     testMap1 += (("k1", "v1"))
     assert(testMap1.get("k1").isDefined)
-    assert(testMap1.get("k1").get === "v1")
+    assert(testMap1("k1") === "v1")
     testMap1("k2") = "v2"
     assert(testMap1.get("k2").isDefined)
-    assert(testMap1.get("k2").get === "v2")
+    assert(testMap1("k2") === "v2")
     assert(testMap1("k2") === "v2")
     testMap1.update("k3", "v3")
     assert(testMap1.get("k3").isDefined)
-    assert(testMap1.get("k3").get === "v3")
+    assert(testMap1("k3") === "v3")

     // remove
     testMap1.remove("k1")
@@ -121,15 +121,15 @@ class TimeStampedHashMapSuite extends SparkFunSuite {
     val testMap3 = testMap2 + (("k0", "v0"))
     assert(testMap3.size === 2)
     assert(testMap3.get("k1").isDefined)
-    assert(testMap3.get("k1").get === "v1")
+    assert(testMap3("k1") === "v1")
     assert(testMap3.get("k0").isDefined)
-    assert(testMap3.get("k0").get === "v0")
+    assert(testMap3("k0") === "v0")

     // -
     val testMap4 = testMap3 - "k0"
     assert(testMap4.size === 1)
     assert(testMap4.get("k1").isDefined)
-    assert(testMap4.get("k1").get === "v1")
+    assert(testMap4("k1") === "v1")
   }
 }
diff --git a/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchStream.scala b/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchStream.scala
index 337a51e..a630346 100644
--- a/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchStream.scala
+++ b/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchStream.scala
@@ -228,7 +228,7 @@ private[kafka010] class KafkaMicroBatchStream(
         until.map {
           case (tp, end) =>
             tp -> sizes.get(tp).map { size =>
-              val begin = from.get(tp).getOrElse(fromNew(tp))
+              val begin = from.getOrElse(tp, fromNew(tp))
               val prorate = limit * (size / total)
               // Don't completely starve small topicpartitions
               val prorateLong = (if (prorate < 1) Math.ceil(prorate) else Math.floor(prorate)).toLong
diff --git a/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaRelation.scala b/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaRelation.scala
index e6f9d12..9effa29 100644
--- a/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaRelation.scala
+++ b/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaRelation.scala
@@ -89,11 +89,10 @@ private[kafka010] class KafkaRelation(

     // Calculate offset ranges
     val offsetRanges = untilPartitionOffsets.keySet.map { tp =>
-      val fromOffset = fromPartitionOffsets.get(tp).getOrElse {
-        // This should not happen since topicPartitions contains all partitions not in
-        // fromPartitionOffsets
-        throw new IllegalStateException(s"$tp doesn't have a from offset")
-      }
+      val fromOffset = fromPartitionOffsets.getOrElse(tp,
+        // This should not happen since topicPartitions contains all partitions not in
+        // fromPartitionOffsets
+        throw new IllegalStateException(s"$tp doesn't have a from offset"))
       val untilOffset = untilPartitionOffsets(tp)
       KafkaSourceRDDOffsetRange(tp, fromOffset, untilOffset, None)
     }.toArray
diff --git a/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaSource.scala b/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaSource.scala
index 624c796..82d746e 100644
--- a/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaSource.scala
+++ b/external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaSource.scala
@@ -187,7 +187,7 @@ private[kafka010] class KafkaSource(
         until.map {
           case (tp, end) =>
             tp -> sizes.get(tp).map { size =>
-              val begin = from.get(tp).getOrElse(fromNew(tp))
+              val begin = from.getOrElse(tp, fromNew(tp))
               val prorate = limit * (size / total)
               logDebug(s"rateLimit $tp prorated amount is $prorate")
               // Don't completely starve small topicpartitions
@@ -273,13 +273,11 @@ private[kafka010] class KafkaSource(

     // Calculate offset ranges
     val offsetRanges = topicPartitions.map { tp =>
-      val fromOffset = fromPartitionOffsets.get(tp).getOrElse {
-        newPartitionOffsets.getOrElse(tp, {
-          // This should not happen since newPartitionOffsets contains all partitions not in
-          // fromPartitionOffsets
-          throw new IllegalStateException(s"$tp doesn't have a from offset")
-        })
-      }
+      val fromOffset = fromPartitionOffsets.getOrElse(tp, newPartitionOffsets.getOrElse(tp, {
+        // This should not happen since newPartitionOffsets contains all partitions not in
+        // fromPartitionOffsets
+        throw new IllegalStateException(s"$tp doesn't have a from offset")
+      }))
       val untilOffset = untilPartitionOffsets(tp)
       val preferredLoc = if (numExecutors > 0) {
         // This allows cached KafkaConsumers in the executors to be re-used to read the same
diff --git a/mllib/src/test/scala/org/apache/spark/ml/feature/Word2VecSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/feature/Word2VecSuite.scala
index b59c4e7..70d1177 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/feature/Word2VecSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/feature/Word2VecSuite.scala
@@ -130,14 +130,14 @@ class Word2VecSuite extends MLTest with DefaultReadWriteTest {

     expected.foreach { case (expectedSynonym, expectedSimilarity) =>
       assert(findSynonymsResult.contains(expectedSynonym))
-      assert(expectedSimilarity ~== findSynonymsResult.get(expectedSynonym).get absTol 1E-5)
+      assert(expectedSimilarity ~== findSynonymsResult(expectedSynonym) absTol 1E-5)
     }

     val findSynonymsArrayResult = model.findSynonymsArray("a", 2).toMap
     findSynonymsResult.foreach { case (expectedSynonym, expectedSimilarity) =>
       assert(findSynonymsArrayResult.contains(expectedSynonym))
-      assert(expectedSimilarity ~== findSynonymsArrayResult.get(expectedSynonym).get absTol 1E-5)
+      assert(expectedSimilarity ~== findSynonymsArrayResult(expectedSynonym) absTol 1E-5)
     }
   }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/Metadata.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/Metadata.scala
index b6a859b..982f624 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/Metadata.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/Metadata.scala
@@ -88,7 +88,7 @@ sealed class Metadata private[types] (private[types] val map: Map[String, Any])
       map.keysIterator.forall { key =>
         that.map.get(key) match {
           case Some(otherValue) =>
-            val ourValue = map.get(key).get
+            val ourValue = map(key)
             (ourValue, otherValue) match {
               case (v0: Array[Long], v1: Array[Long]) => java.util.Arrays.equals(v0, v1)
               case (v0: Array[Double], v1: Array[Double]) => java.util.Arrays.equals(v0, v1)
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CodeGenerationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CodeGenerationSuite.scala
index 7e6fe5b..baa1b3b 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CodeGenerationSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CodeGenerationSuite.scala
@@ -424,7 +424,7 @@ class CodeGenerationSuite extends SparkFunSuite with ExpressionEvalHelper {
     assert(ctx1.inlinedMutableStates.size == CodeGenerator.OUTER_CLASS_VARIABLES_THRESHOLD)
     // When the number of primitive type mutable states is over the threshold, others are
     // allocated into an array
-    assert(ctx1.arrayCompactedMutableStates.get(CodeGenerator.JAVA_INT).get.arrayNames.size == 1)
+    assert(ctx1.arrayCompactedMutableStates(CodeGenerator.JAVA_INT).arrayNames.size == 1)
     assert(ctx1.mutableStateInitCode.size == CodeGenerator.OUTER_CLASS_VARIABLES_THRESHOLD + 10)

     val ctx2 = new CodegenContext
@@ -434,7 +434,7 @@ class CodeGenerationSuite extends SparkFunSuite with ExpressionEvalHelper {
     // When the number of non-primitive type mutable states is over the threshold, others are
     // allocated into a new array
     assert(ctx2.inlinedMutableStates.isEmpty)
-    assert(ctx2.arrayCompactedMutableStates.get("InternalRow[]").get.arrayNames.size == 2)
+    assert(ctx2.arrayCompactedMutableStates("InternalRow[]").arrayNames.size == 2)
     assert(ctx2.arrayCompactedMutableStates("InternalRow[]").getCurrentIndex == 10)
     assert(ctx2.mutableStateInitCode.size == CodeGenerator.MUTABLESTATEARRAY_SIZE_LIMIT + 10)
   }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/StatFunctions.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/StatFunctions.scala
index ac25a8f..a6c9c29 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/StatFunctions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/StatFunctions.scala
@@ -203,7 +203,7 @@ object StatFunctions extends Logging {
       // row.get(0) is column 1
       // row.get(1) is column 2
       // row.get(2) is the frequency
-      val columnIndex = distinctCol2.get(cleanElement(row.get(1))).get
+      val columnIndex = distinctCol2(cleanElement(row.get(1)))
       countsRow.setLong(columnIndex + 1, row.getLong(2))
     }
     // the value of col1 is the first value, the rest are the counts
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala
index e57d080..838ee76 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala
@@ -184,7 +184,7 @@ private[ui] class SparkPlanGraphCluster(
       require(duration.length == 1)
       val id = duration(0).accumulatorId
       if (metricsValue.contains(duration(0).accumulatorId)) {
-        name + "\n\n" + metricsValue.get(id).get
+        name + "\n\n" + metricsValue(id)
       } else {
         name
       }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLAppStatusListenerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLAppStatusListenerSuite.scala
index 4c008c4..c8d862c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLAppStatusListenerSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLAppStatusListenerSuite.scala
@@ -140,7 +140,7 @@ class SQLAppStatusListenerSuite extends SparkFunSuite with SharedSQLContext with
       // TODO: this is brittle. There is no requirement that the actual string needs to start
       // with the accumulator value.
       assert(actual.contains(id))
-      val v = actual.get(id).get.trim
+      val v = actual(id).trim
       assert(v.startsWith(value.toString), s"Wrong value for accumulator $id")
     }
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/v2/DataSourceV2Suite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/v2/DataSourceV2Suite.scala
index b857244..e184bf5 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/v2/DataSourceV2Suite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/v2/DataSourceV2Suite.scala
@@ -349,7 +349,7 @@ class DataSourceV2Suite extends QueryTest with SharedSQLContext {
       val options = df.queryExecution.optimizedPlan.collectFirst {
         case d: DataSourceV2Relation => d.options
       }.get
-      assert(options.get(optionName).get == "false")
+      assert(options(optionName) === "false")
     }
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala
index 97dbb9b..116fd74 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala
@@ -207,15 +207,15 @@ class StreamingAggregationSuite extends StateStoreMetricsTest with Assertions {
       AddData(inputData, 1),
       CheckLastBatch((1, 1), (2, 1)),
       AssertOnQuery { _.stateNodes.size === 1 },
-      AssertOnQuery { _.stateNodes.head.metrics.get("numOutputRows").get.value === 2 },
-      AssertOnQuery { _.stateNodes.head.metrics.get("numUpdatedStateRows").get.value === 2 },
-      AssertOnQuery { _.stateNodes.head.metrics.get("numTotalStateRows").get.value === 2 },
+      AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 2 },
+      AssertOnQuery { _.stateNodes.head.metrics("numUpdatedStateRows").value === 2 },
+      AssertOnQuery { _.stateNodes.head.metrics("numTotalStateRows").value === 2 },
       AddData(inputData, 2, 3),
       CheckLastBatch((2, 2), (3, 2), (4, 1)),
       AssertOnQuery { _.stateNodes.size === 1 },
-      AssertOnQuery { _.stateNodes.head.metrics.get("numOutputRows").get.value === 3 },
-      AssertOnQuery { _.stateNodes.head.metrics.get("numUpdatedStateRows").get.value === 3 },
-      AssertOnQuery { _.stateNodes.head.metrics.get("numTotalStateRows").get.value === 4 }
+      AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 3 },
+      AssertOnQuery { _.stateNodes.head.metrics("numUpdatedStateRows").value === 3 },
+      AssertOnQuery { _.stateNodes.head.metrics("numTotalStateRows").value === 4 }
     )

     // Test with Complete mode
@@ -224,15 +224,15 @@
       AddData(inputData, 1),
       CheckLastBatch((1, 1), (2, 1)),
       AssertOnQuery { _.stateNodes.size === 1 },
-      AssertOnQuery { _.stateNodes.head.metrics.get("numOutputRows").get.value === 2 },
-      AssertOnQuery { _.stateNodes.head.metrics.get("numUpdatedStateRows").get.value === 2 },
-      AssertOnQuery { _.stateNodes.head.metrics.get("numTotalStateRows").get.value === 2 },
+      AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 2 },
+      AssertOnQuery { _.stateNodes.head.metrics("numUpdatedStateRows").value === 2 },
+      AssertOnQuery { _.stateNodes.head.metrics("numTotalStateRows").value === 2 },
       AddData(inputData, 2, 3),
       CheckLastBatch((1, 1), (2, 2), (3, 2), (4, 1)),
       AssertOnQuery { _.stateNodes.size === 1 },
-      AssertOnQuery { _.stateNodes.head.metrics.get("numOutputRows").get.value === 4 },
-      AssertOnQuery { _.stateNodes.head.metrics.get("numUpdatedStateRows").get.value === 3 },
-      AssertOnQuery { _.stateNodes.head.metrics.get("numTotalStateRows").get.value === 4 }
+      AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 4 },
+      AssertOnQuery { _.stateNodes.head.metrics("numUpdatedStateRows").value === 3 },
+      AssertOnQuery { _.stateNodes.head.metrics("numTotalStateRows").value === 4 }
     )
   }
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
index 77ac606..2ca54af 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
@@ -996,10 +996,8 @@ private[hive] object HiveClientImpl {
       ht: HiveTable): HivePartition = {
     val tpart = new org.apache.hadoop.hive.metastore.api.Partition
     val partValues = ht.getPartCols.asScala.map { hc =>
-      p.spec.get(hc.getName).getOrElse {
-        throw new IllegalArgumentException(
-          s"Partition spec is missing a value for column '${hc.getName}': ${p.spec}")
-      }
+      p.spec.getOrElse(hc.getName, throw new IllegalArgumentException(
+        s"Partition spec is missing a value for column '${hc.getName}': ${p.spec}"))
     }
     val storageDesc = new StorageDescriptor
     val serdeInfo = new SerDeInfo

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org