Github user cloud-fan commented on a diff in the pull request:

    https://github.com/apache/spark/pull/19602#discussion_r192576715
  
    --- Diff: sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientSuite.scala ---
    @@ -59,38 +62,62 @@ class HiveClientSuite(version: String)
             "h" -> h.toString,
             "chunk" -> chunk
           ), storageFormat)
    -    assert(partitions.size == testPartitionCount)
    +    assert(partitions0.size == testPartitionCount0)
    +
    +    client.createPartitions(
    +      "default", "test0", partitions0, ignoreIfExists = false)
    +
    +    val partitions1 =
    +      for {
    +        pt <- 0 until 10
    +        chunk <- Seq("aa", "ab", "ba", "bb")
    +      } yield CatalogTablePartition(Map(
    +        "pt" -> pt.toString,
    +        "chunk" -> chunk
    +      ), storageFormat)
    +    assert(partitions1.size == testPartitionCount1)
     
         client.createPartitions(
    -      "default", "test", partitions, ignoreIfExists = false)
    +      "default", "test1", partitions1, ignoreIfExists = false)
    +
         client
       }
     
    +  private def pAttr(table: String, name: String): Attribute = {
    +    val partTypes = client.getTable("default", table).partitionSchema.fields
    +        .map(field => (field.name, field.dataType)).toMap
    +    partTypes.get(name) match {
    --- End diff --
    
    Building a map just to look it up once is wasteful. We should either move the map outside this method, or do a linear lookup.
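
    For reference, a rough sketch of the linear-lookup variant. It assumes the same `partitionSchema` API used in the diff; the error handling for an unknown attribute name is a placeholder, since the original match arms aren't shown here:

    ```scala
    import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}

    // Scan the partition schema fields directly instead of materializing
    // a Map for a single lookup (hypothetical rewrite of pAttr).
    private def pAttr(table: String, name: String): Attribute = {
      client.getTable("default", table).partitionSchema.fields
          .find(_.name == name) match {
        case Some(field) => AttributeReference(field.name, field.dataType)()
        case None => throw new IllegalArgumentException(
          s"Illegal name of partition attribute: $name")
      }
    }
    ```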


---
