yaooqinn commented on a change in pull request #31281:
URL: https://github.com/apache/spark/pull/31281#discussion_r563483203



##########
File path: sql/catalyst/src/main/scala/org/apache/spark/sql/util/PartitioningUtils.scala
##########
@@ -30,14 +35,34 @@ object PartitioningUtils {
    */
   def normalizePartitionSpec[T](
       partitionSpec: Map[String, T],
-      partColNames: Seq[String],
+      partCols: StructType,
       tblName: String,
       resolver: Resolver): Map[String, T] = {
     val normalizedPartSpec = partitionSpec.toSeq.map { case (key, value) =>
-      val normalizedKey = partColNames.find(resolver(_, key)).getOrElse {
-        throw new AnalysisException(s"$key is not a valid partition column in table $tblName.")
+      val normalizedFiled = CharVarcharUtils.getRawSchema(partCols)

Review comment:
       fixed

##########
File path: sql/catalyst/src/main/scala/org/apache/spark/sql/util/PartitioningUtils.scala
##########
@@ -30,14 +35,33 @@ object PartitioningUtils {
    */
   def normalizePartitionSpec[T](
       partitionSpec: Map[String, T],
-      partColNames: Seq[String],
+      partCols: StructType,
       tblName: String,
       resolver: Resolver): Map[String, T] = {
+    val rawSchema = CharVarcharUtils.getRawSchema(partCols)
     val normalizedPartSpec = partitionSpec.toSeq.map { case (key, value) =>
-      val normalizedKey = partColNames.find(resolver(_, key)).getOrElse {
+      val normalizedFiled = rawSchema.find(f => resolver(f.name, key)).getOrElse {
         throw new AnalysisException(s"$key is not a valid partition column in table $tblName.")
       }
-      normalizedKey -> value
+
+      val normalizedVal = normalizedFiled.dataType match {
+        case CharType(len) if value != null && value != DEFAULT_PARTITION_NAME =>
+          val v = value match {
+            case Some(v) => Some(charTypeWriteSideCheck(v.toString, len))
+            case None => None
+            case other => charTypeWriteSideCheck(other.toString, len)

Review comment:
       I got match errors after removing these two cases.
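
       For context, a minimal sketch (hypothetical names, not the PR code) of why all three cases are needed: `normalizePartitionSpec` is generic in `T`, so partition values can arrive either `Option`-wrapped or as raw values, and a match that drops either shape breaks on the other.

       ```scala
       // Hypothetical reduction: values may be Option-wrapped (e.g.
       // Map[String, Option[String]]) or raw (e.g. Map[String, String]),
       // because the method is generic in T.
       def lengthCheck(v: String, len: Int): String = {
         if (v.length > len) {
           throw new RuntimeException(s"Exceeds char/varchar type length limitation: $len")
         }
         v
       }

       def normalizeValue[T](value: T, len: Int): Any = value match {
         case Some(v) => Some(lengthCheck(v.toString, len)) // Option-wrapped value
         case None    => None                               // dynamic partition: no value yet
         case other   => lengthCheck(other.toString, len)   // raw value
       }

       // Keeping only the `other` case stringifies Some("a") into "Some(a)";
       // keeping only the Some/None cases throws scala.MatchError on a raw
       // String, which is the failure observed above.
       ```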

##########
File path: sql/core/src/test/scala/org/apache/spark/sql/CharVarcharTestSuite.scala
##########
@@ -37,31 +37,109 @@ trait CharVarcharTestSuite extends QueryTest with SQLTestUtils {
     assert(CharVarcharUtils.getRawType(f.metadata) == Some(dt))
   }
 
-  test("char type values should be padded: top-level columns") {
+  test("char type values should be padded or trimmed: top-level columns") {
     withTable("t") {
       sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format")
-      sql("INSERT INTO t VALUES ('1', 'a')")
-      checkAnswer(spark.table("t"), Row("1", "a" + " " * 4))
-      checkColType(spark.table("t").schema(1), CharType(5))
+      (0 to 5).map(n => "a" + " " * n).foreach { v =>
+        sql(s"INSERT OVERWRITE t VALUES ('1', '$v')")
+        checkAnswer(spark.table("t"), Row("1", "a" + " " * 4))
+        checkColType(spark.table("t").schema(1), CharType(5))
+      }
 
       sql("INSERT OVERWRITE t VALUES ('1', null)")
       checkAnswer(spark.table("t"), Row("1", null))
+
+      val e = intercept[SparkException](sql("INSERT OVERWRITE t VALUES ('1', 'abcdef')"))

Review comment:
       Yes, I checked that; I will remove all the related ones.

##########
File path: sql/core/src/test/scala/org/apache/spark/sql/CharVarcharTestSuite.scala
##########
@@ -37,31 +37,109 @@ trait CharVarcharTestSuite extends QueryTest with SQLTestUtils {
     assert(CharVarcharUtils.getRawType(f.metadata) == Some(dt))
   }
 
-  test("char type values should be padded: top-level columns") {
+  test("char type values should be padded or trimmed: top-level columns") {
     withTable("t") {
       sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format")
-      sql("INSERT INTO t VALUES ('1', 'a')")
-      checkAnswer(spark.table("t"), Row("1", "a" + " " * 4))
-      checkColType(spark.table("t").schema(1), CharType(5))
+      (0 to 5).map(n => "a" + " " * n).foreach { v =>
+        sql(s"INSERT OVERWRITE t VALUES ('1', '$v')")
+        checkAnswer(spark.table("t"), Row("1", "a" + " " * 4))
+        checkColType(spark.table("t").schema(1), CharType(5))
+      }
 
       sql("INSERT OVERWRITE t VALUES ('1', null)")
       checkAnswer(spark.table("t"), Row("1", null))
+
+      val e = intercept[SparkException](sql("INSERT OVERWRITE t VALUES ('1', 'abcdef')"))
+      assert(e.getCause.getMessage.contains("Exceeds char/varchar type length limitation: 5"))
     }
   }
 
-  test("char type values should be padded: partitioned columns") {
+  test("char type values should be padded or trimmed: partitioned columns") {
+    withTable("t") {
+      sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format PARTITIONED BY 
(c)")
+      (0 to 5).map(n => "a" + " " * n).foreach { v =>
+        sql(s"INSERT OVERWRITE t VALUES ('1', '$v')")
+        checkAnswer(spark.table("t"), Row("1", "a" + " " * 4))
+        checkColType(spark.table("t").schema(1), CharType(5))
+      }
+      val e1 = intercept[SparkException](sql("INSERT OVERWRITE t VALUES ('1', 'abcdef')"))
+      assert(e1.getCause.getMessage.contains("Exceeds char/varchar type length limitation: 5"))
+    }
+
     withTable("t") {
       sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format PARTITIONED BY 
(c)")
-      sql("INSERT INTO t VALUES ('1', 'a')")
-      checkAnswer(spark.table("t"), Row("1", "a" + " " * 4))
-      checkColType(spark.table("t").schema(1), CharType(5))
+      (0 to 5).map(n => "a" + " " * n).foreach { v =>
+        sql(s"INSERT INTO t VALUES ('1', '$v')")
+        checkAnswer(spark.table("t"), Row("1", "a" + " " * 4))
+        sql(s"ALTER TABLE t DROP PARTITION(c='$v')")

Review comment:
       Good point, I will add one.

##########
File path: sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
##########
@@ -988,7 +989,15 @@ private[hive] class HiveClientImpl(
 private[hive] object HiveClientImpl extends Logging {
   /** Converts the native StructField to Hive's FieldSchema. */
   def toHiveColumn(c: StructField): FieldSchema = {
-    val typeString = HiveVoidType.replaceVoidType(c.dataType).catalogString
+    val typeString = if (c.metadata.contains(CHAR_VARCHAR_TYPE_STRING_METADATA_KEY)) {

Review comment:
       HiveTableScanExec -> HiveTableRelation -> CatalogTable; we need this change to convert it back to `HiveTable` properly.
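
       The change is roughly of this shape (a sketch, not the exact diff; `HiveVoidType` is the private helper defined in the same file):

       ```scala
       import org.apache.hadoop.hive.metastore.api.FieldSchema
       import org.apache.spark.sql.catalyst.util.CharVarcharUtils.CHAR_VARCHAR_TYPE_STRING_METADATA_KEY
       import org.apache.spark.sql.types.StructField

       // If the StructField carries the raw char/varchar type string in its
       // metadata, restore it so the Hive FieldSchema keeps CHAR(n)/VARCHAR(n)
       // instead of the STRING type it was replaced with on the Spark side.
       def toHiveColumn(c: StructField): FieldSchema = {
         val typeString = if (c.metadata.contains(CHAR_VARCHAR_TYPE_STRING_METADATA_KEY)) {
           c.metadata.getString(CHAR_VARCHAR_TYPE_STRING_METADATA_KEY) // e.g. "char(5)"
         } else {
           HiveVoidType.replaceVoidType(c.dataType).catalogString
         }
         new FieldSchema(c.name, typeString, c.getComment().orNull)
       }
       ```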

##########
File path: sql/hive/src/test/scala/org/apache/spark/sql/HiveCharVarcharTestSuite.scala
##########
@@ -50,6 +51,35 @@ class HiveCharVarcharTestSuite extends CharVarcharTestSuite with TestHiveSinglet
       assert(rest.contains("CHAR(5)"))
     }
   }
+
+  // TODO(SPARK-34203): Move this to super class when the ticket gets fixed

Review comment:
       I rechecked this; it is a valid case, and I moved it to the super class. The null case seems to be an unrelated issue, so there is no need to add a TODO here.

##########
File path: sql/core/src/test/scala/org/apache/spark/sql/CharVarcharTestSuite.scala
##########
@@ -37,31 +37,134 @@ trait CharVarcharTestSuite extends QueryTest with SQLTestUtils {
     assert(CharVarcharUtils.getRawType(f.metadata) == Some(dt))
   }
 
-  test("char type values should be padded: top-level columns") {
+  test("char type values should be padded or trimmed: top-level columns") {
     withTable("t") {
       sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format")
-      sql("INSERT INTO t VALUES ('1', 'a')")
-      checkAnswer(spark.table("t"), Row("1", "a" + " " * 4))
-      checkColType(spark.table("t").schema(1), CharType(5))
+      (0 to 5).map(n => "a" + " " * n).foreach { v =>
+        sql(s"INSERT OVERWRITE t VALUES ('1', '$v')")
+        checkAnswer(spark.table("t"), Row("1", "a" + " " * 4))
+        checkColType(spark.table("t").schema(1), CharType(5))
+      }
+
+      sql("INSERT OVERWRITE t VALUES ('1', null)")
+      checkAnswer(spark.table("t"), Row("1", null))
+    }
+  }
+
+  test("char type values should be padded or trimmed: partitioned columns") {
+    withTable("t") {
+      sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format PARTITIONED BY 
(c)")
+      (0 to 5).map(n => "a" + " " * n).foreach { v =>
+        sql(s"INSERT OVERWRITE t VALUES ('1', '$v')")
+        checkAnswer(spark.table("t"), Row("1", "a" + " " * 4))
+        checkColType(spark.table("t").schema(1), CharType(5))
+      }
+    }
+
+    withTable("t") {
+      sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format PARTITIONED BY 
(c)")
+      (0 to 5).map(n => "a" + " " * n).foreach { v =>
+        sql(s"INSERT INTO t VALUES ('1', '$v')")
+        checkAnswer(spark.table("t"), Row("1", "a" + " " * 4))
+        sql(s"ALTER TABLE t DROP PARTITION(c='$v')")
+        checkAnswer(spark.table("t"), Nil)
+      }
 
       sql("INSERT OVERWRITE t VALUES ('1', null)")
       checkAnswer(spark.table("t"), Row("1", null))
     }
+
+    withTable("t") {
+      sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format PARTITIONED BY 
(c)")
+      (0 to 5).map(n => "a" + " " * n).foreach { v =>
+        sql(s"INSERT INTO t VALUES ('1', '$v')")
+        sql(s"ALTER TABLE t DROP PARTITION(c='a')")
+        checkAnswer(spark.table("t"), Nil)
+      }
+    }
   }
 
-  test("char type values should be padded: partitioned columns") {
+  test("char type values should be padded or trimmed: static partitioned 
columns") {
     withTable("t") {
       sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format PARTITIONED BY 
(c)")
-      sql("INSERT INTO t VALUES ('1', 'a')")
-      checkAnswer(spark.table("t"), Row("1", "a" + " " * 4))
-      checkColType(spark.table("t").schema(1), CharType(5))
+      (0 to 5).map(n => "a" + " " * n).foreach { v =>
+        sql(s"INSERT INTO t PARTITION (c ='$v') VALUES ('1')")
+        checkAnswer(spark.table("t"), Row("1", "a" + " " * 4))
+        checkColType(spark.table("t").schema(1), CharType(5))
+        sql(s"ALTER TABLE t DROP PARTITION(c='$v')")
+        checkAnswer(spark.table("t"), Nil)
+      }
+    }
+  }
+
+  test("oversize char/varchar values for alter table partition operations") {
+    Seq("CHAR(5)", "VARCHAR(5)").foreach { typ =>
+      withTable("t") {
+        sql(s"CREATE TABLE t(i STRING, c $typ) USING $format PARTITIONED BY 
(c)")
+        Seq("ADD", "DROP").foreach { op =>
+          val e = intercept[RuntimeException](sql(s"ALTER TABLE t $op PARTITION(c='abcdef')"))
+          assert(e.getMessage.contains("Exceeds char/varchar type length limitation: 5"))
+        }
+        val e1 = intercept[RuntimeException] {
+          sql(s"ALTER TABLE t PARTITION (c='abcdef') RENAME TO PARTITION 
(c='2')")
+        }
+        assert(e1.getMessage.contains("Exceeds char/varchar type length limitation: 5"))
+        val e2 = intercept[RuntimeException] {
+          sql(s"ALTER TABLE t PARTITION (c='1') RENAME TO PARTITION 
(c='abcdef')")
+        }
+        assert(e2.getMessage.contains("Exceeds char/varchar type length limitation: 5"))
+      }
+    }
+  }
+
+  test("varchar type values length check: partitioned columns dynamic") {
+    (0 to 5).foreach { n =>
+      withTable("t") {
+        sql(s"CREATE TABLE t(i STRING, c VARCHAR(5)) USING $format PARTITIONED 
BY (c)")
+        val v = "a" + " " * n
+        sql(s"INSERT INTO t VALUES ('1', '$v')")
+        checkAnswer(spark.table("t"), Row("1", "a" + " " * math.min(n, 4)))
+        checkColType(spark.table("t").schema(1), VarcharType(5))
+      }
+    }
 
-      sql("ALTER TABLE t DROP PARTITION(c='a')")
+    (0 to 5).foreach { n =>
+      withTable("t") {
+        sql(s"CREATE TABLE t(i STRING, c VARCHAR(5)) USING $format PARTITIONED 
BY (c)")
+        val v = "a" + " " * n
+        sql(s"INSERT INTO t VALUES ('1', '$v')")
+        checkAnswer(spark.table("t"), Row("1", "a" + " " * math.min(n, 4)))
+        sql(s"ALTER TABLE t DROP PARTITION(c='$v')")
+        checkAnswer(spark.table("t"), Nil)
+      }
+    }
+
+    withTable("t") {
+      sql(s"CREATE TABLE t(i STRING, c VARCHAR(5)) USING $format PARTITIONED 
BY (c)")
+      val e = intercept[RuntimeException](sql("ALTER TABLE t DROP PARTITION(c='abcdef')"))
+      assert(e.getMessage.contains("Exceeds char/varchar type length limitation: 5"))
       sql("INSERT OVERWRITE t VALUES ('1', null)")
       checkAnswer(spark.table("t"), Row("1", null))
     }
   }
 
+  test("varchar type values length check: partitioned columns of other types") 
{

Review comment:
       done

##########
File path: sql/core/src/test/scala/org/apache/spark/sql/CharVarcharTestSuite.scala
##########
@@ -37,31 +37,134 @@ trait CharVarcharTestSuite extends QueryTest with SQLTestUtils {
     assert(CharVarcharUtils.getRawType(f.metadata) == Some(dt))
   }
 
-  test("char type values should be padded: top-level columns") {
+  test("char type values should be padded or trimmed: top-level columns") {
     withTable("t") {
       sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format")
-      sql("INSERT INTO t VALUES ('1', 'a')")
-      checkAnswer(spark.table("t"), Row("1", "a" + " " * 4))
-      checkColType(spark.table("t").schema(1), CharType(5))
+      (0 to 5).map(n => "a" + " " * n).foreach { v =>
+        sql(s"INSERT OVERWRITE t VALUES ('1', '$v')")
+        checkAnswer(spark.table("t"), Row("1", "a" + " " * 4))
+        checkColType(spark.table("t").schema(1), CharType(5))
+      }
+
+      sql("INSERT OVERWRITE t VALUES ('1', null)")
+      checkAnswer(spark.table("t"), Row("1", null))
+    }
+  }
+
+  test("char type values should be padded or trimmed: partitioned columns") {
+    withTable("t") {
+      sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format PARTITIONED BY 
(c)")
+      (0 to 5).map(n => "a" + " " * n).foreach { v =>
+        sql(s"INSERT OVERWRITE t VALUES ('1', '$v')")
+        checkAnswer(spark.table("t"), Row("1", "a" + " " * 4))
+        checkColType(spark.table("t").schema(1), CharType(5))
+      }
+    }
+
+    withTable("t") {
+      sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format PARTITIONED BY 
(c)")
+      (0 to 5).map(n => "a" + " " * n).foreach { v =>
+        sql(s"INSERT INTO t VALUES ('1', '$v')")
+        checkAnswer(spark.table("t"), Row("1", "a" + " " * 4))
+        sql(s"ALTER TABLE t DROP PARTITION(c='$v')")
+        checkAnswer(spark.table("t"), Nil)
+      }
 
       sql("INSERT OVERWRITE t VALUES ('1', null)")
       checkAnswer(spark.table("t"), Row("1", null))
     }
+
+    withTable("t") {
+      sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format PARTITIONED BY 
(c)")
+      (0 to 5).map(n => "a" + " " * n).foreach { v =>
+        sql(s"INSERT INTO t VALUES ('1', '$v')")
+        sql(s"ALTER TABLE t DROP PARTITION(c='a')")
+        checkAnswer(spark.table("t"), Nil)
+      }
+    }
   }
 
-  test("char type values should be padded: partitioned columns") {
+  test("char type values should be padded or trimmed: static partitioned 
columns") {
     withTable("t") {
       sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format PARTITIONED BY 
(c)")
-      sql("INSERT INTO t VALUES ('1', 'a')")
-      checkAnswer(spark.table("t"), Row("1", "a" + " " * 4))
-      checkColType(spark.table("t").schema(1), CharType(5))
+      (0 to 5).map(n => "a" + " " * n).foreach { v =>
+        sql(s"INSERT INTO t PARTITION (c ='$v') VALUES ('1')")
+        checkAnswer(spark.table("t"), Row("1", "a" + " " * 4))
+        checkColType(spark.table("t").schema(1), CharType(5))
+        sql(s"ALTER TABLE t DROP PARTITION(c='$v')")

Review comment:
       According to the SQL standard, trailing spaces within the length limit are valid for the varchar type, so this case should remain as it is.
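
       To make that concrete, a minimal sketch in the suite's own style (illustration only, not part of the PR):

       ```scala
       // For VARCHAR(5), trailing spaces that keep the value within the limit
       // are stored as-is, so DROP PARTITION must use the exact value,
       // trailing spaces included.
       withTable("t") {
         sql(s"CREATE TABLE t(i STRING, c VARCHAR(5)) USING $format PARTITIONED BY (c)")
         sql("INSERT INTO t VALUES ('1', 'a   ')")        // 'a' + 3 spaces: length 4 <= 5
         checkAnswer(spark.table("t"), Row("1", "a   "))  // trailing spaces preserved
         sql("ALTER TABLE t DROP PARTITION(c='a   ')")    // must match, spaces included
         checkAnswer(spark.table("t"), Nil)
       }
       ```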



