imback82 commented on code in PR #42577:
URL: https://github.com/apache/spark/pull/42577#discussion_r1385921893


##########
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/interface.scala:
##########
@@ -170,6 +174,55 @@ case class CatalogTablePartition(
   }
 }
 
+/**
+ * A container for clustering information.
+ *
+ * @param columnNames the names of the columns used for clustering.
+ */
+case class ClusterBySpec(columnNames: Seq[NamedReference]) {
+  override def toString: String = toJson
+
+  def toJson: String = ClusterBySpec.mapper.writeValueAsString(columnNames.map(_.fieldNames))
+}
+
+object ClusterBySpec {
+  private val mapper = {
+    val ret = new ObjectMapper() with ClassTagExtensions
+    ret.setSerializationInclusion(Include.NON_ABSENT)
+    ret.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
+    ret.registerModule(DefaultScalaModule)
+    ret
+  }
+
+  def fromProperty(columns: String): ClusterBySpec = {
+    ClusterBySpec(mapper.readValue[Seq[Seq[String]]](columns).map(FieldReference(_)))

Review Comment:
   Alternatively I could have serialized `FieldReference` itself, but serializing the plain field names is more generic.
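
   As a side note for reviewers, here is a minimal, self-contained sketch of the round trip this buys us (the `ClusterByJsonDemo` object and the sample column names are illustrative, not part of the PR):

   ```scala
   import com.fasterxml.jackson.annotation.JsonInclude.Include
   import com.fasterxml.jackson.databind.{DeserializationFeature, ObjectMapper}
   import com.fasterxml.jackson.module.scala.{ClassTagExtensions, DefaultScalaModule}

   // Illustrative only: serializes clustering columns the same way as the
   // `ClusterBySpec` above, i.e. as a JSON array of field-name arrays.
   object ClusterByJsonDemo {
     private val mapper = {
       val ret = new ObjectMapper() with ClassTagExtensions
       ret.setSerializationInclusion(Include.NON_ABSENT)
       ret.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
       ret.registerModule(DefaultScalaModule)
       ret
     }

     def main(args: Array[String]): Unit = {
       // One inner Seq per column; a nested column `a.b` becomes Seq("a", "b").
       val columns: Seq[Seq[String]] = Seq(Seq("a", "b"), Seq("c"))
       val json = mapper.writeValueAsString(columns)
       assert(json == """[["a","b"],["c"]]""")
       // Parsing needs only plain strings; `FieldReference`s are rebuilt from
       // them afterwards, which is what keeps the stored format generic.
       assert(mapper.readValue[Seq[Seq[String]]](json) == columns)
     }
   }
   ```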



##########
sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/CreateTableClusterBySuite.scala:
##########
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.execution.command.v1
+
+import org.apache.spark.sql.AnalysisException
+import org.apache.spark.sql.catalyst.TableIdentifier
+import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
+import org.apache.spark.sql.catalyst.catalog.ClusterBySpec
+import org.apache.spark.sql.connector.catalog.CatalogManager.SESSION_CATALOG_NAME
+import org.apache.spark.sql.execution.command
+
+/**
+ * This base suite contains unified tests for the `CREATE TABLE ... CLUSTER BY` command that
+ * check V1 table catalogs. The tests that cannot run for all V1 catalogs are located in more
+ * specific test suites:
+ *
+ *   - V1 In-Memory catalog: `org.apache.spark.sql.execution.command.v1.CreateTableClusterBySuite`
+ *   - V1 Hive External catalog:
+ *     `org.apache.spark.sql.hive.execution.command.CreateTableClusterBySuite`
+ */
+trait CreateTableClusterBySuiteBase extends command.CreateTableClusterBySuiteBase
+    with command.TestsV1AndV2Commands {
+  override def validateClusterBy(
+      tableIdent: TableIdentifier, clusteringColumns: Seq[String]): Unit = {
+    val catalog = spark.sessionState.catalog
+    val table = catalog.getTableMetadata(tableIdent)
+    assert(table.clusterBySpec ===
+      Some(ClusterBySpec(clusteringColumns.map(UnresolvedAttribute(_)))))
+  }
+}
+
+/**
+ * The class contains tests for the `CREATE TABLE ... CLUSTER BY` command to check V1 In-Memory
+ * table catalog.
+ */
+class CreateTableClusterBySuite extends CreateTableClusterBySuiteBase
+    with CommandSuiteBase {
+  override def commandVersion: String = super[CreateTableClusterBySuiteBase].commandVersion
+
+  test("clustering columns not defined in schema") {
+    withNamespaceAndTable("ns", "table") { tbl =>
+      checkError(
+        exception = intercept[AnalysisException](
+          sql(s"CREATE TABLE $tbl (id bigint, data string) $defaultUsing 
CLUSTER BY (unknown)")),
+        errorClass = "COLUMN_NOT_DEFINED_IN_TABLE",

Review Comment:
   Now that the logic follows the same path as `PreprocessTableCreation`, the error message is unified:
   https://github.com/apache/spark/pull/42577/files#diff-f2a04f920c41d18a7d387216f86405bfdc6fb09c44ebe1bb09312ba7dde55333R216
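
   For context, a unified assertion of this shape typically pins down both the error class and its message parameters. The sketch below is hypothetical: the parameter names and values are assumptions for illustration, not copied from the PR.

   ```scala
   // Hypothetical sketch, not PR code: `checkError` asserts the structured
   // error class plus its message parameters, so the CLUSTER BY path and the
   // `PreprocessTableCreation` path can be verified to raise the same error.
   checkError(
     exception = intercept[AnalysisException](
       sql(s"CREATE TABLE $tbl (id bigint, data string) $defaultUsing CLUSTER BY (unknown)")),
     errorClass = "COLUMN_NOT_DEFINED_IN_TABLE",
     parameters = Map(          // all entries below are assumed, for illustration
       "colType" -> "cluster by",
       "colName" -> "`unknown`",
       "tableName" -> s"`$tbl`",
       "tableCols" -> "`id`, `data`"))
   ```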



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

