This is an automated email from the ASF dual-hosted git repository.
lzljs3620320 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-paimon.git
The following commit(s) were added to refs/heads/master by this push:
     new c93840149 [spark] Spark Show create table should show primary key (#1524)
c93840149 is described below
commit c93840149518b5190f53f81c70b21d5a50f3cb08
Author: Jingsong Lee <[email protected]>
AuthorDate: Mon Jul 10 18:12:38 2023 +0800
[spark] Spark Show create table should show primary key (#1524)
---
.../src/main/java/org/apache/paimon/spark/SparkTable.java | 10 +++++++++-
.../src/test/java/org/apache/paimon/spark/SparkReadITCase.java | 8 +++++---
2 files changed, 14 insertions(+), 4 deletions(-)
diff --git a/paimon-spark/paimon-spark-common/src/main/java/org/apache/paimon/spark/SparkTable.java b/paimon-spark/paimon-spark-common/src/main/java/org/apache/paimon/spark/SparkTable.java
index 180e1fcec..b867bf3d4 100644
--- a/paimon-spark/paimon-spark-common/src/main/java/org/apache/paimon/spark/SparkTable.java
+++ b/paimon-spark/paimon-spark-common/src/main/java/org/apache/paimon/spark/SparkTable.java
@@ -18,6 +18,7 @@
package org.apache.paimon.spark;
+import org.apache.paimon.CoreOptions;
import org.apache.paimon.predicate.Predicate;
import org.apache.paimon.table.DataTable;
import org.apache.paimon.table.FileStoreTable;
@@ -40,6 +41,7 @@ import org.apache.spark.sql.util.CaseInsensitiveStringMap;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@@ -118,7 +120,13 @@ public class SparkTable
@Override
public Map<String, String> properties() {
if (table instanceof DataTable) {
- return ((DataTable) table).coreOptions().toMap();
+ Map<String, String> properties =
+ new HashMap<>(((DataTable) table).coreOptions().toMap());
+ if (table.primaryKeys().size() > 0) {
+ properties.put(
+ CoreOptions.PRIMARY_KEY.key(), String.join(",", table.primaryKeys()));
+ }
+ return properties;
} else {
return Collections.emptyMap();
}
diff --git a/paimon-spark/paimon-spark-common/src/test/java/org/apache/paimon/spark/SparkReadITCase.java b/paimon-spark/paimon-spark-common/src/test/java/org/apache/paimon/spark/SparkReadITCase.java
index 05f54e86b..5841570a0 100644
--- a/paimon-spark/paimon-spark-common/src/test/java/org/apache/paimon/spark/SparkReadITCase.java
+++ b/paimon-spark/paimon-spark-common/src/test/java/org/apache/paimon/spark/SparkReadITCase.java
@@ -246,7 +246,7 @@ public class SparkReadITCase extends SparkReadTestBase {
assertThat(spark.sql("SHOW CREATE TABLE t_pk_as").collectAsList().toString())
.isEqualTo(
String.format(
- "[[%sTBLPROPERTIES (\n 'path' = '%s')\n]]",
+ "[[%sTBLPROPERTIES (\n 'path' = '%s',\n 'primary-key' = 'a')\n]]",
showCreateString("t_pk_as", "a BIGINT", "b STRING", "c STRING"),
new Path(warehousePath, "default.db/t_pk_as")));
List<Row> resultPk = spark.sql("SELECT * FROM t_pk_as").collectAsList();
@@ -273,7 +273,8 @@ public class SparkReadITCase extends SparkReadTestBase {
"[[%s"
+ "PARTITIONED BY (dt)\n"
+ "TBLPROPERTIES (\n"
- + " 'path' = '%s')\n"
+ + " 'path' = '%s',\n"
+ + " 'primary-key' = 'dt,hh')\n"
+ "]]",
showCreateString(
"t_all_as",
@@ -358,7 +359,8 @@ public class SparkReadITCase extends SparkReadTestBase {
+ "COMMENT 'tbl comment'\n"
+ "TBLPROPERTIES (\n"
+ " 'k1' = 'v1',\n"
- + " 'path' = '%s')\n]]",
+ + " 'path' = '%s',\n"
+ + " 'primary-key' = 'a,b')\n]]",
showCreateString("tbl", "a INT COMMENT 'a comment'", "b STRING"),
new Path(warehousePath, "default.db/tbl")));
}