wuchong commented on code in PR #1726:
URL: https://github.com/apache/fluss/pull/1726#discussion_r2367279785


##########
fluss-flink/fluss-flink-2.1/src/test/java/org/apache/fluss/flink/catalog/Flink21CatalogITCase.java:
##########
@@ -17,5 +17,267 @@
 
 package org.apache.fluss.flink.catalog;
 
+import org.apache.flink.table.api.DataTypes;
+import org.apache.flink.table.api.Schema;
+import org.apache.flink.table.catalog.CatalogTable;
+import org.apache.flink.table.catalog.ObjectPath;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.apache.fluss.flink.FlinkConnectorOptions.BUCKET_KEY;
+import static org.apache.fluss.flink.FlinkConnectorOptions.BUCKET_NUMBER;
+import static org.assertj.core.api.Assertions.assertThat;
+
 /** IT case for catalog in Flink 2.1. */
-public class Flink21CatalogITCase extends FlinkCatalogITCase {}
+public class Flink21CatalogITCase extends FlinkCatalogITCase {
+
+    @BeforeAll
+    static void beforeAll() {
+        FlinkCatalogITCase.beforeAll();
+
+        // close the old one and open a new one later
+        catalog.close();
+
+        catalog =
+                new Flink21Catalog(
+                        catalog.catalogName,
+                        catalog.defaultDatabase,
+                        catalog.bootstrapServers,
+                        catalog.classLoader,
+                        catalog.securityConfigs);
+        catalog.open();
+    }
+
+    @Test
+    @Override
+    void testCreateTable() throws Exception {
+        // create a table will all supported data types
+        tEnv.executeSql(
+                "create table test_table "
+                        + "(a int not null primary key not enforced,"
+                        + " b CHAR(3),"
+                        + " c STRING not null COMMENT 'STRING COMMENT',"
+                        + " d STRING,"
+                        + " e BOOLEAN,"
+                        + " f BINARY(2),"
+                        + " g BYTES COMMENT 'BYTES',"
+                        + " h BYTES,"
+                        + " i DECIMAL(12, 2),"
+                        + " j TINYINT,"
+                        + " k SMALLINT,"
+                        + " l BIGINT,"
+                        + " m FLOAT,"
+                        + " n DOUBLE,"
+                        + " o DATE,"
+                        + " p TIME,"
+                        + " q TIMESTAMP,"
+                        + " r TIMESTAMP_LTZ,"
+                        + " s ROW<a INT>) COMMENT 'a test table'");
+        Schema.Builder schemaBuilder = Schema.newBuilder();
+        schemaBuilder
+                .column("a", DataTypes.INT().notNull())
+                .column("b", DataTypes.CHAR(3))
+                .column("c", DataTypes.STRING().notNull())
+                .withComment("STRING COMMENT")
+                .column("d", DataTypes.STRING())
+                .column("e", DataTypes.BOOLEAN())
+                .column("f", DataTypes.BINARY(2))
+                .column("g", DataTypes.BYTES())
+                .withComment("BYTES")
+                .column("h", DataTypes.BYTES())
+                .column("i", DataTypes.DECIMAL(12, 2))
+                .column("j", DataTypes.TINYINT())
+                .column("k", DataTypes.SMALLINT())
+                .column("l", DataTypes.BIGINT())
+                .column("m", DataTypes.FLOAT())
+                .column("n", DataTypes.DOUBLE())
+                .column("o", DataTypes.DATE())
+                .column("p", DataTypes.TIME())
+                .column("q", DataTypes.TIMESTAMP())
+                .column("r", DataTypes.TIMESTAMP_LTZ())
+                .column("s", DataTypes.ROW(DataTypes.FIELD("a", 
DataTypes.INT())))
+                .primaryKey("a")
+                .index("a");

Review Comment:
   We should avoid duplicating test content; otherwise, the code becomes hard 
to maintain. It seems the only difference from the base test is the 
`index("a")` call. Could we add a base method `addDefaultIndexKey(SchemaBuilder)` 
in the parent class, with a no-op default implementation that is overridden in 
this class?



##########
fluss-flink/fluss-flink-2.1/src/test/java/org/apache/fluss/flink/source/Flink21TableSourceITCase.java:
##########
@@ -17,5 +17,127 @@
 
 package org.apache.fluss.flink.source;
 
+import org.apache.fluss.metadata.TablePath;
+import org.apache.fluss.row.InternalRow;
+
+import org.apache.flink.table.api.config.ExecutionConfigOptions;
+import org.apache.flink.table.api.config.OptimizerConfigOptions;
+import org.apache.flink.types.Row;
+import org.apache.flink.util.CloseableIterator;
+import org.junit.jupiter.api.Test;
+
+import java.util.Arrays;
+import java.util.List;
+
+import static 
org.apache.fluss.flink.source.testutils.FlinkRowAssertionsUtils.assertResultsIgnoreOrder;
+import static org.apache.fluss.flink.utils.FlinkTestBase.writeRows;
+import static org.apache.fluss.testutils.DataTestUtils.row;
+import static org.assertj.core.api.Assertions.assertThat;
+
 /** IT case for {@link FlinkTableSource} in Flink 2.1. */
-public class Flink21TableSourceITCase extends FlinkTableSourceITCase {}
+public class Flink21TableSourceITCase extends FlinkTableSourceITCase {
+
+    @Test
+    void testDeltaJoin() throws Exception {
+        
tEnv.getConfig().set(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM,
 2);
+
+        String leftTableName = "left_table";
+        tEnv.executeSql(
+                String.format(
+                        "create table %s ( "
+                                + " a1 int, "
+                                + " b1 varchar, "
+                                + " c1 bigint, "
+                                + " d1 int, "
+                                + " e1 bigint, "
+                                + " primary key (c1, d1) NOT ENFORCED"
+                                + ") with ("
+                                + " 'connector' = 'fluss', "
+                                + " 'bucket.key' = 'c1', "
+                                // currently, delta join only support 
append-only source
+                                + " 'table.merge-engine' = 'first_row' "
+                                + ")",
+                        leftTableName));
+        List<InternalRow> rows1 =
+                Arrays.asList(
+                        row(1, "v1", 100L, 1, 10000L),
+                        row(2, "v2", 200L, 2, 20000L),
+                        // dropped because of first row
+                        row(3, "v1", 300L, 3, 30000L),
+                        row(4, "v4", 400L, 4, 40000L));
+        // write records and wait generate snapshot.
+        TablePath leftTablePath = TablePath.of(DEFAULT_DB, leftTableName);
+        writeRows(conn, leftTablePath, rows1, false);
+
+        String rightTableName = "right_table";
+        tEnv.executeSql(
+                String.format(
+                        "create table %s ("
+                                + " a2 int, "
+                                + " b2 varchar, "
+                                + " c2 bigint, "
+                                + " d2 int, "
+                                + " e2 bigint, "
+                                + " primary key (c2, d2) NOT ENFORCED"
+                                + ") with ("
+                                + " 'connector' = 'fluss', "
+                                + " 'bucket.key' = 'c2', "
+                                // currently, delta join only support 
append-only source
+                                + " 'table.merge-engine' = 'first_row' "
+                                + ")",
+                        rightTableName));
+        List<InternalRow> rows2 =
+                Arrays.asList(
+                        row(1, "v1", 100L, 1, 10000L),
+                        row(2, "v3", 200L, 2, 20000L),
+                        row(3, "v4", 300L, 4, 30000L),
+                        // dropped because of first row

Review Comment:
   If `(c1, d1)` is the primary key, shouldn't no records be dropped? Each row 
here has a distinct `(c1, d1)` pair, so the first-row merge engine would keep 
all of them.



##########
fluss-flink/fluss-flink-2.1/src/test/java/org/apache/fluss/flink/source/Flink21TableSourceITCase.java:
##########
@@ -17,5 +17,127 @@
 
 package org.apache.fluss.flink.source;
 
+import org.apache.fluss.metadata.TablePath;
+import org.apache.fluss.row.InternalRow;
+
+import org.apache.flink.table.api.config.ExecutionConfigOptions;
+import org.apache.flink.table.api.config.OptimizerConfigOptions;
+import org.apache.flink.types.Row;
+import org.apache.flink.util.CloseableIterator;
+import org.junit.jupiter.api.Test;
+
+import java.util.Arrays;
+import java.util.List;
+
+import static 
org.apache.fluss.flink.source.testutils.FlinkRowAssertionsUtils.assertResultsIgnoreOrder;
+import static org.apache.fluss.flink.utils.FlinkTestBase.writeRows;
+import static org.apache.fluss.testutils.DataTestUtils.row;
+import static org.assertj.core.api.Assertions.assertThat;
+
 /** IT case for {@link FlinkTableSource} in Flink 2.1. */
-public class Flink21TableSourceITCase extends FlinkTableSourceITCase {}
+public class Flink21TableSourceITCase extends FlinkTableSourceITCase {
+
+    @Test
+    void testDeltaJoin() throws Exception {
+        
tEnv.getConfig().set(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM,
 2);
+
+        String leftTableName = "left_table";
+        tEnv.executeSql(
+                String.format(
+                        "create table %s ( "
+                                + " a1 int, "
+                                + " b1 varchar, "
+                                + " c1 bigint, "
+                                + " d1 int, "
+                                + " e1 bigint, "
+                                + " primary key (c1, d1) NOT ENFORCED"
+                                + ") with ("
+                                + " 'connector' = 'fluss', "
+                                + " 'bucket.key' = 'c1', "
+                                // currently, delta join only support 
append-only source
+                                + " 'table.merge-engine' = 'first_row' "
+                                + ")",
+                        leftTableName));
+        List<InternalRow> rows1 =
+                Arrays.asList(
+                        row(1, "v1", 100L, 1, 10000L),
+                        row(2, "v2", 200L, 2, 20000L),
+                        // dropped because of first row

Review Comment:
   If `(c1, d1)` is the primary key, shouldn't no records be dropped? Each row 
here has a distinct `(c1, d1)` pair, so the first-row merge engine would keep 
all of them.



##########
fluss-flink/fluss-flink-2.1/src/test/java/org/apache/fluss/flink/source/Flink21TableSourceITCase.java:
##########
@@ -17,5 +17,127 @@
 
 package org.apache.fluss.flink.source;
 
+import org.apache.fluss.metadata.TablePath;
+import org.apache.fluss.row.InternalRow;
+
+import org.apache.flink.table.api.config.ExecutionConfigOptions;
+import org.apache.flink.table.api.config.OptimizerConfigOptions;
+import org.apache.flink.types.Row;
+import org.apache.flink.util.CloseableIterator;
+import org.junit.jupiter.api.Test;
+
+import java.util.Arrays;
+import java.util.List;
+
+import static 
org.apache.fluss.flink.source.testutils.FlinkRowAssertionsUtils.assertResultsIgnoreOrder;
+import static org.apache.fluss.flink.utils.FlinkTestBase.writeRows;
+import static org.apache.fluss.testutils.DataTestUtils.row;
+import static org.assertj.core.api.Assertions.assertThat;
+
 /** IT case for {@link FlinkTableSource} in Flink 2.1. */
-public class Flink21TableSourceITCase extends FlinkTableSourceITCase {}
+public class Flink21TableSourceITCase extends FlinkTableSourceITCase {
+
+    @Test
+    void testDeltaJoin() throws Exception {
+        
tEnv.getConfig().set(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM,
 2);
+
+        String leftTableName = "left_table";
+        tEnv.executeSql(
+                String.format(
+                        "create table %s ( "
+                                + " a1 int, "
+                                + " b1 varchar, "
+                                + " c1 bigint, "
+                                + " d1 int, "
+                                + " e1 bigint, "
+                                + " primary key (c1, d1) NOT ENFORCED"
+                                + ") with ("
+                                + " 'connector' = 'fluss', "
+                                + " 'bucket.key' = 'c1', "
+                                // currently, delta join only support 
append-only source
+                                + " 'table.merge-engine' = 'first_row' "
+                                + ")",
+                        leftTableName));
+        List<InternalRow> rows1 =
+                Arrays.asList(
+                        row(1, "v1", 100L, 1, 10000L),
+                        row(2, "v2", 200L, 2, 20000L),
+                        // dropped because of first row
+                        row(3, "v1", 300L, 3, 30000L),
+                        row(4, "v4", 400L, 4, 40000L));
+        // write records and wait generate snapshot.

Review Comment:
   Why do we need to wait for a snapshot to be generated here? 



##########
fluss-flink/fluss-flink-2.1/src/test/java/org/apache/fluss/flink/catalog/Flink21CatalogITCase.java:
##########
@@ -17,5 +17,267 @@
 
 package org.apache.fluss.flink.catalog;
 
+import org.apache.flink.table.api.DataTypes;
+import org.apache.flink.table.api.Schema;
+import org.apache.flink.table.catalog.CatalogTable;
+import org.apache.flink.table.catalog.ObjectPath;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.apache.fluss.flink.FlinkConnectorOptions.BUCKET_KEY;
+import static org.apache.fluss.flink.FlinkConnectorOptions.BUCKET_NUMBER;
+import static org.assertj.core.api.Assertions.assertThat;
+
 /** IT case for catalog in Flink 2.1. */
-public class Flink21CatalogITCase extends FlinkCatalogITCase {}
+public class Flink21CatalogITCase extends FlinkCatalogITCase {
+
+    @BeforeAll
+    static void beforeAll() {
+        FlinkCatalogITCase.beforeAll();
+
+        // close the old one and open a new one later
+        catalog.close();
+
+        catalog =
+                new Flink21Catalog(
+                        catalog.catalogName,
+                        catalog.defaultDatabase,
+                        catalog.bootstrapServers,
+                        catalog.classLoader,
+                        catalog.securityConfigs);
+        catalog.open();
+    }
+
+    @Test
+    @Override
+    void testCreateTable() throws Exception {
+        // create a table will all supported data types
+        tEnv.executeSql(
+                "create table test_table "
+                        + "(a int not null primary key not enforced,"
+                        + " b CHAR(3),"
+                        + " c STRING not null COMMENT 'STRING COMMENT',"
+                        + " d STRING,"
+                        + " e BOOLEAN,"
+                        + " f BINARY(2),"
+                        + " g BYTES COMMENT 'BYTES',"
+                        + " h BYTES,"
+                        + " i DECIMAL(12, 2),"
+                        + " j TINYINT,"
+                        + " k SMALLINT,"
+                        + " l BIGINT,"
+                        + " m FLOAT,"
+                        + " n DOUBLE,"
+                        + " o DATE,"
+                        + " p TIME,"
+                        + " q TIMESTAMP,"
+                        + " r TIMESTAMP_LTZ,"
+                        + " s ROW<a INT>) COMMENT 'a test table'");
+        Schema.Builder schemaBuilder = Schema.newBuilder();
+        schemaBuilder
+                .column("a", DataTypes.INT().notNull())
+                .column("b", DataTypes.CHAR(3))
+                .column("c", DataTypes.STRING().notNull())
+                .withComment("STRING COMMENT")
+                .column("d", DataTypes.STRING())
+                .column("e", DataTypes.BOOLEAN())
+                .column("f", DataTypes.BINARY(2))
+                .column("g", DataTypes.BYTES())
+                .withComment("BYTES")
+                .column("h", DataTypes.BYTES())
+                .column("i", DataTypes.DECIMAL(12, 2))
+                .column("j", DataTypes.TINYINT())
+                .column("k", DataTypes.SMALLINT())
+                .column("l", DataTypes.BIGINT())
+                .column("m", DataTypes.FLOAT())
+                .column("n", DataTypes.DOUBLE())
+                .column("o", DataTypes.DATE())
+                .column("p", DataTypes.TIME())
+                .column("q", DataTypes.TIMESTAMP())
+                .column("r", DataTypes.TIMESTAMP_LTZ())
+                .column("s", DataTypes.ROW(DataTypes.FIELD("a", 
DataTypes.INT())))
+                .primaryKey("a")
+                .index("a");
+        Schema expectedSchema = schemaBuilder.build();
+        CatalogTable table =
+                (CatalogTable) catalog.getTable(new ObjectPath(DEFAULT_DB, 
"test_table"));
+        assertThat(table.getUnresolvedSchema()).isEqualTo(expectedSchema);
+    }
+
+    @Test
+    @Override
+    void testTableWithExpression() throws Exception {
+        // create a table with watermark and computed column
+        tEnv.executeSql(
+                "CREATE TABLE expression_test (\n"
+                        + "    `user` BIGINT not null primary key not 
enforced,\n"
+                        + "    product STRING COMMENT 'comment1',\n"
+                        + "    price DOUBLE,\n"
+                        + "    quantity DOUBLE,\n"
+                        + "    cost AS price * quantity,\n"
+                        + "    order_time TIMESTAMP(3),\n"
+                        + "    WATERMARK FOR order_time AS order_time - 
INTERVAL '5' SECOND\n"
+                        + ") with ('k1' = 'v1')");
+        CatalogTable table =
+                (CatalogTable) catalog.getTable(new ObjectPath(DEFAULT_DB, 
"expression_test"));
+        Schema.Builder schemaBuilder = Schema.newBuilder();
+        schemaBuilder
+                .column("user", DataTypes.BIGINT().notNull())
+                .column("product", DataTypes.STRING())
+                .withComment("comment1")
+                .column("price", DataTypes.DOUBLE())
+                .column("quantity", DataTypes.DOUBLE())
+                .columnByExpression("cost", "`price` * `quantity`")
+                .column("order_time", DataTypes.TIMESTAMP(3))
+                .watermark("order_time", "`order_time` - INTERVAL '5' SECOND")
+                .primaryKey("user")
+                .index("user");

Review Comment:
   ditto



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to