This is an automated email from the ASF dual-hosted git repository.
lzljs3620320 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/paimon.git
The following commit(s) were added to refs/heads/master by this push:
new 3297f0eb0 [doc] Adjust the doc order in java-api
3297f0eb0 is described below
commit 3297f0eb0a6f286690e49b2876089ceb4381206a
Author: Jingsong <[email protected]>
AuthorDate: Fri Aug 23 13:27:08 2024 +0800
[doc] Adjust the doc order in java-api
---
docs/content/program-api/java-api.md | 808 +++++++++++++++++------------------
1 file changed, 404 insertions(+), 404 deletions(-)
diff --git a/docs/content/program-api/java-api.md
b/docs/content/program-api/java-api.md
index 171ace1d6..021a7418c 100644
--- a/docs/content/program-api/java-api.md
+++ b/docs/content/program-api/java-api.md
@@ -85,82 +85,6 @@ public class CreateCatalog {
}
```
-## Create Database
-
-You can use the catalog to create databases. The created databases are
persistence in the file system.
-
-```java
-import org.apache.paimon.catalog.Catalog;
-
-public class CreateDatabase {
-
- public static void main(String[] args) {
- try {
- Catalog catalog = CreateCatalog.createFilesystemCatalog();
- catalog.createDatabase("my_db", false);
- } catch (Catalog.DatabaseAlreadyExistException e) {
- // do something
- }
- }
-}
-```
-
-## Determine Whether Database Exists
-
-You can use the catalog to determine whether the database exists
-
-```java
-import org.apache.paimon.catalog.Catalog;
-
-public class DatabaseExists {
-
- public static void main(String[] args) {
- Catalog catalog = CreateCatalog.createFilesystemCatalog();
- boolean exists = catalog.databaseExists("my_db");
- }
-}
-```
-
-## List Databases
-
-You can use the catalog to list databases.
-
-```java
-import org.apache.paimon.catalog.Catalog;
-
-import java.util.List;
-
-public class ListDatabases {
-
- public static void main(String[] args) {
- Catalog catalog = CreateCatalog.createFilesystemCatalog();
- List<String> databases = catalog.listDatabases();
- }
-}
-```
-
-## Drop Database
-
-You can use the catalog to drop databases.
-
-```java
-import org.apache.paimon.catalog.Catalog;
-
-public class DropDatabase {
-
- public static void main(String[] args) {
- try {
- Catalog catalog = CreateCatalog.createFilesystemCatalog();
- catalog.dropDatabase("my_db", false, true);
- } catch (Catalog.DatabaseNotEmptyException e) {
- // do something
- } catch (Catalog.DatabaseNotExistException e) {
- // do something
- }
- }
-}
-```
-
## Create Table
You can use the catalog to create tables. The created tables are persistence
in the file system.
@@ -219,373 +143,148 @@ public class GetTable {
}
```
-## Determine Whether Table Exists
+## Batch Read
-You can use the catalog to determine whether the table exists
+For relatively small amounts of data, or for data that has undergone
projection and filtering,
+you can directly use a standalone program to read the table data.
+
+But if the data volume of the table is relatively large, you can distribute
splits to different tasks for reading.
+
+The reading is divided into two stages:
+
+1. Scan Plan: Generate plan splits in a global node ('Coordinator', or named
'Driver').
+2. Read Split: Read split in distributed tasks.
```java
-import org.apache.paimon.catalog.Catalog;
-import org.apache.paimon.catalog.Identifier;
+import org.apache.paimon.data.InternalRow;
+import org.apache.paimon.predicate.Predicate;
+import org.apache.paimon.predicate.PredicateBuilder;
+import org.apache.paimon.reader.RecordReader;
+import org.apache.paimon.table.Table;
+import org.apache.paimon.table.source.ReadBuilder;
+import org.apache.paimon.table.source.Split;
+import org.apache.paimon.table.source.TableRead;
+import org.apache.paimon.types.DataTypes;
+import org.apache.paimon.types.RowType;
-public class TableExists {
+import com.google.common.collect.Lists;
- public static void main(String[] args) {
- Identifier identifier = Identifier.create("my_db", "my_table");
- Catalog catalog = CreateCatalog.createFilesystemCatalog();
- boolean exists = catalog.tableExists(identifier);
- }
-}
-```
+import java.util.List;
-## List Tables
+public class ReadTable {
-You can use the catalog to list tables.
+ public static void main(String[] args) throws Exception {
+ // 1. Create a ReadBuilder and push filter (`withFilter`)
+ // and projection (`withProjection`) if necessary
+ Table table = GetTable.getTable();
-```java
-import org.apache.paimon.catalog.Catalog;
+ PredicateBuilder builder =
+ new PredicateBuilder(RowType.of(DataTypes.STRING(),
DataTypes.INT()));
+ Predicate notNull = builder.isNotNull(0);
+ Predicate greaterOrEqual = builder.greaterOrEqual(1, 12);
-import java.util.List;
+ int[] projection = new int[] {0, 1};
-public class ListTables {
+ ReadBuilder readBuilder =
+ table.newReadBuilder()
+ .withProjection(projection)
+ .withFilter(Lists.newArrayList(notNull, greaterOrEqual));
- public static void main(String[] args) {
- try {
- Catalog catalog = CreateCatalog.createFilesystemCatalog();
- List<String> tables = catalog.listTables("my_db");
- } catch (Catalog.DatabaseNotExistException e) {
- // do something
- }
+ // 2. Plan splits in 'Coordinator' (or named 'Driver')
+ List<Split> splits = readBuilder.newScan().plan().splits();
+
+ // 3. Distribute these splits to different tasks
+
+ // 4. Read a split in task
+ TableRead read = readBuilder.newRead();
+ RecordReader<InternalRow> reader = read.createReader(splits);
+ reader.forEachRemaining(System.out::println);
}
}
```
-## Drop Table
+## Batch Write
-You can use the catalog to drop table.
+The writing is divided into two stages:
+
+1. Write records: Write records in distributed tasks, generate commit messages.
+2. Commit/Abort: Collect all CommitMessages, commit them in a global node
('Coordinator', or named 'Driver', or named 'Committer').
+ When the commit fails for certain reason, abort unsuccessful commit via
CommitMessages.
```java
-import org.apache.paimon.catalog.Catalog;
-import org.apache.paimon.catalog.Identifier;
+import org.apache.paimon.data.BinaryString;
+import org.apache.paimon.data.GenericRow;
+import org.apache.paimon.table.Table;
+import org.apache.paimon.table.sink.BatchTableCommit;
+import org.apache.paimon.table.sink.BatchTableWrite;
+import org.apache.paimon.table.sink.BatchWriteBuilder;
+import org.apache.paimon.table.sink.CommitMessage;
-public class DropTable {
+import java.util.List;
- public static void main(String[] args) {
- Identifier identifier = Identifier.create("my_db", "my_table");
- try {
- Catalog catalog = CreateCatalog.createFilesystemCatalog();
- catalog.dropTable(identifier, false);
- } catch (Catalog.TableNotExistException e) {
- // do something
- }
- }
-}
-```
+public class BatchWrite {
+ public static void main(String[] args) throws Exception {
+ // 1. Create a WriteBuilder (Serializable)
+ Table table = GetTable.getTable();
+ BatchWriteBuilder writeBuilder =
table.newBatchWriteBuilder().withOverwrite();
-## Rename Table
+ // 2. Write records in distributed tasks
+ BatchTableWrite write = writeBuilder.newWrite();
-You can use the catalog to rename a table.
+ GenericRow record1 = GenericRow.of(BinaryString.fromString("Alice"),
12);
+ GenericRow record2 = GenericRow.of(BinaryString.fromString("Bob"), 5);
+ GenericRow record3 = GenericRow.of(BinaryString.fromString("Emily"),
18);
-```java
-import org.apache.paimon.catalog.Catalog;
-import org.apache.paimon.catalog.Identifier;
+ // If this is a distributed write, you can use
writeBuilder.newWriteSelector.
+ // WriteSelector determines to which logical downstream writers a
record should be written to.
+ // If it returns empty, no data distribution is required.
-public class RenameTable {
+ write.write(record1);
+ write.write(record2);
+ write.write(record3);
- public static void main(String[] args) {
- Identifier fromTableIdentifier = Identifier.create("my_db",
"my_table");
- Identifier toTableIdentifier = Identifier.create("my_db",
"test_table");
- try {
- Catalog catalog = CreateCatalog.createFilesystemCatalog();
- catalog.renameTable(fromTableIdentifier, toTableIdentifier, false);
- } catch (Catalog.TableAlreadyExistException e) {
- // do something
- } catch (Catalog.TableNotExistException e) {
- // do something
- }
+ List<CommitMessage> messages = write.prepareCommit();
+
+ // 3. Collect all CommitMessages to a global node and commit
+ BatchTableCommit commit = writeBuilder.newCommit();
+ commit.commit(messages);
+
+ // Abort unsuccessful commit to delete data files
+ // commit.abort(messages);
}
}
```
-## Alter Table
+## Stream Read
-You can use the catalog to alter a table, but you need to pay attention to the
following points.
+The difference of Stream Read is that StreamTableScan can continuously scan
and generate splits.
-- Column %s cannot specify NOT NULL in the %s table.
-- Cannot update partition column type in the table.
-- Cannot change nullability of primary key.
-- If the type of the column is nested row type, update the column type is not
supported.
-- Update column to nested row type is not supported.
+StreamTableScan provides the ability to checkpoint and restore, which can let
you save the correct state
+during stream reading.
```java
-import org.apache.paimon.catalog.Catalog;
-import org.apache.paimon.catalog.Identifier;
-import org.apache.paimon.schema.Schema;
-import org.apache.paimon.schema.SchemaChange;
-import org.apache.paimon.types.DataField;
+import org.apache.paimon.data.InternalRow;
+import org.apache.paimon.predicate.Predicate;
+import org.apache.paimon.predicate.PredicateBuilder;
+import org.apache.paimon.reader.RecordReader;
+import org.apache.paimon.table.Table;
+import org.apache.paimon.table.source.ReadBuilder;
+import org.apache.paimon.table.source.Split;
+import org.apache.paimon.table.source.StreamTableScan;
+import org.apache.paimon.table.source.TableRead;
import org.apache.paimon.types.DataTypes;
+import org.apache.paimon.types.RowType;
import com.google.common.collect.Lists;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
+import java.util.List;
-public class AlterTable {
+public class StreamReadTable {
- public static void main(String[] args) {
- Identifier identifier = Identifier.create("my_db", "my_table");
-
- Map<String, String> options = new HashMap<>();
- options.put("bucket", "4");
- options.put("compaction.max.file-num", "40");
-
- Catalog catalog = CreateCatalog.createFilesystemCatalog();
- catalog.createDatabase("my_db", false);
-
- try {
- catalog.createTable(
- identifier,
- new Schema(
- Lists.newArrayList(
- new DataField(0, "col1", DataTypes.STRING(), "field1"),
- new DataField(1, "col2", DataTypes.STRING(), "field2"),
- new DataField(2, "col3", DataTypes.STRING(), "field3"),
- new DataField(3, "col4", DataTypes.BIGINT(), "field4"),
- new DataField(
- 4,
- "col5",
- DataTypes.ROW(
- new DataField(
- 5, "f1", DataTypes.STRING(), "f1"),
- new DataField(
- 6, "f2", DataTypes.STRING(), "f2"),
- new DataField(
- 7, "f3", DataTypes.STRING(), "f3")),
- "field5"),
- new DataField(8, "col6", DataTypes.STRING(),
"field6")),
- Lists.newArrayList("col1"), // partition keys
- Lists.newArrayList("col1", "col2"), // primary key
- options,
- "table comment"),
- false);
- } catch (Catalog.TableAlreadyExistException e) {
- // do something
- } catch (Catalog.DatabaseNotExistException e) {
- // do something
- }
-
- // add option
- SchemaChange addOption =
SchemaChange.setOption("snapshot.time-retained", "2h");
- // remove option
- SchemaChange removeOption =
SchemaChange.removeOption("compaction.max.file-num");
- // add column
- SchemaChange addColumn = SchemaChange.addColumn("col1_after",
DataTypes.STRING());
- // add a column after col1
- SchemaChange.Move after = SchemaChange.Move.after("col1_after",
"col1");
- SchemaChange addColumnAfterField =
- SchemaChange.addColumn("col7", DataTypes.STRING(), "", after);
- // rename column
- SchemaChange renameColumn = SchemaChange.renameColumn("col3",
"col3_new_name");
- // drop column
- SchemaChange dropColumn = SchemaChange.dropColumn("col6");
- // update column comment
- SchemaChange updateColumnComment =
- SchemaChange.updateColumnComment(new String[] {"col4"}, "col4
field");
- // update nested column comment
- SchemaChange updateNestedColumnComment =
- SchemaChange.updateColumnComment(new String[] {"col5", "f1"},
"col5 f1 field");
- // update column type
- SchemaChange updateColumnType = SchemaChange.updateColumnType("col4",
DataTypes.DOUBLE());
- // update column position, you need to pass in a parameter of type Move
- SchemaChange updateColumnPosition =
- SchemaChange.updateColumnPosition(SchemaChange.Move.first("col4"));
- // update column nullability
- SchemaChange updateColumnNullability =
- SchemaChange.updateColumnNullability(new String[] {"col4"}, false);
- // update nested column nullability
- SchemaChange updateNestedColumnNullability =
- SchemaChange.updateColumnNullability(new String[] {"col5", "f2"},
false);
-
- SchemaChange[] schemaChanges =
- new SchemaChange[] {
- addOption,
- removeOption,
- addColumn,
- addColumnAfterField,
- renameColumn,
- dropColumn,
- updateColumnComment,
- updateNestedColumnComment,
- updateColumnType,
- updateColumnPosition,
- updateColumnNullability,
- updateNestedColumnNullability
- };
- try {
- catalog.alterTable(identifier, Arrays.asList(schemaChanges),
false);
- } catch (Catalog.TableNotExistException e) {
- // do something
- } catch (Catalog.ColumnAlreadyExistException e) {
- // do something
- } catch (Catalog.ColumnNotExistException e) {
- // do something
- }
- }
-}
-```
-
-Table metadata:
-
-- `name` return a name string to identify this table.
-- `rowType` return the current row type of this table containing a sequence of
table's fields.
-- `partitionKeys` returns the partition keys of this table.
-- `parimaryKeys` returns the primary keys of this table.
-- `options` returns the configuration of this table in a map of key-value.
-- `comment` returns the optional comment of this table.
-- `copy` return a new table by applying dynamic options to this table.
-
-## Batch Read
-
-For relatively small amounts of data, or for data that has undergone
projection and filtering,
-you can directly use a standalone program to read the table data.
-
-But if the data volume of the table is relatively large, you can distribute
splits to different tasks for reading.
-
-The reading is divided into two stages:
-
-1. Scan Plan: Generate plan splits in a global node ('Coordinator', or named
'Driver').
-2. Read Split: Read split in distributed tasks.
-
-```java
-import org.apache.paimon.data.InternalRow;
-import org.apache.paimon.predicate.Predicate;
-import org.apache.paimon.predicate.PredicateBuilder;
-import org.apache.paimon.reader.RecordReader;
-import org.apache.paimon.table.Table;
-import org.apache.paimon.table.source.ReadBuilder;
-import org.apache.paimon.table.source.Split;
-import org.apache.paimon.table.source.TableRead;
-import org.apache.paimon.types.DataTypes;
-import org.apache.paimon.types.RowType;
-
-import com.google.common.collect.Lists;
-
-import java.util.List;
-
-public class ReadTable {
-
- public static void main(String[] args) throws Exception {
- // 1. Create a ReadBuilder and push filter (`withFilter`)
- // and projection (`withProjection`) if necessary
- Table table = GetTable.getTable();
-
- PredicateBuilder builder =
- new PredicateBuilder(RowType.of(DataTypes.STRING(),
DataTypes.INT()));
- Predicate notNull = builder.isNotNull(0);
- Predicate greaterOrEqual = builder.greaterOrEqual(1, 12);
-
- int[] projection = new int[] {0, 1};
-
- ReadBuilder readBuilder =
- table.newReadBuilder()
- .withProjection(projection)
- .withFilter(Lists.newArrayList(notNull, greaterOrEqual));
-
- // 2. Plan splits in 'Coordinator' (or named 'Driver')
- List<Split> splits = readBuilder.newScan().plan().splits();
-
- // 3. Distribute these splits to different tasks
-
- // 4. Read a split in task
- TableRead read = readBuilder.newRead();
- RecordReader<InternalRow> reader = read.createReader(splits);
- reader.forEachRemaining(System.out::println);
- }
-}
-```
-
-## Batch Write
-
-The writing is divided into two stages:
-
-1. Write records: Write records in distributed tasks, generate commit messages.
-2. Commit/Abort: Collect all CommitMessages, commit them in a global node
('Coordinator', or named 'Driver', or named 'Committer').
- When the commit fails for certain reason, abort unsuccessful commit via
CommitMessages.
-
-```java
-import org.apache.paimon.data.BinaryString;
-import org.apache.paimon.data.GenericRow;
-import org.apache.paimon.table.Table;
-import org.apache.paimon.table.sink.BatchTableCommit;
-import org.apache.paimon.table.sink.BatchTableWrite;
-import org.apache.paimon.table.sink.BatchWriteBuilder;
-import org.apache.paimon.table.sink.CommitMessage;
-
-import java.util.List;
-
-public class BatchWrite {
- public static void main(String[] args) throws Exception {
- // 1. Create a WriteBuilder (Serializable)
- Table table = GetTable.getTable();
- BatchWriteBuilder writeBuilder =
table.newBatchWriteBuilder().withOverwrite();
-
- // 2. Write records in distributed tasks
- BatchTableWrite write = writeBuilder.newWrite();
-
- GenericRow record1 = GenericRow.of(BinaryString.fromString("Alice"),
12);
- GenericRow record2 = GenericRow.of(BinaryString.fromString("Bob"), 5);
- GenericRow record3 = GenericRow.of(BinaryString.fromString("Emily"),
18);
-
- // If this is a distributed write, you can use
writeBuilder.newWriteSelector.
- // WriteSelector determines to which logical downstream writers a
record should be written to.
- // If it returns empty, no data distribution is required.
-
- write.write(record1);
- write.write(record2);
- write.write(record3);
-
- List<CommitMessage> messages = write.prepareCommit();
-
- // 3. Collect all CommitMessages to a global node and commit
- BatchTableCommit commit = writeBuilder.newCommit();
- commit.commit(messages);
-
- // Abort unsuccessful commit to delete data files
- // commit.abort(messages);
- }
-}
-```
-
-## Stream Read
-
-The difference of Stream Read is that StreamTableScan can continuously scan
and generate splits.
-
-StreamTableScan provides the ability to checkpoint and restore, which can let
you save the correct state
-during stream reading.
-
-```java
-import org.apache.paimon.data.InternalRow;
-import org.apache.paimon.predicate.Predicate;
-import org.apache.paimon.predicate.PredicateBuilder;
-import org.apache.paimon.reader.RecordReader;
-import org.apache.paimon.table.Table;
-import org.apache.paimon.table.source.ReadBuilder;
-import org.apache.paimon.table.source.Split;
-import org.apache.paimon.table.source.StreamTableScan;
-import org.apache.paimon.table.source.TableRead;
-import org.apache.paimon.types.DataTypes;
-import org.apache.paimon.types.RowType;
-
-import com.google.common.collect.Lists;
-
-import java.util.List;
-
-public class StreamReadTable {
-
- public static void main(String[] args) throws Exception {
- // 1. Create a ReadBuilder and push filter (`withFilter`)
- // and projection (`withProjection`) if necessary
- Table table = GetTable.getTable();
+ public static void main(String[] args) throws Exception {
+ // 1. Create a ReadBuilder and push filter (`withFilter`)
+ // and projection (`withProjection`) if necessary
+ Table table = GetTable.getTable();
PredicateBuilder builder =
new PredicateBuilder(RowType.of(DataTypes.STRING(),
DataTypes.INT()));
@@ -725,3 +424,304 @@ public class StreamWriteTable {
| <= | org.apache.paimon.predicate.PredicateBuilder.LessOrEqual |
| > | org.apache.paimon.predicate.PredicateBuilder.GreaterThan |
| >= | org.apache.paimon.predicate.PredicateBuilder.GreaterOrEqual |
+
+## Create Database
+
+You can use the catalog to create databases. The created databases are
persistence in the file system.
+
+```java
+import org.apache.paimon.catalog.Catalog;
+
+public class CreateDatabase {
+
+ public static void main(String[] args) {
+ try {
+ Catalog catalog = CreateCatalog.createFilesystemCatalog();
+ catalog.createDatabase("my_db", false);
+ } catch (Catalog.DatabaseAlreadyExistException e) {
+ // do something
+ }
+ }
+}
+```
+
+## Determine Whether Database Exists
+
+You can use the catalog to determine whether the database exists
+
+```java
+import org.apache.paimon.catalog.Catalog;
+
+public class DatabaseExists {
+
+ public static void main(String[] args) {
+ Catalog catalog = CreateCatalog.createFilesystemCatalog();
+ boolean exists = catalog.databaseExists("my_db");
+ }
+}
+```
+
+## List Databases
+
+You can use the catalog to list databases.
+
+```java
+import org.apache.paimon.catalog.Catalog;
+
+import java.util.List;
+
+public class ListDatabases {
+
+ public static void main(String[] args) {
+ Catalog catalog = CreateCatalog.createFilesystemCatalog();
+ List<String> databases = catalog.listDatabases();
+ }
+}
+```
+
+## Drop Database
+
+You can use the catalog to drop databases.
+
+```java
+import org.apache.paimon.catalog.Catalog;
+
+public class DropDatabase {
+
+ public static void main(String[] args) {
+ try {
+ Catalog catalog = CreateCatalog.createFilesystemCatalog();
+ catalog.dropDatabase("my_db", false, true);
+ } catch (Catalog.DatabaseNotEmptyException e) {
+ // do something
+ } catch (Catalog.DatabaseNotExistException e) {
+ // do something
+ }
+ }
+}
+```
+
+## Determine Whether Table Exists
+
+You can use the catalog to determine whether the table exists
+
+```java
+import org.apache.paimon.catalog.Catalog;
+import org.apache.paimon.catalog.Identifier;
+
+public class TableExists {
+
+ public static void main(String[] args) {
+ Identifier identifier = Identifier.create("my_db", "my_table");
+ Catalog catalog = CreateCatalog.createFilesystemCatalog();
+ boolean exists = catalog.tableExists(identifier);
+ }
+}
+```
+
+## List Tables
+
+You can use the catalog to list tables.
+
+```java
+import org.apache.paimon.catalog.Catalog;
+
+import java.util.List;
+
+public class ListTables {
+
+ public static void main(String[] args) {
+ try {
+ Catalog catalog = CreateCatalog.createFilesystemCatalog();
+ List<String> tables = catalog.listTables("my_db");
+ } catch (Catalog.DatabaseNotExistException e) {
+ // do something
+ }
+ }
+}
+```
+
+## Drop Table
+
+You can use the catalog to drop table.
+
+```java
+import org.apache.paimon.catalog.Catalog;
+import org.apache.paimon.catalog.Identifier;
+
+public class DropTable {
+
+ public static void main(String[] args) {
+ Identifier identifier = Identifier.create("my_db", "my_table");
+ try {
+ Catalog catalog = CreateCatalog.createFilesystemCatalog();
+ catalog.dropTable(identifier, false);
+ } catch (Catalog.TableNotExistException e) {
+ // do something
+ }
+ }
+}
+```
+
+## Rename Table
+
+You can use the catalog to rename a table.
+
+```java
+import org.apache.paimon.catalog.Catalog;
+import org.apache.paimon.catalog.Identifier;
+
+public class RenameTable {
+
+ public static void main(String[] args) {
+ Identifier fromTableIdentifier = Identifier.create("my_db",
"my_table");
+ Identifier toTableIdentifier = Identifier.create("my_db",
"test_table");
+ try {
+ Catalog catalog = CreateCatalog.createFilesystemCatalog();
+ catalog.renameTable(fromTableIdentifier, toTableIdentifier, false);
+ } catch (Catalog.TableAlreadyExistException e) {
+ // do something
+ } catch (Catalog.TableNotExistException e) {
+ // do something
+ }
+ }
+}
+```
+
+## Alter Table
+
+You can use the catalog to alter a table, but you need to pay attention to the
following points.
+
+- Column %s cannot specify NOT NULL in the %s table.
+- Cannot update partition column type in the table.
+- Cannot change nullability of primary key.
+- If the type of the column is nested row type, update the column type is not
supported.
+- Update column to nested row type is not supported.
+
+```java
+import org.apache.paimon.catalog.Catalog;
+import org.apache.paimon.catalog.Identifier;
+import org.apache.paimon.schema.Schema;
+import org.apache.paimon.schema.SchemaChange;
+import org.apache.paimon.types.DataField;
+import org.apache.paimon.types.DataTypes;
+
+import com.google.common.collect.Lists;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+public class AlterTable {
+
+ public static void main(String[] args) {
+ Identifier identifier = Identifier.create("my_db", "my_table");
+
+ Map<String, String> options = new HashMap<>();
+ options.put("bucket", "4");
+ options.put("compaction.max.file-num", "40");
+
+ Catalog catalog = CreateCatalog.createFilesystemCatalog();
+ catalog.createDatabase("my_db", false);
+
+ try {
+ catalog.createTable(
+ identifier,
+ new Schema(
+ Lists.newArrayList(
+ new DataField(0, "col1", DataTypes.STRING(), "field1"),
+ new DataField(1, "col2", DataTypes.STRING(), "field2"),
+ new DataField(2, "col3", DataTypes.STRING(), "field3"),
+ new DataField(3, "col4", DataTypes.BIGINT(), "field4"),
+ new DataField(
+ 4,
+ "col5",
+ DataTypes.ROW(
+ new DataField(
+ 5, "f1", DataTypes.STRING(), "f1"),
+ new DataField(
+ 6, "f2", DataTypes.STRING(), "f2"),
+ new DataField(
+ 7, "f3", DataTypes.STRING(), "f3")),
+ "field5"),
+ new DataField(8, "col6", DataTypes.STRING(),
"field6")),
+ Lists.newArrayList("col1"), // partition keys
+ Lists.newArrayList("col1", "col2"), // primary key
+ options,
+ "table comment"),
+ false);
+ } catch (Catalog.TableAlreadyExistException e) {
+ // do something
+ } catch (Catalog.DatabaseNotExistException e) {
+ // do something
+ }
+
+ // add option
+ SchemaChange addOption =
SchemaChange.setOption("snapshot.time-retained", "2h");
+ // remove option
+ SchemaChange removeOption =
SchemaChange.removeOption("compaction.max.file-num");
+ // add column
+ SchemaChange addColumn = SchemaChange.addColumn("col1_after",
DataTypes.STRING());
+ // add a column after col1
+ SchemaChange.Move after = SchemaChange.Move.after("col1_after",
"col1");
+ SchemaChange addColumnAfterField =
+ SchemaChange.addColumn("col7", DataTypes.STRING(), "", after);
+ // rename column
+ SchemaChange renameColumn = SchemaChange.renameColumn("col3",
"col3_new_name");
+ // drop column
+ SchemaChange dropColumn = SchemaChange.dropColumn("col6");
+ // update column comment
+ SchemaChange updateColumnComment =
+ SchemaChange.updateColumnComment(new String[] {"col4"}, "col4
field");
+ // update nested column comment
+ SchemaChange updateNestedColumnComment =
+ SchemaChange.updateColumnComment(new String[] {"col5", "f1"},
"col5 f1 field");
+ // update column type
+ SchemaChange updateColumnType = SchemaChange.updateColumnType("col4",
DataTypes.DOUBLE());
+ // update column position, you need to pass in a parameter of type Move
+ SchemaChange updateColumnPosition =
+ SchemaChange.updateColumnPosition(SchemaChange.Move.first("col4"));
+ // update column nullability
+ SchemaChange updateColumnNullability =
+ SchemaChange.updateColumnNullability(new String[] {"col4"}, false);
+ // update nested column nullability
+ SchemaChange updateNestedColumnNullability =
+ SchemaChange.updateColumnNullability(new String[] {"col5", "f2"},
false);
+
+ SchemaChange[] schemaChanges =
+ new SchemaChange[] {
+ addOption,
+ removeOption,
+ addColumn,
+ addColumnAfterField,
+ renameColumn,
+ dropColumn,
+ updateColumnComment,
+ updateNestedColumnComment,
+ updateColumnType,
+ updateColumnPosition,
+ updateColumnNullability,
+ updateNestedColumnNullability
+ };
+ try {
+ catalog.alterTable(identifier, Arrays.asList(schemaChanges),
false);
+ } catch (Catalog.TableNotExistException e) {
+ // do something
+ } catch (Catalog.ColumnAlreadyExistException e) {
+ // do something
+ } catch (Catalog.ColumnNotExistException e) {
+ // do something
+ }
+ }
+}
+```
+
+Table metadata:
+
+- `name` return a name string to identify this table.
+- `rowType` return the current row type of this table containing a sequence of
table's fields.
+- `partitionKeys` returns the partition keys of this table.
+- `parimaryKeys` returns the primary keys of this table.
+- `options` returns the configuration of this table in a map of key-value.
+- `comment` returns the optional comment of this table.
+- `copy` return a new table by applying dynamic options to this table.