lirui-apache commented on a change in pull request #11935:
URL: https://github.com/apache/flink/pull/11935#discussion_r422616831
##########
File path:
flink-table/flink-sql-parser-hive/src/main/java/org/apache/flink/sql/parser/hive/ddl/HiveDDLUtils.java
##########
@@ -91,4 +154,149 @@ public static SqlTableOption toTableOption(String key,
SqlNode value, SqlParserP
/**
 * Creates a {@link SqlTableOption} whose key and value are both plain strings,
 * wrapping each as a character-string literal at the given parser position.
 */
public static SqlTableOption toTableOption(String key, String value, SqlParserPos pos) {
	SqlLiteral keyLiteral = SqlLiteral.createCharString(key, pos);
	SqlLiteral valueLiteral = SqlLiteral.createCharString(value, pos);
	return new SqlTableOption(keyLiteral, valueLiteral, pos);
}
+
+ public static void convertDataTypes(SqlNodeList columns) {
+ if (columns != null) {
+ for (SqlNode node : columns) {
+ convertDataTypes((SqlTableColumn) node);
+ }
+ }
+ }
+
+ // data types may need to be converted to comply with HiveQL, e.g.
TIMESTAMP and BINARY
+ public static void convertDataTypes(SqlTableColumn column) {
+ column.setType(convertDataTypes(column.getType()));
+ }
+
+ private static SqlDataTypeSpec convertDataTypes(SqlDataTypeSpec
typeSpec) {
+ SqlTypeNameSpec nameSpec = typeSpec.getTypeNameSpec();
+ SqlTypeNameSpec convertedNameSpec = convertDataTypes(nameSpec);
+ if (nameSpec != convertedNameSpec) {
+ typeSpec = new SqlDataTypeSpec(convertedNameSpec,
typeSpec.getTimeZone(), typeSpec.getNullable(),
+ typeSpec.getParserPosition());
+ }
+ return typeSpec;
+ }
+
+ private static SqlTypeNameSpec convertDataTypes(SqlTypeNameSpec
nameSpec) {
+ if (nameSpec instanceof SqlBasicTypeNameSpec) {
+ SqlBasicTypeNameSpec basicNameSpec =
(SqlBasicTypeNameSpec) nameSpec;
+ if
(basicNameSpec.getTypeName().getSimple().equalsIgnoreCase(SqlTypeName.TIMESTAMP.name()))
{
+ if (basicNameSpec.getPrecision() < 0) {
+ nameSpec = new
SqlBasicTypeNameSpec(SqlTypeName.TIMESTAMP, 9, basicNameSpec.getScale(),
+
basicNameSpec.getCharSetName(), basicNameSpec.getParserPos());
+ }
+ } else if
(basicNameSpec.getTypeName().getSimple().equalsIgnoreCase(SqlTypeName.BINARY.name()))
{
+ if (basicNameSpec.getPrecision() < 0) {
+ nameSpec = new
SqlBasicTypeNameSpec(SqlTypeName.VARBINARY, Integer.MAX_VALUE,
basicNameSpec.getScale(),
+
basicNameSpec.getCharSetName(), basicNameSpec.getParserPos());
+ }
+ }
+ } else if (nameSpec instanceof
ExtendedSqlCollectionTypeNameSpec) {
+ ExtendedSqlCollectionTypeNameSpec collectionNameSpec =
(ExtendedSqlCollectionTypeNameSpec) nameSpec;
+ SqlTypeNameSpec elementNameSpec =
collectionNameSpec.getElementTypeName();
+ SqlTypeNameSpec convertedElementNameSpec =
convertDataTypes(elementNameSpec);
+ if (convertedElementNameSpec != elementNameSpec) {
+ nameSpec = new
ExtendedSqlCollectionTypeNameSpec(convertedElementNameSpec,
+
collectionNameSpec.elementNullable(),
collectionNameSpec.getCollectionTypeName(),
+
collectionNameSpec.unparseAsStandard(), collectionNameSpec.getParserPos());
+ }
+ } else if (nameSpec instanceof SqlMapTypeNameSpec) {
+ SqlMapTypeNameSpec mapNameSpec = (SqlMapTypeNameSpec)
nameSpec;
+ SqlDataTypeSpec keyTypeSpec = mapNameSpec.getKeyType();
+ SqlDataTypeSpec valTypeSpec = mapNameSpec.getValType();
+ SqlDataTypeSpec convertedKeyTypeSpec =
convertDataTypes(keyTypeSpec);
+ SqlDataTypeSpec convertedValTypeSpec =
convertDataTypes(valTypeSpec);
+ if (keyTypeSpec != convertedKeyTypeSpec || valTypeSpec
!= convertedValTypeSpec) {
+ nameSpec = new
SqlMapTypeNameSpec(convertedKeyTypeSpec, convertedValTypeSpec,
nameSpec.getParserPos());
+ }
+ } else if (nameSpec instanceof ExtendedSqlRowTypeNameSpec) {
+ ExtendedSqlRowTypeNameSpec rowNameSpec =
(ExtendedSqlRowTypeNameSpec) nameSpec;
+ List<SqlDataTypeSpec> fieldTypeSpecs =
rowNameSpec.getFieldTypes();
+ List<SqlDataTypeSpec> convertedFieldTypeSpecs = new
ArrayList<>(fieldTypeSpecs.size());
+ boolean updated = false;
+ for (SqlDataTypeSpec fieldTypeSpec : fieldTypeSpecs) {
+ SqlDataTypeSpec convertedFieldTypeSpec =
convertDataTypes(fieldTypeSpec);
+ if (fieldTypeSpec != convertedFieldTypeSpec) {
+ updated = true;
+ }
+
convertedFieldTypeSpecs.add(convertedFieldTypeSpec);
+ }
+ if (updated) {
+ nameSpec = new
ExtendedSqlRowTypeNameSpec(nameSpec.getParserPos(), rowNameSpec.getFieldNames(),
+ convertedFieldTypeSpecs,
rowNameSpec.getComments(), rowNameSpec.unparseAsStandard());
+ }
+ }
+ return nameSpec;
+ }
+
+ // a constraint is by default ENABLE NOVALIDATE RELY
+ public static byte defaultTrait() {
+ byte res = enableConstraint((byte) 0);
+ res = relyConstraint(res);
+ return res;
+ }
+
+ // returns a constraint trait that requires ENABLE
+ public static byte enableConstraint(byte trait) {
+ return (byte) (trait | HIVE_CONSTRAINT_ENABLE);
+ }
+
+ // returns a constraint trait that doesn't require ENABLE
+ public static byte disableConstraint(byte trait) {
+ return (byte) (trait & (~HIVE_CONSTRAINT_ENABLE));
+ }
+
+ // returns a constraint trait that requires VALIDATE
+ public static byte validateConstraint(byte trait) {
+ return (byte) (trait | HIVE_CONSTRAINT_VALIDATE);
Review comment:
I added enums for these keywords and use them during unparse. I still
encode them as bytes in the table properties, to be consistent with how we
currently deal with constraints in the Hive catalog.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]