danny0405 commented on a change in pull request #11935:
URL: https://github.com/apache/flink/pull/11935#discussion_r422574699
##########
File path: flink-table/flink-sql-parser-hive/src/main/java/org/apache/flink/sql/parser/hive/ddl/HiveDDLUtils.java
##########
@@ -91,4 +154,149 @@ public static SqlTableOption toTableOption(String key, SqlNode value, SqlParserP
 	public static SqlTableOption toTableOption(String key, String value, SqlParserPos pos) {
 		return new SqlTableOption(SqlLiteral.createCharString(key, pos), SqlLiteral.createCharString(value, pos), pos);
 	}
+
+	public static void convertDataTypes(SqlNodeList columns) {
+		if (columns != null) {
+			for (SqlNode node : columns) {
+				convertDataTypes((SqlTableColumn) node);
+			}
+		}
+	}
+
+	// data types may need to be converted to comply with HiveQL, e.g. TIMESTAMP and BINARY
+	public static void convertDataTypes(SqlTableColumn column) {
+		column.setType(convertDataTypes(column.getType()));
+	}
+
+	private static SqlDataTypeSpec convertDataTypes(SqlDataTypeSpec typeSpec) {
+		SqlTypeNameSpec nameSpec = typeSpec.getTypeNameSpec();
+		SqlTypeNameSpec convertedNameSpec = convertDataTypes(nameSpec);
+		if (nameSpec != convertedNameSpec) {
+			typeSpec = new SqlDataTypeSpec(convertedNameSpec, typeSpec.getTimeZone(), typeSpec.getNullable(),
+				typeSpec.getParserPosition());
+		}
+		return typeSpec;
+	}
+
+	private static SqlTypeNameSpec convertDataTypes(SqlTypeNameSpec nameSpec) {
+		if (nameSpec instanceof SqlBasicTypeNameSpec) {
+			SqlBasicTypeNameSpec basicNameSpec = (SqlBasicTypeNameSpec) nameSpec;
+			if (basicNameSpec.getTypeName().getSimple().equalsIgnoreCase(SqlTypeName.TIMESTAMP.name())) {
+				if (basicNameSpec.getPrecision() < 0) {
Review comment:
Comparing directly with `SqlTypeName` is enough, since it is an `Enum`.
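For illustration, a minimal sketch of that direct comparison, assuming Calcite's `SqlTypeName.get(String)` lookup (which returns `null` for unknown names) and that the simple name is already upper-cased, as parsed type names typically are:

```java
// Sketch only: resolve the parsed name to the SqlTypeName enum once,
// then compare with == instead of repeated string comparisons.
SqlTypeName typeName = SqlTypeName.get(basicNameSpec.getTypeName().getSimple());
if (typeName == SqlTypeName.TIMESTAMP && basicNameSpec.getPrecision() < 0) {
	// rewrite to TIMESTAMP(9) as in the patch
} else if (typeName == SqlTypeName.BINARY && basicNameSpec.getPrecision() < 0) {
	// rewrite to VARBINARY(Integer.MAX_VALUE) as in the patch
}
```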
##########
File path: flink-table/flink-sql-parser-hive/src/main/java/org/apache/flink/sql/parser/hive/ddl/HiveDDLUtils.java
##########
@@ -91,4 +154,149 @@ public static SqlTableOption toTableOption(String key, SqlNode value, SqlParserP
 	public static SqlTableOption toTableOption(String key, String value, SqlParserPos pos) {
 		return new SqlTableOption(SqlLiteral.createCharString(key, pos), SqlLiteral.createCharString(value, pos), pos);
 	}
+
+	public static void convertDataTypes(SqlNodeList columns) {
+		if (columns != null) {
+			for (SqlNode node : columns) {
+				convertDataTypes((SqlTableColumn) node);
+			}
+		}
+	}
+
+	// data types may need to be converted to comply with HiveQL, e.g. TIMESTAMP and BINARY
+	public static void convertDataTypes(SqlTableColumn column) {
+		column.setType(convertDataTypes(column.getType()));
+	}
+
+	private static SqlDataTypeSpec convertDataTypes(SqlDataTypeSpec typeSpec) {
+		SqlTypeNameSpec nameSpec = typeSpec.getTypeNameSpec();
+		SqlTypeNameSpec convertedNameSpec = convertDataTypes(nameSpec);
+		if (nameSpec != convertedNameSpec) {
+			typeSpec = new SqlDataTypeSpec(convertedNameSpec, typeSpec.getTimeZone(), typeSpec.getNullable(),
+				typeSpec.getParserPosition());
+		}
+		return typeSpec;
+	}
+
+	private static SqlTypeNameSpec convertDataTypes(SqlTypeNameSpec nameSpec) {
+		if (nameSpec instanceof SqlBasicTypeNameSpec) {
+			SqlBasicTypeNameSpec basicNameSpec = (SqlBasicTypeNameSpec) nameSpec;
+			if (basicNameSpec.getTypeName().getSimple().equalsIgnoreCase(SqlTypeName.TIMESTAMP.name())) {
+				if (basicNameSpec.getPrecision() < 0) {
+					nameSpec = new SqlBasicTypeNameSpec(SqlTypeName.TIMESTAMP, 9, basicNameSpec.getScale(),
+						basicNameSpec.getCharSetName(), basicNameSpec.getParserPos());
+				}
+			} else if (basicNameSpec.getTypeName().getSimple().equalsIgnoreCase(SqlTypeName.BINARY.name())) {
+				if (basicNameSpec.getPrecision() < 0) {
+					nameSpec = new SqlBasicTypeNameSpec(SqlTypeName.VARBINARY, Integer.MAX_VALUE, basicNameSpec.getScale(),
+						basicNameSpec.getCharSetName(), basicNameSpec.getParserPos());
+				}
+			}
+		} else if (nameSpec instanceof ExtendedSqlCollectionTypeNameSpec) {
+			ExtendedSqlCollectionTypeNameSpec collectionNameSpec = (ExtendedSqlCollectionTypeNameSpec) nameSpec;
+			SqlTypeNameSpec elementNameSpec = collectionNameSpec.getElementTypeName();
+			SqlTypeNameSpec convertedElementNameSpec = convertDataTypes(elementNameSpec);
+			if (convertedElementNameSpec != elementNameSpec) {
+				nameSpec = new ExtendedSqlCollectionTypeNameSpec(convertedElementNameSpec,
+					collectionNameSpec.elementNullable(), collectionNameSpec.getCollectionTypeName(),
+					collectionNameSpec.unparseAsStandard(), collectionNameSpec.getParserPos());
+			}
+		} else if (nameSpec instanceof SqlMapTypeNameSpec) {
+			SqlMapTypeNameSpec mapNameSpec = (SqlMapTypeNameSpec) nameSpec;
+			SqlDataTypeSpec keyTypeSpec = mapNameSpec.getKeyType();
+			SqlDataTypeSpec valTypeSpec = mapNameSpec.getValType();
+			SqlDataTypeSpec convertedKeyTypeSpec = convertDataTypes(keyTypeSpec);
+			SqlDataTypeSpec convertedValTypeSpec = convertDataTypes(valTypeSpec);
+			if (keyTypeSpec != convertedKeyTypeSpec || valTypeSpec != convertedValTypeSpec) {
+				nameSpec = new SqlMapTypeNameSpec(convertedKeyTypeSpec, convertedValTypeSpec, nameSpec.getParserPos());
+			}
+		} else if (nameSpec instanceof ExtendedSqlRowTypeNameSpec) {
+			ExtendedSqlRowTypeNameSpec rowNameSpec = (ExtendedSqlRowTypeNameSpec) nameSpec;
+			List<SqlDataTypeSpec> fieldTypeSpecs = rowNameSpec.getFieldTypes();
+			List<SqlDataTypeSpec> convertedFieldTypeSpecs = new ArrayList<>(fieldTypeSpecs.size());
+			boolean updated = false;
+			for (SqlDataTypeSpec fieldTypeSpec : fieldTypeSpecs) {
+				SqlDataTypeSpec convertedFieldTypeSpec = convertDataTypes(fieldTypeSpec);
+				if (fieldTypeSpec != convertedFieldTypeSpec) {
+					updated = true;
+				}
+				convertedFieldTypeSpecs.add(convertedFieldTypeSpec);
+			}
+			if (updated) {
+				nameSpec = new ExtendedSqlRowTypeNameSpec(nameSpec.getParserPos(), rowNameSpec.getFieldNames(),
+					convertedFieldTypeSpecs, rowNameSpec.getComments(), rowNameSpec.unparseAsStandard());
+			}
+		}
+		return nameSpec;
+	}
+
+	// a constraint is by default ENABLE NOVALIDATE RELY
+	public static byte defaultTrait() {
+		byte res = enableConstraint((byte) 0);
+		res = relyConstraint(res);
+		return res;
+	}
+
+	// returns a constraint trait that requires ENABLE
+	public static byte enableConstraint(byte trait) {
+		return (byte) (trait | HIVE_CONSTRAINT_ENABLE);
+	}
+
+	// returns a constraint trait that doesn't require ENABLE
+	public static byte disableConstraint(byte trait) {
+		return (byte) (trait & (~HIVE_CONSTRAINT_ENABLE));
+	}
+
+	// returns a constraint trait that requires VALIDATE
+	public static byte validateConstraint(byte trait) {
+		return (byte) (trait | HIVE_CONSTRAINT_VALIDATE);
Review comment:
We can represent these constraints as a `Literal` and unparse them directly, just like `SqlConstraintEnforcement`. There is no need for these complex transitions.
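As a rough sketch of that alternative (the enum name and values below are hypothetical), each trait dimension could be an enum that wraps itself in a symbol literal via Calcite's `SqlLiteral.createSymbol`, the same pattern `SqlConstraintEnforcement` uses:

```java
import org.apache.calcite.sql.SqlLiteral;
import org.apache.calcite.sql.parser.SqlParserPos;

// Hypothetical enum for the ENABLE/DISABLE trait; VALIDATE/NOVALIDATE and
// RELY/NORELY would follow the same pattern.
public enum SqlHiveConstraintEnable {
	ENABLE, DISABLE;

	// Wrapping the enum value in a symbol literal lets the AST store it
	// and unparse it directly, with no bit mask to encode or decode.
	public SqlLiteral symbol(SqlParserPos pos) {
		return SqlLiteral.createSymbol(this, pos);
	}
}
```

The grammar would then return e.g. `SqlHiveConstraintEnable.ENABLE.symbol(getPos())` instead of threading a `byte` trait through the parser.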
##########
File path: flink-table/flink-sql-parser-hive/src/main/java/org/apache/flink/sql/parser/hive/ddl/HiveDDLUtils.java
##########
@@ -91,4 +154,149 @@ public static SqlTableOption toTableOption(String key, SqlNode value, SqlParserP
 	public static SqlTableOption toTableOption(String key, String value, SqlParserPos pos) {
 		return new SqlTableOption(SqlLiteral.createCharString(key, pos), SqlLiteral.createCharString(value, pos), pos);
 	}
+
+	public static void convertDataTypes(SqlNodeList columns) {
+		if (columns != null) {
+			for (SqlNode node : columns) {
+				convertDataTypes((SqlTableColumn) node);
+			}
+		}
+	}
+
+	// data types may need to be converted to comply with HiveQL, e.g. TIMESTAMP and BINARY
+	public static void convertDataTypes(SqlTableColumn column) {
+		column.setType(convertDataTypes(column.getType()));
+	}
+
+	private static SqlDataTypeSpec convertDataTypes(SqlDataTypeSpec typeSpec) {
+		SqlTypeNameSpec nameSpec = typeSpec.getTypeNameSpec();
+		SqlTypeNameSpec convertedNameSpec = convertDataTypes(nameSpec);
+		if (nameSpec != convertedNameSpec) {
+			typeSpec = new SqlDataTypeSpec(convertedNameSpec, typeSpec.getTimeZone(), typeSpec.getNullable(),
+				typeSpec.getParserPosition());
+		}
+		return typeSpec;
+	}
+
+	private static SqlTypeNameSpec convertDataTypes(SqlTypeNameSpec nameSpec) {
+		if (nameSpec instanceof SqlBasicTypeNameSpec) {
+			SqlBasicTypeNameSpec basicNameSpec = (SqlBasicTypeNameSpec) nameSpec;
+			if (basicNameSpec.getTypeName().getSimple().equalsIgnoreCase(SqlTypeName.TIMESTAMP.name())) {
+				if (basicNameSpec.getPrecision() < 0) {
+					nameSpec = new SqlBasicTypeNameSpec(SqlTypeName.TIMESTAMP, 9, basicNameSpec.getScale(),
+						basicNameSpec.getCharSetName(), basicNameSpec.getParserPos());
+				}
+			} else if (basicNameSpec.getTypeName().getSimple().equalsIgnoreCase(SqlTypeName.BINARY.name())) {
+				if (basicNameSpec.getPrecision() < 0) {
+					nameSpec = new SqlBasicTypeNameSpec(SqlTypeName.VARBINARY, Integer.MAX_VALUE, basicNameSpec.getScale(),
+						basicNameSpec.getCharSetName(), basicNameSpec.getParserPos());
+				}
+			}
+		} else if (nameSpec instanceof ExtendedSqlCollectionTypeNameSpec) {
+			ExtendedSqlCollectionTypeNameSpec collectionNameSpec = (ExtendedSqlCollectionTypeNameSpec) nameSpec;
+			SqlTypeNameSpec elementNameSpec = collectionNameSpec.getElementTypeName();
+			SqlTypeNameSpec convertedElementNameSpec = convertDataTypes(elementNameSpec);
+			if (convertedElementNameSpec != elementNameSpec) {
+				nameSpec = new ExtendedSqlCollectionTypeNameSpec(convertedElementNameSpec,
+					collectionNameSpec.elementNullable(), collectionNameSpec.getCollectionTypeName(),
+					collectionNameSpec.unparseAsStandard(), collectionNameSpec.getParserPos());
+			}
+		} else if (nameSpec instanceof SqlMapTypeNameSpec) {
+			SqlMapTypeNameSpec mapNameSpec = (SqlMapTypeNameSpec) nameSpec;
+			SqlDataTypeSpec keyTypeSpec = mapNameSpec.getKeyType();
+			SqlDataTypeSpec valTypeSpec = mapNameSpec.getValType();
+			SqlDataTypeSpec convertedKeyTypeSpec = convertDataTypes(keyTypeSpec);
+			SqlDataTypeSpec convertedValTypeSpec = convertDataTypes(valTypeSpec);
+			if (keyTypeSpec != convertedKeyTypeSpec || valTypeSpec != convertedValTypeSpec) {
+				nameSpec = new SqlMapTypeNameSpec(convertedKeyTypeSpec, convertedValTypeSpec, nameSpec.getParserPos());
+			}
+		} else if (nameSpec instanceof ExtendedSqlRowTypeNameSpec) {
+			ExtendedSqlRowTypeNameSpec rowNameSpec = (ExtendedSqlRowTypeNameSpec) nameSpec;
+			List<SqlDataTypeSpec> fieldTypeSpecs = rowNameSpec.getFieldTypes();
+			List<SqlDataTypeSpec> convertedFieldTypeSpecs = new ArrayList<>(fieldTypeSpecs.size());
+			boolean updated = false;
+			for (SqlDataTypeSpec fieldTypeSpec : fieldTypeSpecs) {
+				SqlDataTypeSpec convertedFieldTypeSpec = convertDataTypes(fieldTypeSpec);
+				if (fieldTypeSpec != convertedFieldTypeSpec) {
+					updated = true;
+				}
+				convertedFieldTypeSpecs.add(convertedFieldTypeSpec);
+			}
+			if (updated) {
+				nameSpec = new ExtendedSqlRowTypeNameSpec(nameSpec.getParserPos(), rowNameSpec.getFieldNames(),
+					convertedFieldTypeSpecs, rowNameSpec.getComments(), rowNameSpec.unparseAsStandard());
+			}
+		}
+		return nameSpec;
+	}
+
+	// a constraint is by default ENABLE NOVALIDATE RELY
+	public static byte defaultTrait() {
+		byte res = enableConstraint((byte) 0);
+		res = relyConstraint(res);
+		return res;
+	}
+
+	// returns a constraint trait that requires ENABLE
+	public static byte enableConstraint(byte trait) {
+		return (byte) (trait | HIVE_CONSTRAINT_ENABLE);
+	}
+
+	// returns a constraint trait that doesn't require ENABLE
+	public static byte disableConstraint(byte trait) {
+		return (byte) (trait & (~HIVE_CONSTRAINT_ENABLE));
+	}
+
+	// returns a constraint trait that requires VALIDATE
+	public static byte validateConstraint(byte trait) {
+		return (byte) (trait | HIVE_CONSTRAINT_VALIDATE);
Review comment:
Then the unparse of `SqlCreateTable` can also be simplified.
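For example, once the traits are stored as symbol literals, the unparse code could plausibly collapse to something like this (sketch; `enable`, `validate`, and `rely` are hypothetical fields holding the stored literals):

```java
// A symbol literal unparses to its enum name, so no bit decoding is needed.
enable.unparse(writer, leftPrec, rightPrec);    // e.g. ENABLE or DISABLE
validate.unparse(writer, leftPrec, rightPrec);  // e.g. VALIDATE or NOVALIDATE
rely.unparse(writer, leftPrec, rightPrec);      // e.g. RELY or NORELY
```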
##########
File path: flink-table/flink-sql-parser-hive/src/main/codegen/includes/parserImpls.ftl
##########
@@ -218,4 +218,549 @@ SqlNode TableOption() :
{
return new SqlTableOption(key, value, getPos());
}
-}
\ No newline at end of file
+}
+
+
+SqlCreate SqlCreateTemporary(Span s, boolean replace) :
+{
+ boolean isTemporary = false;
+ SqlCreate create;
+}
+{
+ [ <TEMPORARY> {isTemporary = true;} ]
+
+ create = SqlCreateTable(s, isTemporary)
+ {
+ return create;
+ }
+}
+
+/**
+* Parse a "Show Tables" metadata query command.
+*/
+SqlShowTables SqlShowTables() :
+{
+}
+{
+ <SHOW> <TABLES>
+ {
+ return new SqlShowTables(getPos());
+ }
+}
+
+/**
+ * Here we add Rich in className to distinguish from calcite's original SqlDescribeTable.
+ */
+SqlRichDescribeTable SqlRichDescribeTable() :
+{
+ SqlIdentifier tableName;
+ SqlParserPos pos;
+ boolean isExtended = false;
+}
+{
+ <DESCRIBE> { pos = getPos();}
+ [ LOOKAHEAD(2) ( <EXTENDED> | <FORMATTED> ) { isExtended = true;} ]
+ tableName = CompoundIdentifier()
+ {
+ return new SqlRichDescribeTable(pos, tableName, isExtended);
+ }
+}
+
+SqlCreate SqlCreateTable(Span s, boolean isTemporary) :
+{
+ final SqlParserPos startPos = s.pos();
+ SqlIdentifier tableName;
+ SqlNodeList primaryKeyList = SqlNodeList.EMPTY;
+ List<SqlNodeList> uniqueKeysList = new ArrayList<SqlNodeList>();
+ SqlNodeList columnList = SqlNodeList.EMPTY;
+ SqlCharStringLiteral comment = null;
+
+ SqlNodeList propertyList;
+ SqlNodeList partitionColumns = SqlNodeList.EMPTY;
+ SqlParserPos pos = startPos;
+ boolean isExternal = false;
+ HiveTableRowFormat rowFormat = null;
+ HiveTableStoredAs storedAs = null;
+ SqlCharStringLiteral location = null;
+ HiveTableCreationContext ctx = new HiveTableCreationContext();
+}
+{
+ [ <EXTERNAL> { isExternal = true; } ]
+ <TABLE> { propertyList = new SqlNodeList(getPos()); }
+
+ tableName = CompoundIdentifier()
+ [
+ <LPAREN> { pos = getPos(); }
+ TableColumn(ctx)
+ (
+ <COMMA> TableColumn(ctx)
+ )*
+ {
+ pos = pos.plus(getPos());
+ columnList = new SqlNodeList(ctx.columnList, pos);
+ }
+ <RPAREN>
+ ]
+ [ <COMMENT> <QUOTED_STRING> {
+ comment = createStringLiteral(token.image, getPos());
+ }]
+ [
+ <PARTITIONED> <BY>
+ <LPAREN>
+ {
+ List<SqlNode> partCols = new ArrayList();
+ if ( columnList == SqlNodeList.EMPTY ) {
+ columnList = new SqlNodeList(pos.plus(getPos()));
+ }
+ }
+ PartColumnDef(partCols)
+ (
+ <COMMA> PartColumnDef(partCols)
+ )*
+ {
+ partitionColumns = new SqlNodeList(partCols, pos.plus(getPos()));
+ }
+ <RPAREN>
+ ]
+ [
+ <ROW> <FORMAT>
+ rowFormat = TableRowFormat(getPos())
+ ]
+ [
+ <STORED> <AS>
+ storedAs = TableStoredAs(getPos())
+ ]
+ [
+ <LOCATION> <QUOTED_STRING>
+ { location = createStringLiteral(token.image, getPos()); }
+ ]
+ [
+ <TBLPROPERTIES>
+ {
+ SqlNodeList props = TableProperties();
+ for (SqlNode node : props) {
+ propertyList.add(node);
+ }
+ }
+ ]
+ {
+ return new SqlCreateHiveTable(startPos.plus(getPos()),
+ tableName,
+ columnList,
+ ctx,
+ propertyList,
+ partitionColumns,
+ comment,
+ isTemporary,
+ isExternal,
+ rowFormat,
+ storedAs,
+ location);
+ }
+}
+
+SqlDrop SqlDropTable(Span s, boolean replace) :
+{
+ SqlIdentifier tableName = null;
+ boolean ifExists = false;
+}
+{
+ <TABLE>
+
+ (
+ <IF> <EXISTS> { ifExists = true; }
+ |
+ { ifExists = false; }
+ )
+
+ tableName = CompoundIdentifier()
+
+ {
+ return new SqlDropTable(s.pos(), tableName, ifExists, false);
+ }
+}
+
+void TableColumn2(List<SqlNode> list) :
+{
+ SqlParserPos pos;
+ SqlIdentifier name;
+ SqlDataTypeSpec type;
+ SqlCharStringLiteral comment = null;
+}
+{
+ name = SimpleIdentifier()
+ type = ExtendedDataType()
+ [ <COMMENT> <QUOTED_STRING> {
+ comment = createStringLiteral(token.image, getPos());
+ }]
+ {
+        SqlTableColumn tableColumn = new SqlTableColumn(name, type, null, comment, getPos());
+ list.add(tableColumn);
+ }
+}
+
+void PartColumnDef(List<SqlNode> list) :
+{
+ SqlParserPos pos;
+ SqlIdentifier name;
+ SqlDataTypeSpec type;
+ SqlCharStringLiteral comment = null;
+}
+{
+ name = SimpleIdentifier()
+ type = DataType()
+ [ <COMMENT> <QUOTED_STRING> {
+ comment = createStringLiteral(token.image, getPos());
+ }]
+ {
+ type = type.withNullable(true);
+        SqlTableColumn tableColumn = new SqlTableColumn(name, type, null, comment, getPos());
+ list.add(tableColumn);
+ }
+}
+
+void TableColumn(HiveTableCreationContext context) :
+{
+}
+{
+ (LOOKAHEAD(2)
+ TableColumnWithConstraint(context)
+ |
+ TableConstraint(context)
+ )
+}
+
+/** Parses a table constraint for CREATE TABLE. */
+void TableConstraint(HiveTableCreationContext context) :
+{
+ SqlIdentifier constraintName = null;
+ final SqlLiteral spec;
+ final SqlNodeList columns;
+}
+{
+ [ constraintName = ConstraintName() ]
+ spec = TableConstraintSpec()
+ columns = ParenthesizedSimpleIdentifierList()
+ context.pkTrait = ConstraintTrait()
+ {
+ SqlTableConstraint tableConstraint = new SqlTableConstraint(
+ constraintName,
+ spec,
+ columns,
+            SqlConstraintEnforcement.NOT_ENFORCED.symbol(getPos()),
+ true,
+ getPos());
+ context.constraints.add(tableConstraint);
+ }
+}
+
+SqlLiteral TableConstraintSpec() :
+{
+ SqlLiteral spec;
+}
+{
+ <PRIMARY> <KEY>
+ {
+ spec = SqlUniqueSpec.PRIMARY_KEY.symbol(getPos());
+ return spec;
+ }
+}
+
+SqlIdentifier ConstraintName() :
+{
+ SqlIdentifier constraintName;
+}
+{
+ <CONSTRAINT> constraintName = SimpleIdentifier() {
+ return constraintName;
+ }
+}
+
+void TableColumnWithConstraint(HiveTableCreationContext context) :
+{
+ SqlParserPos pos;
+ SqlIdentifier name;
+ SqlDataTypeSpec type;
+ SqlCharStringLiteral comment = null;
+}
+{
+ name = SimpleIdentifier()
+ type = ExtendedDataType()
+ {
+ // we have NOT NULL column constraint here
+ if (!type.getNullable()) {
+ if(context.notNullTraits == null) {
+ context.notNullTraits = new ArrayList();
+ }
+ context.notNullTraits.add(ConstraintTrait());
+ }
+        SqlTableColumn tableColumn = new SqlTableColumn(name, type, null, comment, getPos());
+ context.columnList.add(tableColumn);
+ }
+ [ <COMMENT> <QUOTED_STRING> {
+ comment = createStringLiteral(token.image, getPos());
+ }]
+}
+
+byte ConstraintTrait() :
+{
+ // a constraint is by default ENABLE NOVALIDATE RELY
+ byte constraintTrait = HiveDDLUtils.defaultTrait();
+}
+{
+ [
+ <ENABLE>
+ |
+        <DISABLE> { constraintTrait = HiveDDLUtils.disableConstraint(constraintTrait); }
+ ]
+ [
+ <NOVALIDATE>
+ |
+        <VALIDATE> { constraintTrait = HiveDDLUtils.validateConstraint(constraintTrait); }
+ ]
+ [
+ <RELY>
+ |
+        <NORELY> { constraintTrait = HiveDDLUtils.noRelyConstraint(constraintTrait); }
+ ]
+ { return constraintTrait; }
+}
+
+/**
+* Different with {@link #DataType()}, we support a [ NULL | NOT NULL ] suffix syntax for both the
+* collection element data type and the data type itself.
+*
+* <p>See {@link #SqlDataTypeSpec} for the syntax details of {@link #DataType()}.
+*/
+SqlDataTypeSpec ExtendedDataType() :
+{
+ SqlTypeNameSpec typeName;
+ final Span s;
+ boolean elementNullable = true;
+ boolean nullable = true;
+}
+{
+ <#-- #DataType does not take care of the nullable attribute. -->
+ typeName = TypeName() {
+ s = span();
+ }
+ (
+ LOOKAHEAD(3)
+ elementNullable = NullableOptDefaultTrue()
+ typeName = ExtendedCollectionsTypeName(typeName, elementNullable)
+ )*
+ nullable = NullableOptDefaultTrue()
+ {
+        return new SqlDataTypeSpec(typeName, s.end(this)).withNullable(nullable);
+ }
+}
+
+HiveTableStoredAs TableStoredAs(SqlParserPos pos) :
+{
+ SqlIdentifier fileFormat = null;
+ SqlCharStringLiteral inputFormat = null;
+ SqlCharStringLiteral outputFormat = null;
+}
+{
+ (
+ LOOKAHEAD(2)
+        <INPUTFORMAT> <QUOTED_STRING> { inputFormat = createStringLiteral(token.image, getPos()); }
+        <OUTPUTFORMAT> <QUOTED_STRING> { outputFormat = createStringLiteral(token.image, getPos()); }
+        { return HiveTableStoredAs.ofInputOutputFormat(pos, inputFormat, outputFormat); }
+ |
+ fileFormat = SimpleIdentifier()
+ { return HiveTableStoredAs.ofFileFormat(pos, fileFormat); }
+ )
+}
+
+HiveTableRowFormat TableRowFormat(SqlParserPos pos) :
+{
+ SqlCharStringLiteral fieldsTerminator = null;
+ SqlCharStringLiteral escape = null;
+ SqlCharStringLiteral collectionTerminator = null;
+ SqlCharStringLiteral mapKeyTerminator = null;
+ SqlCharStringLiteral linesTerminator = null;
+ SqlCharStringLiteral nullAs = null;
+ SqlCharStringLiteral serdeClass = null;
+ SqlNodeList serdeProps = null;
+}
+{
+ (
+ <DELIMITED>
+ [ <FIELDS> <TERMINATED> <BY> <QUOTED_STRING>
+ { fieldsTerminator = createStringLiteral(token.image, getPos()); }
+        [ <ESCAPED> <BY> <QUOTED_STRING> { escape = createStringLiteral(token.image, getPos()); } ]
+ ]
+    [ <COLLECTION> <ITEMS> <TERMINATED> <BY> <QUOTED_STRING> { collectionTerminator = createStringLiteral(token.image, getPos()); } ]
+    [ <MAP> <KEYS> <TERMINATED> <BY> <QUOTED_STRING> { mapKeyTerminator = createStringLiteral(token.image, getPos()); } ]
+    [ <LINES> <TERMINATED> <BY> <QUOTED_STRING> { linesTerminator = createStringLiteral(token.image, getPos()); } ]
+    [ <NULL> <DEFINED> <AS> <QUOTED_STRING> { nullAs = createStringLiteral(token.image, getPos()); } ]
+    { return HiveTableRowFormat.withDelimited(pos, fieldsTerminator, escape, collectionTerminator, mapKeyTerminator, linesTerminator, nullAs); }
+ |
+ <SERDE> <QUOTED_STRING>
+ {
+ serdeClass = createStringLiteral(token.image, getPos());
+ }
+ [ <WITH> <SERDEPROPERTIES> serdeProps = TableProperties() ]
+ { return HiveTableRowFormat.withSerDe(pos, serdeClass, serdeProps); }
+ )
+}
+
+/**
+* A sql type name extended basic data type, it has a counterpart basic
+* sql type name but always represents as a special alias compared with the standard name.
+*
+* <p>For example, STRING is synonym of VARCHAR(INT_MAX)
+* and BYTES is synonym of VARBINARY(INT_MAX).
+*/
+SqlTypeNameSpec ExtendedSqlBasicTypeName() :
+{
+ final SqlTypeName typeName;
+ final String typeAlias;
+ int precision = -1;
+}
+{
+ (
+ <STRING> {
+ typeName = SqlTypeName.VARCHAR;
+ typeAlias = token.image;
+ precision = Integer.MAX_VALUE;
+ }
+ |
+ <BYTES> {
+ typeName = SqlTypeName.VARBINARY;
+ typeAlias = token.image;
Review comment:
It seems that Hive does not support the `BYTES` type [1], and its VARCHAR type's max precision is 65535 [2].
[1] https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-CreateTableCreate/Drop/TruncateTable
[2] https://cwiki.apache.org/confluence/display/Hive/LanguageManual+Types#LanguageManualTypes-ColumnTypes
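If the grammar were adjusted along those lines, one hedged sketch of what the `STRING` branch might become (dropping the unsupported `BYTES` branch, and assuming the cap from [2] should replace `Integer.MAX_VALUE`):

```
    <STRING> {
        typeName = SqlTypeName.VARCHAR;
        typeAlias = token.image;
        precision = 65535; // Hive's documented max VARCHAR length [2]
    }
```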
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]