This is an automated email from the ASF dual-hosted git repository.
ayushsaxena pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new 748838a8748 HIVE-29383: Iceberg: [V3] Add support for timestamp with nanosecond precision (#6258)
748838a8748 is described below
commit 748838a874880d3c45cb64c84b4cde4132ce74e2
Author: Ayush Saxena <[email protected]>
AuthorDate: Thu Jan 22 18:25:59 2026 +0530
HIVE-29383: Iceberg: [V3] Add support for timestamp with nanosecond precision (#6258)
---
.../java/org/apache/hadoop/hive/ql/ErrorMsg.java | 1 +
.../apache/iceberg/hive/HiveSchemaConverter.java | 10 ++
.../org/apache/iceberg/hive/HiveSchemaUtil.java | 20 +++-
.../objectinspector/IcebergObjectInspector.java | 5 +
.../IcebergTimestampObjectInspectorHive3.java | 18 +++-
...ebergTimestampWithZoneObjectInspectorHive3.java | 17 +++-
.../mapreduce/HiveIdentityPartitionConverters.java | 6 ++
.../src/test/queries/positive/timestamp_ns.q | 40 ++++++++
.../src/test/results/positive/timestamp_ns.q.out | 110 +++++++++++++++++++++
.../org/apache/hadoop/hive/ql/parse/HiveParser.g | 11 ++-
.../org/apache/hadoop/hive/ql/metadata/Table.java | 12 ++-
.../calcite/translator/TypeConverter.java | 2 +-
.../hadoop/hive/ql/parse/BaseSemanticAnalyzer.java | 13 ++-
.../hive/ql/udf/generic/GenericUDFTimestamp.java | 3 +
.../clientnegative/timestamp_ns_add_column.q | 2 +
.../clientnegative/timestamp_ns_non_iceberg.q | 1 +
.../clientnegative/timestamp_ns_add_column.q.out | 13 +++
.../clientnegative/timestamp_ns_non_iceberg.q.out | 5 +
.../primitive/JavaTimestampObjectInspector.java | 5 +
.../primitive/PrimitiveObjectInspectorFactory.java | 15 +++
.../WritableTimestampObjectInspector.java | 5 +
.../org/apache/hadoop/hive/serde2/thrift/Type.java | 10 +-
.../serde2/typeinfo/TimestampLocalTZTypeInfo.java | 49 ++++++---
.../hive/serde2/typeinfo/TimestampTypeInfo.java | 77 +++++++++++++++
.../hive/serde2/typeinfo/TypeInfoFactory.java | 46 ++++++++-
.../hadoop/hive/serde2/typeinfo/TypeInfoUtils.java | 17 ++++
26 files changed, 478 insertions(+), 35 deletions(-)
diff --git a/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 7ab0604252b..57f7cadac18 100644
--- a/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -499,6 +499,7 @@ public enum ErrorMsg {
CATALOG_ALREADY_EXISTS(10444, "Catalog {0} already exists", true),
CATALOG_NOT_EXISTS(10445, "Catalog {0} does not exists:", true),
INVALID_SCHEDULED_QUERY(10446, "Scheduled query {0} does not exist", true),
+ UNSUPPORTED_TIMESTAMP_PRECISION(10447, "Unsupported value for precision: {0}", true),
//========================== 20000 range starts here ========================//
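For reference, this new error code is what the serde2 type-info constructors added later in this patch throw for any precision other than 6 or 9. A minimal sketch of how the message renders (the demo class is hypothetical; ErrorMsg.format is invoked exactly as the constructors below do):

    import org.apache.hadoop.hive.ql.ErrorMsg;

    public class PrecisionErrorSketch {
      public static void main(String[] args) {
        // format() substitutes {0} with the offending precision.
        System.out.println(ErrorMsg.UNSUPPORTED_TIMESTAMP_PRECISION.format(String.valueOf(3)));
        // Expected output: Unsupported value for precision: 3
      }
    }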
diff --git a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveSchemaConverter.java b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveSchemaConverter.java
index 330f67e32bb..22d49556fa9 100644
--- a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveSchemaConverter.java
+++ b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveSchemaConverter.java
@@ -27,6 +27,8 @@
import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TimestampLocalTZTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TimestampTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.iceberg.Schema;
import org.apache.iceberg.expressions.Expressions;
@@ -130,6 +132,10 @@ Type convertType(TypeInfo typeInfo, String defaultValue) {
case STRING:
return Types.StringType.get();
case TIMESTAMP:
+ TimestampTypeInfo ts = (TimestampTypeInfo) typeInfo;
+ if (ts.getPrecision() == 9) {
+ return Types.TimestampNanoType.withoutZone();
+ }
return Types.TimestampType.withoutZone();
case DATE:
return Types.DateType.get();
@@ -141,6 +147,10 @@ Type convertType(TypeInfo typeInfo, String defaultValue) {
default:
// special case for Timestamp with Local TZ which is only available in Hive3
if ("TIMESTAMPLOCALTZ".equalsIgnoreCase(((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory().name())) {
+ TimestampLocalTZTypeInfo tz = (TimestampLocalTZTypeInfo) typeInfo;
+ if (tz.getPrecision() == 9) {
+ return Types.TimestampNanoType.withZone();
+ }
return Types.TimestampType.withZone();
}
throw new IllegalArgumentException("Unsupported Hive type (" +
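Taken on its own, the conversion rule this hunk introduces maps Hive's parameterized timestamp onto Iceberg's V3 nanosecond type. A minimal sketch under the assumption that the surrounding Iceberg API is on the classpath (the helper name toIceberg is hypothetical):

    import org.apache.hadoop.hive.serde2.typeinfo.TimestampTypeInfo;
    import org.apache.iceberg.types.Type;
    import org.apache.iceberg.types.Types;

    public class TimestampMappingSketch {
      // timestamp     -> TimestampType (microseconds)
      // timestamp(9)  -> TimestampNanoType (nanoseconds, Iceberg V3)
      static Type toIceberg(TimestampTypeInfo ts) {
        return ts.getPrecision() == 9
            ? Types.TimestampNanoType.withoutZone()
            : Types.TimestampType.withoutZone();
      }
    }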
diff --git a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveSchemaUtil.java b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveSchemaUtil.java
index 8277546b525..ddcdc150928 100644
--- a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveSchemaUtil.java
+++ b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveSchemaUtil.java
@@ -230,6 +230,9 @@ private static void getDefaultValDiff(Types.NestedField field, Map<String, Strin
if (field.type().isPrimitiveType()) {
Object expectedDefault = HiveSchemaUtil.getDefaultValue(defaultStr, field.type());
+ if (expectedDefault instanceof Literal<?>) {
+ expectedDefault = ((Literal<?>) expectedDefault).value();
+ }
if (!Objects.equals(expectedDefault, field.writeDefault())) {
difference.addDefaultChanged(field, expectedDefault);
}
@@ -379,6 +382,12 @@ public static String convertToTypeString(Type type) {
return "timestamp with local time zone";
}
return "timestamp";
+ case TIMESTAMP_NANO:
+ Types.TimestampNanoType timestampNanoType = (Types.TimestampNanoType) type;
+ if (timestampNanoType.shouldAdjustToUTC()) {
+ return "timestamp with local time zone(9)";
+ }
+ return "timestamp(9)";
case FIXED:
case BINARY:
return "binary";
@@ -506,6 +515,15 @@ public static Object convertToWriteType(Object value, Type type) {
DateTimeUtil.timestampFromMicros((Long) value);
}
break;
+ case TIMESTAMP_NANO:
+ // Convert nanoseconds since epoch (Long) to LocalDateTime
+ if (value instanceof Long) {
+ Types.TimestampNanoType timestampNanoType = (Types.TimestampNanoType) type;
+ return timestampNanoType.shouldAdjustToUTC() ? DateTimeUtil.timestamptzFromNanos((Long) value) : DateTimeUtil.timestampFromNanos((Long) value);
+ }
+ break;
default:
// For other types, no conversion needed
return value;
@@ -536,7 +554,7 @@ public static Object getDefaultValue(String defaultValue, Type type) {
}
return switch (type.typeId()) {
case DATE, TIME, TIMESTAMP, TIMESTAMP_NANO ->
- Literal.of(stripQuotes(defaultValue)).to(type).value();
+ Literal.of(stripQuotes(defaultValue)).to(type);
default -> Conversions.fromPartitionString(type, stripQuotes(defaultValue));
};
}
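Two things happen in this file: getDefaultValue now returns the Iceberg Literal itself for date/time types (hence the new value() unwrap in getDefaultValDiff), and convertToWriteType gains a nanosecond branch built on DateTimeUtil. A minimal sketch of the latter, with a hypothetical epoch-nanos sample value:

    import java.time.LocalDateTime;
    import java.time.OffsetDateTime;
    import org.apache.iceberg.util.DateTimeUtil;

    public class NanoConversionSketch {
      public static void main(String[] args) {
        long nanos = 1_766_052_930_123_456_789L; // hypothetical sample value
        LocalDateTime withoutZone = DateTimeUtil.timestampFromNanos(nanos);
        OffsetDateTime withZone = DateTimeUtil.timestamptzFromNanos(nanos);
        System.out.println(withoutZone + " / " + withZone);
      }
    }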
diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/serde/objectinspector/IcebergObjectInspector.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/serde/objectinspector/IcebergObjectInspector.java
index 0951e30128a..4a9c4e71619 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/serde/objectinspector/IcebergObjectInspector.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/serde/objectinspector/IcebergObjectInspector.java
@@ -133,6 +133,11 @@ public ObjectInspector primitive(Type.PrimitiveType primitiveType) {
case TIMESTAMP:
boolean adjustToUTC = ((Types.TimestampType) primitiveType).shouldAdjustToUTC();
return adjustToUTC ? TIMESTAMP_INSPECTOR_WITH_TZ : TIMESTAMP_INSPECTOR;
+ case TIMESTAMP_NANO:
+ boolean adjustUTC = ((Types.TimestampNanoType) primitiveType).shouldAdjustToUTC();
+ return adjustUTC ?
+ IcebergTimestampWithZoneObjectInspectorHive3.get(9) :
+ IcebergTimestampObjectInspectorHive3.get(9);
case TIME:
return IcebergTimeObjectInspector.get();
default:
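The get(int) overloads used here are added in the next two files; precision 9 selects the nano-aware singletons, anything else keeps the classic microsecond inspectors. A minimal usage sketch (the expected type names follow from the serde2 changes later in this patch):

    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    import org.apache.iceberg.mr.hive.serde.objectinspector.IcebergTimestampObjectInspectorHive3;
    import org.apache.iceberg.mr.hive.serde.objectinspector.IcebergTimestampWithZoneObjectInspectorHive3;

    public class InspectorSelectionSketch {
      public static void main(String[] args) {
        ObjectInspector noZone = IcebergTimestampObjectInspectorHive3.get(9);
        ObjectInspector withZone = IcebergTimestampWithZoneObjectInspectorHive3.get(9);
        System.out.println(noZone.getTypeName());   // expected: timestamp(9)
        System.out.println(withZone.getTypeName()); // expected: timestamp with local time zone(9)
      }
    }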
diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/serde/objectinspector/IcebergTimestampObjectInspectorHive3.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/serde/objectinspector/IcebergTimestampObjectInspectorHive3.java
index f2e5c12dab8..714b069c186 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/serde/objectinspector/IcebergTimestampObjectInspectorHive3.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/serde/objectinspector/IcebergTimestampObjectInspectorHive3.java
@@ -26,20 +26,32 @@
import org.apache.hadoop.hive.serde2.io.TimestampWritableV2;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.AbstractPrimitiveJavaObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
public class IcebergTimestampObjectInspectorHive3 extends AbstractPrimitiveJavaObjectInspector
implements TimestampObjectInspector, WriteObjectInspector {
- private static final IcebergTimestampObjectInspectorHive3 INSTANCE = new IcebergTimestampObjectInspectorHive3();
+ private static final IcebergTimestampObjectInspectorHive3 INSTANCE =
+ new IcebergTimestampObjectInspectorHive3(TypeInfoFactory.timestampTypeInfo);
+
+ private static final IcebergTimestampObjectInspectorHive3 NANO_INSTANCE =
+ new IcebergTimestampObjectInspectorHive3(TypeInfoFactory.nanoTimestampTypeInfo);
public static IcebergTimestampObjectInspectorHive3 get() {
return INSTANCE;
}
- private IcebergTimestampObjectInspectorHive3() {
- super(TypeInfoFactory.timestampTypeInfo);
+ public static IcebergTimestampObjectInspectorHive3 get(int precision) {
+ if (precision == 9) {
+ return NANO_INSTANCE;
+ }
+ return INSTANCE;
+ }
+
+ private IcebergTimestampObjectInspectorHive3(PrimitiveTypeInfo typeInfo) {
+ super(typeInfo);
}
@Override
diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/serde/objectinspector/IcebergTimestampWithZoneObjectInspectorHive3.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/serde/objectinspector/IcebergTimestampWithZoneObjectInspectorHive3.java
index 864f3db4d9d..e94f9034c34 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/serde/objectinspector/IcebergTimestampWithZoneObjectInspectorHive3.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/serde/objectinspector/IcebergTimestampWithZoneObjectInspectorHive3.java
@@ -25,6 +25,7 @@
import org.apache.hadoop.hive.serde2.io.TimestampLocalTZWritable;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.AbstractPrimitiveJavaObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampLocalTZObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TimestampLocalTZTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
@@ -33,14 +34,24 @@ public class IcebergTimestampWithZoneObjectInspectorHive3 extends AbstractPrimit
implements TimestampLocalTZObjectInspector, WriteObjectInspector {
private static final IcebergTimestampWithZoneObjectInspectorHive3 INSTANCE =
- new IcebergTimestampWithZoneObjectInspectorHive3();
+ new IcebergTimestampWithZoneObjectInspectorHive3(TypeInfoFactory.timestampLocalTZTypeInfo);
+
+ private static final IcebergTimestampWithZoneObjectInspectorHive3 NANO_INSTANCE =
+ new IcebergTimestampWithZoneObjectInspectorHive3(TypeInfoFactory.timestampNanoLocalTZTypeInfo);
public static IcebergTimestampWithZoneObjectInspectorHive3 get() {
return INSTANCE;
}
- private IcebergTimestampWithZoneObjectInspectorHive3() {
- super(TypeInfoFactory.timestampLocalTZTypeInfo);
+ public static IcebergTimestampWithZoneObjectInspectorHive3 get(int precision) {
+ if (precision == 9) {
+ return NANO_INSTANCE;
+ }
+ return INSTANCE;
+ }
+
+ private IcebergTimestampWithZoneObjectInspectorHive3(PrimitiveTypeInfo typeInfo) {
+ super(typeInfo);
}
@Override
diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/mapreduce/HiveIdentityPartitionConverters.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/mapreduce/HiveIdentityPartitionConverters.java
index 6c51de9dabb..31710fd92b8 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/mapreduce/HiveIdentityPartitionConverters.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/mapreduce/HiveIdentityPartitionConverters.java
@@ -51,6 +51,12 @@ public static Object convertConstant(Type type, Object value) {
} else {
return new Timestamp(DateTimeUtil.timestampFromMicros((Long) value));
}
+ case TIMESTAMP_NANO:
+ if (((Types.TimestampNanoType) type).shouldAdjustToUTC()) {
+ return DateTimeUtil.timestamptzFromNanos((Long) value).toOffsetTime();
+ } else {
+ return new Timestamp(DateTimeUtil.timestampFromNanos((Long) value));
+ }
case DECIMAL:
if (value.getClass().isAssignableFrom(BigDecimal.class)) {
return HiveDecimal.create((BigDecimal) value);
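For the zone-adjusted branch above, DateTimeUtil.timestamptzFromNanos produces an OffsetDateTime at UTC. A minimal sketch (class name and sample value are hypothetical):

    import java.time.OffsetDateTime;
    import org.apache.iceberg.util.DateTimeUtil;

    public class IdentityConstantSketch {
      public static void main(String[] args) {
        OffsetDateTime odt = DateTimeUtil.timestamptzFromNanos(1_766_052_930_123_456_789L);
        System.out.println(odt); // an OffsetDateTime carrying full nanosecond precision
      }
    }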
diff --git a/iceberg/iceberg-handler/src/test/queries/positive/timestamp_ns.q b/iceberg/iceberg-handler/src/test/queries/positive/timestamp_ns.q
new file mode 100644
index 00000000000..a9f6d78ab44
--- /dev/null
+++ b/iceberg/iceberg-handler/src/test/queries/positive/timestamp_ns.q
@@ -0,0 +1,40 @@
+-- Mask random uuid
+--! qt:replace:/(\s+'uuid'=')\S+('\s*)/$1#Masked#$2/
+--! qt:replace:/(\s+uuid\s+)\S+/$1#Masked#/
+-- Mask random snapshot id
+--! qt:replace:/('current-snapshot-id'=')\d+/$1#SnapshotId#/
+-- Mask current-snapshot-timestamp-ms
+--! qt:replace:/('current-snapshot-timestamp-ms'=')\d+/$1#Masked#/
+-- Mask iceberg version
+--!
qt:replace:/("iceberg-version":")(\w+\s\w+\s\d+\.\d+\.\d+\s\(\w+\s\w+\))/$1#Masked#/
+-- Mask added-files-size
+--! qt:replace:/(\S\"added-files-size":")(\d+)(")/$1#Masked#$3/
+-- Mask total-files-size
+--! qt:replace:/(\S\"total-files-size":")(\d+)(")/$1#Masked#$3/
+
+CREATE TABLE t (
+ ts_us timestamp,
+ ts_ns timestamp(9),
+ ts_tz_us timestamp with local time zone,
+ ts_tz_ns timestamp with local time zone(9)
+)
+STORED BY ICEBERG
+TBLPROPERTIES ('format-version'='3');
+
+INSERT INTO t VALUES (
+ '2025-12-18 10:15:30.123456789',
+ '2025-12-18 10:15:30.123456789',
+ '2025-12-18 10:15:30.123456789',
+ '2025-12-18 10:15:30.123456789'
+);
+
+SELECT ts_ns FROM t ORDER BY ts_ns;
+SELECT ts_tz_ns FROM t ORDER BY ts_tz_ns;
+SELECT CAST(ts_ns AS STRING) FROM t;
+SELECT CAST(ts_tz_ns AS STRING) FROM t;
+
+SELECT * FROM t;
+
+CREATE TABLE tgt STORED BY ICEBERG TBLPROPERTIES ('format-version'='3') AS SELECT * FROM t;
+
+SELECT * FROM tgt;
\ No newline at end of file
diff --git a/iceberg/iceberg-handler/src/test/results/positive/timestamp_ns.q.out b/iceberg/iceberg-handler/src/test/results/positive/timestamp_ns.q.out
new file mode 100644
index 00000000000..c3223d3856d
--- /dev/null
+++ b/iceberg/iceberg-handler/src/test/results/positive/timestamp_ns.q.out
@@ -0,0 +1,110 @@
+PREHOOK: query: CREATE TABLE t (
+ ts_us timestamp,
+ ts_ns timestamp(9),
+ ts_tz_us timestamp with local time zone,
+ ts_tz_ns timestamp with local time zone(9)
+)
+STORED BY ICEBERG
+TBLPROPERTIES ('format-version'='3')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t
+POSTHOOK: query: CREATE TABLE t (
+ ts_us timestamp,
+ ts_ns timestamp(9),
+ ts_tz_us timestamp with local time zone,
+ ts_tz_ns timestamp with local time zone(9)
+)
+STORED BY ICEBERG
+TBLPROPERTIES ('format-version'='3')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t
+PREHOOK: query: INSERT INTO t VALUES (
+ '2025-12-18 10:15:30.123456789',
+ '2025-12-18 10:15:30.123456789',
+ '2025-12-18 10:15:30.123456789',
+ '2025-12-18 10:15:30.123456789'
+)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@t
+POSTHOOK: query: INSERT INTO t VALUES (
+ '2025-12-18 10:15:30.123456789',
+ '2025-12-18 10:15:30.123456789',
+ '2025-12-18 10:15:30.123456789',
+ '2025-12-18 10:15:30.123456789'
+)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@t
+PREHOOK: query: SELECT ts_ns FROM t ORDER BY ts_ns
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT ts_ns FROM t ORDER BY ts_ns
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+2025-12-18 10:15:30.123456789
+PREHOOK: query: SELECT ts_tz_ns FROM t ORDER BY ts_tz_ns
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT ts_tz_ns FROM t ORDER BY ts_tz_ns
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+2025-12-18 10:15:30.123456789 US/Pacific
+PREHOOK: query: SELECT CAST(ts_ns AS STRING) FROM t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT CAST(ts_ns AS STRING) FROM t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+2025-12-18 10:15:30.123456789
+PREHOOK: query: SELECT CAST(ts_tz_ns AS STRING) FROM t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT CAST(ts_tz_ns AS STRING) FROM t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+2025-12-18 10:15:30.123456789 US/Pacific
+PREHOOK: query: SELECT * FROM t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT * FROM t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+2025-12-18 10:15:30.123456 2025-12-18 10:15:30.123456789 2025-12-18 10:15:30.123456 US/Pacific 2025-12-18 10:15:30.123456789 US/Pacific
+PREHOOK: query: CREATE TABLE tgt STORED BY ICEBERG TBLPROPERTIES ('format-version'='3') AS SELECT * FROM t
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@t
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tgt
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: CREATE TABLE tgt STORED BY ICEBERG TBLPROPERTIES ('format-version'='3') AS SELECT * FROM t
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@t
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tgt
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: Lineage: tgt.ts_ns SIMPLE [(t)t.FieldSchema(name:ts_ns, type:timestamp(9), comment:null), ]
+POSTHOOK: Lineage: tgt.ts_tz_ns SIMPLE [(t)t.FieldSchema(name:ts_tz_ns, type:timestamp with local time zone(9), comment:null), ]
+POSTHOOK: Lineage: tgt.ts_tz_us SIMPLE [(t)t.FieldSchema(name:ts_tz_us, type:timestamp with local time zone, comment:null), ]
+POSTHOOK: Lineage: tgt.ts_us SIMPLE [(t)t.FieldSchema(name:ts_us, type:timestamp, comment:null), ]
+PREHOOK: query: SELECT * FROM tgt
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tgt
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: SELECT * FROM tgt
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tgt
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+2025-12-18 10:15:30.123456 2025-12-18 10:15:30.123456789 2025-12-18 10:15:30.123456 US/Pacific 2025-12-18 10:15:30.123456789 US/Pacific
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 1fa3a011328..54417ff0fcb 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -2434,9 +2434,16 @@ primitiveType
| KW_DOUBLE KW_PRECISION? -> TOK_DOUBLE
| KW_DATE -> TOK_DATE
| KW_DATETIME -> TOK_DATETIME
- | KW_TIMESTAMP -> TOK_TIMESTAMP
| KW_TIMESTAMPLOCALTZ -> TOK_TIMESTAMPLOCALTZ
- | KW_TIMESTAMP KW_WITH KW_LOCAL KW_TIME KW_ZONE -> TOK_TIMESTAMPLOCALTZ
+ | KW_TIMESTAMP
+ (
+ KW_WITH KW_LOCAL KW_TIME KW_ZONE
+ (LPAREN p=Number RPAREN)?
+ -> ^(TOK_TIMESTAMPLOCALTZ $p?)
+ |
+ (LPAREN p=Number RPAREN)?
+ -> ^(TOK_TIMESTAMP $p?)
+ )
// Uncomment to allow intervals as table column types
//| KW_INTERVAL KW_YEAR KW_TO KW_MONTH -> TOK_INTERVAL_YEAR_MONTH
//| KW_INTERVAL KW_DAY KW_TO KW_SECOND -> TOK_INTERVAL_DAY_TIME
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index 000eb8a6d91..5be6735c1e3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -76,6 +76,7 @@
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.InputFormat;
@@ -1160,9 +1161,10 @@ public static void validateColumns(List<FieldSchema> columns, List<FieldSchema>
throw new HiveException("Duplicate column name " + colName
+ " in the table definition.");
}
- if (!icebergTable && VARIANT_TYPE_NAME.equalsIgnoreCase(col.getType())) {
+ if (!icebergTable && isUnsupportedInNonIceberg(col.getType())) {
throw new HiveException(
- "Column name " + colName + " cannot be of type 'variant' as it is
not supported in non-Iceberg tables.");
+ "Column name " + colName + " cannot be of type '" + col.getType()
+ "' as it is not supported in "
+ + "non-Iceberg tables.");
}
colNames.add(colName);
}
@@ -1392,4 +1394,10 @@ public List<VirtualColumn> getVirtualColumns() {
return virtualColumns;
}
+
+ private static boolean isUnsupportedInNonIceberg(String columnType) {
+ return VARIANT_TYPE_NAME.equalsIgnoreCase(columnType) ||
+ TypeInfoFactory.nanoTimestampTypeInfo.getQualifiedName().equalsIgnoreCase(columnType) ||
+ TypeInfoFactory.timestampNanoLocalTZTypeInfo.getQualifiedName().equalsIgnoreCase(columnType);
+ }
}
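The new guard matches column types against the qualified names of the two nano type-info singletons introduced later in this patch. A minimal sketch of the strings being compared (class name hypothetical):

    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

    public class NonIcebergGuardSketch {
      public static void main(String[] args) {
        // The two spellings validateColumns now rejects outside Iceberg tables:
        System.out.println(TypeInfoFactory.nanoTimestampTypeInfo.getQualifiedName());
        // expected: timestamp(9)
        System.out.println(TypeInfoFactory.timestampNanoLocalTZTypeInfo.getQualifiedName());
        // expected: timestamp with local time zone(9)
      }
    }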
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
index d191ecef829..1f7ccd1773c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
@@ -355,7 +355,7 @@ public static TypeInfo convertPrimitiveType(RelDataType rType) {
} catch (HiveException e) {
throw new RuntimeException(e);
}
- return TypeInfoFactory.getTimestampTZTypeInfo(conf.getLocalTimeZone());
+ return TypeInfoFactory.getTimestampTZTypeInfo(conf.getLocalTimeZone(), 6);
case INTERVAL_YEAR:
case INTERVAL_MONTH:
case INTERVAL_YEAR_MONTH:
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index 393006bd8f0..18043120610 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -1067,14 +1067,23 @@ private static String getTypeName(ASTNode node) throws SemanticException {
typeName = varcharTypeInfo.getQualifiedName();
break;
case HiveParser.TOK_TIMESTAMPLOCALTZ:
- TimestampLocalTZTypeInfo timestampLocalTZTypeInfo =
- TypeInfoFactory.getTimestampTZTypeInfo(null);
+ int precision = 6;
+ if (node.getChildCount() > 0) {
+ precision = Integer.parseInt(node.getChild(0).getText());
+ }
+ TimestampLocalTZTypeInfo timestampLocalTZTypeInfo = TypeInfoFactory.getTimestampTZTypeInfo(null, precision);
typeName = timestampLocalTZTypeInfo.getQualifiedName();
break;
case HiveParser.TOK_DECIMAL:
DecimalTypeInfo decTypeInfo = ParseUtils.getDecimalTypeTypeInfo(node);
typeName = decTypeInfo.getQualifiedName();
break;
+ case HiveParser.TOK_TIMESTAMP:
+ int prec = 6;
+ if (node.getChildCount() > 0) {
+ prec = Integer.parseInt(node.getChild(0).getText());
+ }
+ return TypeInfoFactory.getTimestampTypeInfo(prec).getQualifiedName();
default:
typeName = TOKEN_TO_TYPE.get(token);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java
index 55043413839..cf80c7b82c0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java
@@ -105,6 +105,9 @@ public void configure(MapredContext context) {
@Override
public Object evaluate(DeferredObject[] arguments) throws HiveException {
+ if (tsConvertors[0] instanceof ObjectInspectorConverters.IdentityConverter) {
+ return arguments[0].get();
+ }
PrimitiveObjectInspectorConverter.TimestampConverter ts =
(PrimitiveObjectInspectorConverter.TimestampConverter) tsConvertors[0];
ts.setIntToTimestampInSeconds(intToTimestampInSeconds);
diff --git a/ql/src/test/queries/clientnegative/timestamp_ns_add_column.q b/ql/src/test/queries/clientnegative/timestamp_ns_add_column.q
new file mode 100644
index 00000000000..9cc773e5e2b
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/timestamp_ns_add_column.q
@@ -0,0 +1,2 @@
+create table emp(id int);
+alter table emp add columns (t timestamp(9));
\ No newline at end of file
diff --git a/ql/src/test/queries/clientnegative/timestamp_ns_non_iceberg.q b/ql/src/test/queries/clientnegative/timestamp_ns_non_iceberg.q
new file mode 100644
index 00000000000..27d281bdf66
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/timestamp_ns_non_iceberg.q
@@ -0,0 +1 @@
+CREATE EXTERNAL TABLE nano_timestamp_basic (id INT, t timestamp with local time zone(9));
\ No newline at end of file
diff --git a/ql/src/test/results/clientnegative/timestamp_ns_add_column.q.out b/ql/src/test/results/clientnegative/timestamp_ns_add_column.q.out
new file mode 100644
index 00000000000..8b625540b13
--- /dev/null
+++ b/ql/src/test/results/clientnegative/timestamp_ns_add_column.q.out
@@ -0,0 +1,13 @@
+PREHOOK: query: create table emp(id int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@emp
+POSTHOOK: query: create table emp(id int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@emp
+PREHOOK: query: alter table emp add columns (t timestamp(9))
+PREHOOK: type: ALTERTABLE_ADDCOLS
+PREHOOK: Input: default@emp
+PREHOOK: Output: default@emp
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Column name t cannot be of type 'timestamp(9)' as it is not supported in non-Iceberg tables.
diff --git a/ql/src/test/results/clientnegative/timestamp_ns_non_iceberg.q.out b/ql/src/test/results/clientnegative/timestamp_ns_non_iceberg.q.out
new file mode 100644
index 00000000000..27e4c5459ac
--- /dev/null
+++ b/ql/src/test/results/clientnegative/timestamp_ns_non_iceberg.q.out
@@ -0,0 +1,5 @@
+PREHOOK: query: CREATE EXTERNAL TABLE nano_timestamp_basic (id INT, t timestamp with local time zone(9))
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@nano_timestamp_basic
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Column name t cannot be of type 'timestamp with local time zone(9)' as it is not supported in non-Iceberg tables.
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaTimestampObjectInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaTimestampObjectInspector.java
index 47719c85642..17cc1059e18 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaTimestampObjectInspector.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaTimestampObjectInspector.java
@@ -19,6 +19,7 @@
import org.apache.hadoop.hive.common.type.Timestamp;
import org.apache.hadoop.hive.serde2.io.TimestampWritableV2;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
public class JavaTimestampObjectInspector
@@ -29,6 +30,10 @@ protected JavaTimestampObjectInspector() {
super(TypeInfoFactory.timestampTypeInfo);
}
+ protected JavaTimestampObjectInspector(PrimitiveTypeInfo typeInfo) {
+ super(typeInfo);
+ }
+
public TimestampWritableV2 getPrimitiveWritableObject(Object o) {
return o == null ? null : new TimestampWritableV2((Timestamp) o);
}
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorFactory.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorFactory.java
index 0b31d9a9a26..18ebc643eea 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorFactory.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorFactory.java
@@ -89,8 +89,12 @@ public final class PrimitiveObjectInspectorFactory {
new WritableDateObjectInspector();
public static final WritableTimestampObjectInspector writableTimestampObjectInspector =
new WritableTimestampObjectInspector();
+ public static final WritableTimestampObjectInspector writableNanoTimestampObjectInspector =
+ new WritableTimestampObjectInspector(TypeInfoFactory.nanoTimestampTypeInfo);
public static final WritableTimestampLocalTZObjectInspector writableTimestampTZObjectInspector =
new WritableTimestampLocalTZObjectInspector(TypeInfoFactory.timestampLocalTZTypeInfo);
+ public static final WritableTimestampLocalTZObjectInspector writableNanoTimestampTZObjectInspector =
+ new WritableTimestampLocalTZObjectInspector(TypeInfoFactory.timestampNanoLocalTZTypeInfo);
public static final WritableHiveIntervalYearMonthObjectInspector writableHiveIntervalYearMonthObjectInspector =
new WritableHiveIntervalYearMonthObjectInspector();
public static final WritableHiveIntervalDayTimeObjectInspector writableHiveIntervalDayTimeObjectInspector =
@@ -139,6 +143,10 @@ public final class PrimitiveObjectInspectorFactory {
cachedPrimitiveWritableInspectorCache.put(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.BINARY_TYPE_NAME),
writableBinaryObjectInspector);
cachedPrimitiveWritableInspectorCache.put(TypeInfoFactory.decimalTypeInfo,
writableHiveDecimalObjectInspector);
+ cachedPrimitiveWritableInspectorCache.put(TypeInfoFactory.nanoTimestampTypeInfo,
+ writableNanoTimestampObjectInspector);
+ cachedPrimitiveWritableInspectorCache.put(TypeInfoFactory.timestampNanoLocalTZTypeInfo,
+ writableNanoTimestampTZObjectInspector);
}
private static Map<PrimitiveCategory, AbstractPrimitiveWritableObjectInspector>
@@ -193,8 +201,12 @@ public final class PrimitiveObjectInspectorFactory {
new JavaDateObjectInspector();
public static final JavaTimestampObjectInspector javaTimestampObjectInspector =
new JavaTimestampObjectInspector();
+ public static final JavaTimestampObjectInspector javaNanoTimestampObjectInspector =
+ new JavaTimestampObjectInspector(TypeInfoFactory.nanoTimestampTypeInfo);
public static final JavaTimestampLocalTZObjectInspector javaTimestampTZObjectInspector =
new JavaTimestampLocalTZObjectInspector(TypeInfoFactory.timestampLocalTZTypeInfo);
+ public static final JavaTimestampLocalTZObjectInspector javaNanoTimestampTZObjectInspector =
+ new JavaTimestampLocalTZObjectInspector(TypeInfoFactory.timestampNanoLocalTZTypeInfo);
public static final JavaHiveIntervalYearMonthObjectInspector javaHiveIntervalYearMonthObjectInspector =
new JavaHiveIntervalYearMonthObjectInspector();
public static final JavaHiveIntervalDayTimeObjectInspector javaHiveIntervalDayTimeObjectInspector =
@@ -232,7 +244,10 @@ public final class PrimitiveObjectInspectorFactory {
javaDateObjectInspector);
cachedPrimitiveJavaInspectorCache.put(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.TIMESTAMP_TYPE_NAME),
javaTimestampObjectInspector);
+ cachedPrimitiveJavaInspectorCache.put(TypeInfoFactory.nanoTimestampTypeInfo, javaNanoTimestampObjectInspector);
cachedPrimitiveJavaInspectorCache.put(TypeInfoFactory.timestampLocalTZTypeInfo, javaTimestampTZObjectInspector);
+ cachedPrimitiveJavaInspectorCache.put(TypeInfoFactory.timestampNanoLocalTZTypeInfo,
+ javaNanoTimestampTZObjectInspector);
cachedPrimitiveJavaInspectorCache.put(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME),
javaHiveIntervalYearMonthObjectInspector);
cachedPrimitiveJavaInspectorCache.put(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME),
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableTimestampObjectInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableTimestampObjectInspector.java
index e0ab191b73c..8939186562f 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableTimestampObjectInspector.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableTimestampObjectInspector.java
@@ -19,6 +19,7 @@
import org.apache.hadoop.hive.common.type.Timestamp;
import org.apache.hadoop.hive.serde2.io.TimestampWritableV2;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
public class WritableTimestampObjectInspector extends
@@ -29,6 +30,10 @@ public WritableTimestampObjectInspector() {
super(TypeInfoFactory.timestampTypeInfo);
}
+ public WritableTimestampObjectInspector(PrimitiveTypeInfo typeInfo) {
+ super(typeInfo);
+ }
+
@Override
public TimestampWritableV2 getPrimitiveWritableObject(Object o) {
return o == null ? null : (TimestampWritableV2) o;
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/thrift/Type.java b/serde/src/java/org/apache/hadoop/hive/serde2/thrift/Type.java
index 9df1b8dc613..ca5abdbbf2c 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/thrift/Type.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/thrift/Type.java
@@ -69,12 +69,14 @@ public enum Type {
DATE_TYPE("DATE",
java.sql.Types.DATE,
TTypeId.DATE_TYPE),
- TIMESTAMP_TYPE("TIMESTAMP",
- java.sql.Types.TIMESTAMP,
- TTypeId.TIMESTAMP_TYPE),
TIMESTAMPLOCALTZ_TYPE(serdeConstants.TIMESTAMPLOCALTZ_TYPE_NAME.toUpperCase(),
java.sql.Types.OTHER,
- TTypeId.TIMESTAMPLOCALTZ_TYPE),
+ TTypeId.TIMESTAMPLOCALTZ_TYPE,
+ true, false, false),
+ TIMESTAMP_TYPE("TIMESTAMP",
+ java.sql.Types.TIMESTAMP,
+ TTypeId.TIMESTAMP_TYPE,
+ true, false, false),
INTERVAL_YEAR_MONTH_TYPE("INTERVAL_YEAR_MONTH",
java.sql.Types.OTHER,
TTypeId.INTERVAL_YEAR_MONTH_TYPE),
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TimestampLocalTZTypeInfo.java b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TimestampLocalTZTypeInfo.java
index e1f9a2699a8..bb3f947af15 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TimestampLocalTZTypeInfo.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TimestampLocalTZTypeInfo.java
@@ -22,25 +22,34 @@
import java.util.Objects;
import org.apache.hadoop.hive.common.type.TimestampTZUtil;
+import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.serde.serdeConstants;
public class TimestampLocalTZTypeInfo extends PrimitiveTypeInfo {
private static final long serialVersionUID = 1L;
private ZoneId timeZone;
+ private final int precision;
public TimestampLocalTZTypeInfo() {
- super(serdeConstants.TIMESTAMPLOCALTZ_TYPE_NAME);
+ this(6, ZoneId.systemDefault());
}
public TimestampLocalTZTypeInfo(String timeZoneStr) {
+ this(6, TimestampTZUtil.parseTimeZone(timeZoneStr));
+ }
+
+ public TimestampLocalTZTypeInfo(int precision, ZoneId timeZone) {
super(serdeConstants.TIMESTAMPLOCALTZ_TYPE_NAME);
- this.timeZone = TimestampTZUtil.parseTimeZone(timeZoneStr);
+ if (precision != 6 && precision != 9) {
+ throw new RuntimeException(ErrorMsg.UNSUPPORTED_TIMESTAMP_PRECISION.format(String.valueOf(precision)));
+ }
+ this.precision = precision;
+ this.timeZone = timeZone;
}
- @Override
- public String getTypeName() {
- return serdeConstants.TIMESTAMPLOCALTZ_TYPE_NAME;
+ public int getPrecision() {
+ return precision;
}
@Override
@@ -49,6 +58,14 @@ public void setTypeName(String typeName) {
return;
}
+ @Override
+ public String getTypeName() {
+ if (precision == 9) {
+ return serdeConstants.TIMESTAMPLOCALTZ_TYPE_NAME + "(9)";
+ }
+ return super.getTypeName();
+ }
+
@Override
public boolean equals(Object other) {
if (this == other) {
@@ -58,9 +75,8 @@ public boolean equals(Object other) {
return false;
}
- TimestampLocalTZTypeInfo dti = (TimestampLocalTZTypeInfo) other;
-
- return this.timeZone().equals(dti.timeZone());
+ TimestampLocalTZTypeInfo that = (TimestampLocalTZTypeInfo) other;
+ return precision == that.precision && Objects.equals(timeZone, that.timeZone);
}
/**
@@ -68,29 +84,38 @@ public boolean equals(Object other) {
*/
@Override
public int hashCode() {
- return Objects.hash(typeName, timeZone);
+ return Objects.hash(typeName, precision, timeZone);
}
@Override
public String toString() {
- return getQualifiedName(timeZone);
+ return getQualifiedName(timeZone, precision);
}
@Override
public String getQualifiedName() {
- return getQualifiedName(null);
+ return getQualifiedName(null, precision);
}
- public static String getQualifiedName(ZoneId timeZone) {
+ static String getQualifiedName(ZoneId timeZone, int precision) {
StringBuilder sb = new StringBuilder(serdeConstants.TIMESTAMPLOCALTZ_TYPE_NAME);
if (timeZone != null) {
sb.append("('");
sb.append(timeZone);
sb.append("')");
}
+ if (precision > 6) {
+ sb.append("(");
+ sb.append(precision);
+ sb.append(")");
+ }
return sb.toString();
}
+ public static String getQualifiedName(ZoneId timeZone) {
+ return getQualifiedName(timeZone, 6);
+ }
+
public ZoneId timeZone() {
return timeZone;
}
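Per the code above, the precision suffix is appended only when it exceeds 6, and the zone only when one is supplied, so toString() and getQualifiedName() can differ. A minimal sketch (class name hypothetical):

    import java.time.ZoneId;
    import org.apache.hadoop.hive.serde2.typeinfo.TimestampLocalTZTypeInfo;

    public class LocalTZNamingSketch {
      public static void main(String[] args) {
        TimestampLocalTZTypeInfo nano = new TimestampLocalTZTypeInfo(9, ZoneId.of("UTC"));
        System.out.println(nano);                    // timestamp with local time zone('UTC')(9)
        System.out.println(nano.getQualifiedName()); // timestamp with local time zone(9)
        System.out.println(new TimestampLocalTZTypeInfo(6, ZoneId.of("UTC")));
        // timestamp with local time zone('UTC')
      }
    }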
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TimestampTypeInfo.java b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TimestampTypeInfo.java
new file mode 100644
index 00000000000..d8d9ce285e0
--- /dev/null
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TimestampTypeInfo.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.serde2.typeinfo;
+
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.serde.serdeConstants;
+
+public class TimestampTypeInfo extends PrimitiveTypeInfo {
+
+ private final int precision;
+
+ public TimestampTypeInfo() {
+ this(6);
+ }
+
+ public TimestampTypeInfo(int precision) {
+ super(serdeConstants.TIMESTAMP_TYPE_NAME);
+ if (precision != 6 && precision != 9) {
+ throw new RuntimeException(ErrorMsg.UNSUPPORTED_TIMESTAMP_PRECISION.format(String.valueOf(precision)));
+ }
+ this.precision = precision;
+ }
+
+ public int getPrecision() {
+ return precision;
+ }
+
+ @Override
+ public String getQualifiedName() {
+ return precision == 6
+ ? serdeConstants.TIMESTAMP_TYPE_NAME
+ : serdeConstants.TIMESTAMP_TYPE_NAME + "(" + precision + ")";
+ }
+
+ @Override
+ public String getTypeName() {
+ return getQualifiedName();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (!(o instanceof TimestampTypeInfo)) {
+ return false;
+ }
+ TimestampTypeInfo that = (TimestampTypeInfo) o;
+ return precision == that.precision;
+ }
+
+ @Override
+ public String toString() {
+ return getQualifiedName();
+ }
+
+ @Override
+ public int hashCode() {
+ return Integer.hashCode(precision);
+ }
+}
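In short, the new class keeps the plain name for the default precision and appends the parameter otherwise, rejecting anything but 6 or 9. A minimal sketch (class name hypothetical):

    import org.apache.hadoop.hive.serde2.typeinfo.TimestampTypeInfo;

    public class TimestampTypeInfoSketch {
      public static void main(String[] args) {
        System.out.println(new TimestampTypeInfo().getTypeName());  // timestamp
        System.out.println(new TimestampTypeInfo(9).getTypeName()); // timestamp(9)
        try {
          new TimestampTypeInfo(3); // not 6 or 9
        } catch (RuntimeException e) {
          System.out.println(e.getMessage()); // Unsupported value for precision: 3
        }
      }
    }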
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoFactory.java b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoFactory.java
index 977bbd0277f..f80c5192604 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoFactory.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoFactory.java
@@ -23,9 +23,11 @@
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
+import org.apache.commons.lang3.math.NumberUtils;
import org.apache.hadoop.hive.common.type.HiveChar;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.common.type.HiveVarchar;
+import org.apache.hadoop.hive.common.type.TimestampTZUtil;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils.PrimitiveTypeEntry;
@@ -55,7 +57,8 @@ private TypeInfoFactory() {
public static final PrimitiveTypeInfo byteTypeInfo = new PrimitiveTypeInfo(serdeConstants.TINYINT_TYPE_NAME);
public static final PrimitiveTypeInfo shortTypeInfo = new PrimitiveTypeInfo(serdeConstants.SMALLINT_TYPE_NAME);
public static final PrimitiveTypeInfo dateTypeInfo = new PrimitiveTypeInfo(serdeConstants.DATE_TYPE_NAME);
- public static final PrimitiveTypeInfo timestampTypeInfo = new PrimitiveTypeInfo(serdeConstants.TIMESTAMP_TYPE_NAME);
+ public static final PrimitiveTypeInfo timestampTypeInfo = new TimestampTypeInfo();
+ public static final PrimitiveTypeInfo nanoTimestampTypeInfo = new TimestampTypeInfo(9);
public static final PrimitiveTypeInfo intervalYearMonthTypeInfo = new PrimitiveTypeInfo(serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME);
public static final PrimitiveTypeInfo intervalDayTimeTypeInfo = new PrimitiveTypeInfo(serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME);
public static final PrimitiveTypeInfo binaryTypeInfo = new PrimitiveTypeInfo(serdeConstants.BINARY_TYPE_NAME);
@@ -72,6 +75,9 @@ private TypeInfoFactory() {
public static final TimestampLocalTZTypeInfo timestampLocalTZTypeInfo = new TimestampLocalTZTypeInfo(
ZoneId.systemDefault().getId());
+ public static final TimestampLocalTZTypeInfo timestampNanoLocalTZTypeInfo =
+ new TimestampLocalTZTypeInfo(9, ZoneId.systemDefault());
+
public static final PrimitiveTypeInfo unknownTypeInfo = new PrimitiveTypeInfo("unknown");
// Map from type name (such as int or varchar(40) to the corresponding PrimitiveTypeInfo
@@ -98,6 +104,8 @@ private TypeInfoFactory() {
cachedPrimitiveTypeInfo.put(serdeConstants.BINARY_TYPE_NAME, binaryTypeInfo);
cachedPrimitiveTypeInfo.put(decimalTypeInfo.getQualifiedName(), decimalTypeInfo);
cachedPrimitiveTypeInfo.put("unknown", unknownTypeInfo);
+ cachedPrimitiveTypeInfo.put(nanoTimestampTypeInfo.toString(), nanoTimestampTypeInfo);
+ cachedPrimitiveTypeInfo.put(timestampNanoLocalTZTypeInfo.toString(), timestampNanoLocalTZTypeInfo);
}
/**
@@ -163,11 +171,31 @@ private static PrimitiveTypeInfo createPrimitiveTypeInfo(String fullName) {
}
return new DecimalTypeInfo(Integer.valueOf(parts.typeParams[0]),
Integer.valueOf(parts.typeParams[1]));
- case TIMESTAMPLOCALTZ:
+ case TIMESTAMP:
if (parts.typeParams.length != 1) {
return null;
}
- return new TimestampLocalTZTypeInfo(parts.typeParams[0]);
+ int precision = Integer.parseInt(parts.typeParams[0]);
+ return new TimestampTypeInfo(precision);
+ case TIMESTAMPLOCALTZ:
+ int prec = 6;
+ ZoneId tz = ZoneId.systemDefault();
+
+ if (parts.typeParams.length == 1) {
+ String p0 = parts.typeParams[0];
+
+ if (NumberUtils.isCreatable((p0))) {
+ prec = Integer.parseInt(p0);
+ } else {
+ tz = TimestampTZUtil.parseTimeZone(p0);
+ }
+ } else if (parts.typeParams.length == 2) {
+ prec = Integer.parseInt(parts.typeParams[0]);
+ tz = TimestampTZUtil.parseTimeZone(parts.typeParams[1]);
+ } else {
+ return null;
+ }
+ return new TimestampLocalTZTypeInfo(prec, tz);
default:
return null;
}
@@ -186,11 +214,19 @@ public static DecimalTypeInfo getDecimalTypeInfo(int precision, int scale) {
return (DecimalTypeInfo) getPrimitiveTypeInfo(fullName);
};
- public static TimestampLocalTZTypeInfo getTimestampTZTypeInfo(ZoneId defaultTimeZone) {
- String fullName = TimestampLocalTZTypeInfo.getQualifiedName(defaultTimeZone);
+ public static TimestampLocalTZTypeInfo getTimestampTZTypeInfo(ZoneId defaultTimeZone, int precision) {
+ String fullName = TimestampLocalTZTypeInfo.getQualifiedName(defaultTimeZone, precision);
return (TimestampLocalTZTypeInfo) getPrimitiveTypeInfo(fullName);
};
+ public static TimestampLocalTZTypeInfo getTimestampTZTypeInfo(ZoneId defaultTimeZone) {
+ return getTimestampTZTypeInfo(defaultTimeZone, 6);
+ };
+
+ public static TimestampTypeInfo getTimestampTypeInfo(int precision) {
+ return new TimestampTypeInfo(precision);
+ }
+
public static TypeInfo getPrimitiveTypeInfoFromPrimitiveWritable(
Class<?> clazz) {
String typeName = PrimitiveObjectInspectorUtils
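With the factory changes in place, both the typed accessor and the string-keyed lookup resolve the nano variants. A minimal sketch, assuming the cache keys registered above (class name hypothetical):

    import org.apache.hadoop.hive.serde2.typeinfo.TimestampTypeInfo;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

    public class FactoryLookupSketch {
      public static void main(String[] args) {
        TimestampTypeInfo nano = TypeInfoFactory.getTimestampTypeInfo(9);
        System.out.println(nano.getQualifiedName()); // timestamp(9)
        // The parameterized string form goes through createPrimitiveTypeInfo:
        System.out.println(TypeInfoFactory.getPrimitiveTypeInfo("timestamp(9)").getTypeName());
      }
    }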
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
index 581437194db..b25d35868df 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
@@ -22,6 +22,7 @@
import java.lang.reflect.Method;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
+import java.time.ZoneId;
import java.util.ArrayList;
import java.util.EnumMap;
import java.util.EnumSet;
@@ -498,6 +499,10 @@ private TypeInfo parseType() {
}
return TypeInfoFactory.getDecimalTypeInfo(precision, scale);
+ case TIMESTAMP:
+ return TypeInfoFactory.getTimestampTypeInfo(getTimestampPrecision(params));
+ case TIMESTAMPLOCALTZ:
+ return TypeInfoFactory.getTimestampTZTypeInfo(ZoneId.systemDefault(), getTimestampPrecision(params));
default:
return TypeInfoFactory.getPrimitiveTypeInfo(typeEntry.typeName);
}
@@ -578,6 +583,18 @@ private TypeInfo parseType() {
+ t.position + " of '" + typeInfoString + "'");
}
+ private static int getTimestampPrecision(String[] params) {
+ int prec = 6;
+ if (params != null) {
+ if (params.length == 1) {
+ prec = Integer.parseInt(params[0]);
+ } else if (params.length > 1) {
+ throw new IllegalArgumentException("Timestamp takes only one parameter, but " + params.length + " is seen");
+ }
+ }
+ return prec;
+ }
+
public PrimitiveParts parsePrimitiveParts() {
PrimitiveParts parts = new PrimitiveParts();
Token t = expect("type");
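End to end, the parser hook means parameterized timestamp strings now round-trip through the standard utility. A minimal sketch (class name hypothetical):

    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

    public class TypeStringRoundTripSketch {
      public static void main(String[] args) {
        TypeInfo ts = TypeInfoUtils.getTypeInfoFromTypeString("timestamp(9)");
        System.out.println(ts.getTypeName()); // timestamp(9)
        TypeInfo tz = TypeInfoUtils.getTypeInfoFromTypeString("timestamp with local time zone(9)");
        System.out.println(tz.getTypeName()); // timestamp with local time zone(9)
      }
    }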