HIVE-17468: Shade and package appropriate jackson version for druid storage handler (Slim Bouguerra, reviewed by Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5f26f393
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5f26f393
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5f26f393

Branch: refs/heads/hive-14535
Commit: 5f26f393ecc0cca4afd3a2ed3c1d46acf47701d1
Parents: 09afd83
Author: Slim Bouguerra <slim.bougue...@gmail.com>
Authored: Fri Sep 8 09:08:29 2017 -0700
Committer: Jesus Camacho Rodriguez <jcama...@apache.org>
Committed: Fri Sep 8 09:08:29 2017 -0700

----------------------------------------------------------------------
 druid-handler/pom.xml                           | 48 +++++++++-----------
 .../hive/druid/DruidStorageHandlerUtils.java    |  7 +++
 .../hadoop/hive/druid/io/DruidOutputFormat.java |  9 ++--
 .../druid/io/DruidQueryBasedInputFormat.java    |  8 +---
 .../hadoop/hive/druid/io/DruidRecordWriter.java |  3 +-
 .../serde/DruidGroupByQueryRecordReader.java    |  5 +-
 .../serde/DruidSelectQueryRecordReader.java     |  5 +-
 .../hadoop/hive/druid/serde/DruidSerDe.java     | 15 +++---
 .../serde/DruidTimeseriesQueryRecordReader.java |  5 +-
 .../druid/serde/DruidTopNQueryRecordReader.java |  5 +-
 .../TestHiveDruidQueryBasedInputFormat.java     |  6 +--
 .../hive/ql/io/TestDruidRecordWriter.java       | 19 +++++---
 12 files changed, 66 insertions(+), 69 deletions(-)
----------------------------------------------------------------------
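
In short: the pom moves the com.fasterxml.jackson.core exclusions between dependencies, drops the standalone calcite-druid and avatica dependency declarations, and the handler code stops reading its timestamp-column and default-interval constants from org.apache.calcite.adapter.druid.DruidTable, using constants owned by DruidStorageHandlerUtils instead. A minimal sketch of those new constants as they appear in the patch below (the wrapper class is only for illustration and assumes joda-time on the classpath):

import org.joda.time.DateTime;
import org.joda.time.Interval;
import org.joda.time.chrono.ISOChronology;

// Illustration only: mirrors the constants added to DruidStorageHandlerUtils by this
// patch, which replace the former references to Calcite's DruidTable constants.
public final class DruidHandlerConstantsSketch {

  // Druid's reserved timestamp column name, now owned by the storage handler.
  public static final String DEFAULT_TIMESTAMP_COLUMN = "__time";

  // Catch-all interval used when a query does not restrict the time range.
  public static final Interval DEFAULT_INTERVAL = new Interval(
          new DateTime("1900-01-01", ISOChronology.getInstanceUTC()),
          new DateTime("3000-01-01", ISOChronology.getInstanceUTC())
  ).withChronology(ISOChronology.getInstanceUTC());

  private DruidHandlerConstantsSketch() {
  }
}

The new TestDruidRecordWriter#testTimeStampColumnName test pins DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN to DruidTable.DEFAULT_TIMESTAMP_COLUMN so the two constants cannot drift apart.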


http://git-wip-us.apache.org/repos/asf/hive/blob/5f26f393/druid-handler/pom.xml
----------------------------------------------------------------------
diff --git a/druid-handler/pom.xml b/druid-handler/pom.xml
index 81c744f..48b2af9 100644
--- a/druid-handler/pom.xml
+++ b/druid-handler/pom.xml
@@ -53,6 +53,18 @@
           <groupId>com.google.guava</groupId>
           <artifactId>guava</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>com.fasterxml.jackson.core</groupId>
+          <artifactId>jackson-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.fasterxml.jackson.core</groupId>
+          <artifactId>jackson-annotations</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.fasterxml.jackson.core</groupId>
+          <artifactId>jackson-databind</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
     <!-- inter-project -->
@@ -119,18 +131,6 @@
       <version>${druid.version}</version>
       <exclusions>
         <exclusion>
-          <groupId>com.fasterxml.jackson.core</groupId>
-          <artifactId>jackson-core</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.fasterxml.jackson.core</groupId>
-          <artifactId>jackson-annotations</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.fasterxml.jackson.core</groupId>
-          <artifactId>jackson-databind</artifactId>
-        </exclusion>
-        <exclusion>
           <!--LGPL licenced library-->
           <groupId>com.google.code.findbugs</groupId>
           <artifactId>annotations</artifactId>
@@ -216,24 +216,20 @@
           <groupId>com.google.guava</groupId>
           <artifactId>guava</artifactId>
         </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.calcite</groupId>
-      <artifactId>calcite-druid</artifactId>
-      <version>${calcite.version}</version>
-      <exclusions>
         <exclusion>
-          <groupId>org.apache.calcite.avatica</groupId>
-          <artifactId>avatica-core</artifactId>
+          <groupId>com.fasterxml.jackson.core</groupId>
+          <artifactId>jackson-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.fasterxml.jackson.core</groupId>
+          <artifactId>jackson-annotations</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.fasterxml.jackson.core</groupId>
+          <artifactId>jackson-databind</artifactId>
         </exclusion>
       </exclusions>
     </dependency>
-    <dependency>
-      <groupId>org.apache.calcite.avatica</groupId>
-      <artifactId>avatica</artifactId>
-      <version>${avatica.version}</version>
-    </dependency>
     <!-- test inter-project -->
     <dependency>
       <groupId>junit</groupId>
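
Note on verifying the result: the exclusions above only shape the classpath that Maven assembles; a quick way to confirm which Jackson the handler actually loads at run time is to ask Jackson itself. A throwaway diagnostic, not part of the patch (class name and output format are illustrative):

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.databind.ObjectMapper;

// Throwaway check: prints the jackson-databind version and the jar that supplied
// jackson-core under the current classloader, which is what the exclusions influence.
public class JacksonVersionCheck {
  public static void main(String[] args) {
    System.out.println("jackson-databind version: " + new ObjectMapper().version());
    System.out.println("jackson-core loaded from: "
        + JsonFactory.class.getProtectionDomain().getCodeSource().getLocation());
  }
}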

http://git-wip-us.apache.org/repos/asf/hive/blob/5f26f393/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
index 3eeb0c3..7169140 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
@@ -41,6 +41,7 @@ import io.druid.timeline.partition.NumberedShardSpec;
 import io.druid.timeline.partition.PartitionChunk;
 import io.druid.timeline.partition.ShardSpec;
 
+import org.apache.calcite.adapter.druid.LocalInterval;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -77,6 +78,7 @@ import org.jboss.netty.handler.codec.http.HttpHeaders;
 import org.jboss.netty.handler.codec.http.HttpMethod;
 import org.joda.time.DateTime;
 import org.joda.time.Interval;
+import org.joda.time.chrono.ISOChronology;
 import org.joda.time.format.ISODateTimeFormat;
 import org.skife.jdbi.v2.FoldController;
 import org.skife.jdbi.v2.Folder3;
@@ -122,7 +124,12 @@ public final class DruidStorageHandlerUtils {
   private static final Logger LOG = LoggerFactory.getLogger(DruidStorageHandlerUtils.class);
 
   private static final String SMILE_CONTENT_TYPE = "application/x-jackson-smile";
+  public static final String DEFAULT_TIMESTAMP_COLUMN = "__time";
 
+  public static final Interval DEFAULT_INTERVAL = new Interval(
+          new DateTime("1900-01-01", ISOChronology.getInstanceUTC()),
+          new DateTime("3000-01-01", ISOChronology.getInstanceUTC())
+  ).withChronology(ISOChronology.getInstanceUTC());
   /**
    * Mapper to use to serialize/deserialize Druid objects (JSON)
    */

http://git-wip-us.apache.org/repos/asf/hive/blob/5f26f393/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java
index 9d2ec82..8156231 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidOutputFormat.java
@@ -41,7 +41,6 @@ import io.druid.segment.indexing.granularity.GranularitySpec;
 import io.druid.segment.indexing.granularity.UniformGranularitySpec;
 import io.druid.segment.realtime.plumber.CustomVersioningPolicy;
 
-import org.apache.calcite.adapter.druid.DruidTable;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -117,8 +116,8 @@ public class DruidOutputFormat<K, V> implements HiveOutputFormat<K, DruidWritabl
     for (String name : columnNameProperty.split(",")) {
       columnNames.add(name);
     }
-    if (!columnNames.contains(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
-      throw new IllegalStateException("Timestamp column (' " + DruidTable.DEFAULT_TIMESTAMP_COLUMN +
+    if (!columnNames.contains(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN)) {
+      throw new IllegalStateException("Timestamp column (' " + DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN +
               "') not specified in create table; list of columns is : " +
               tableProperties.getProperty(serdeConstants.LIST_COLUMNS));
     }
@@ -144,7 +143,7 @@ public class DruidOutputFormat<K, V> implements HiveOutputFormat<K, DruidWritabl
           break;
         case TIMESTAMP:
           String tColumnName = columnNames.get(i);
-          if (!tColumnName.equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN) && !tColumnName
+          if (!tColumnName.equals(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN) && !tColumnName
                   .equals(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME)) {
             throw new IOException("Dimension " + tColumnName + " does not have STRING type: " +
                     f.getPrimitiveCategory());
@@ -165,7 +164,7 @@ public class DruidOutputFormat<K, V> implements HiveOutputFormat<K, DruidWritabl
     }
     List<AggregatorFactory> aggregatorFactories = aggregatorFactoryBuilder.build();
     final InputRowParser inputRowParser = new MapInputRowParser(new TimeAndDimsParseSpec(
-            new TimestampSpec(DruidTable.DEFAULT_TIMESTAMP_COLUMN, "auto", null),
+            new TimestampSpec(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, "auto", null),
             new DimensionsSpec(dimensions,
                     Lists.newArrayList(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME), null
             )

http://git-wip-us.apache.org/repos/asf/hive/blob/5f26f393/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
index 2f53616..bcabbd6 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
@@ -27,7 +27,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.calcite.adapter.druid.DruidDateTimeUtils;
 import org.apache.calcite.adapter.druid.DruidTable;
 import org.apache.commons.lang3.StringEscapeUtils;
 import org.apache.commons.lang3.StringUtils;
@@ -56,7 +55,6 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.jboss.netty.handler.codec.http.HttpMethod;
 import org.joda.time.Interval;
-import org.joda.time.Period;
 import org.joda.time.chrono.ISOChronology;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -65,10 +63,6 @@ import com.fasterxml.jackson.core.JsonParseException;
 import com.fasterxml.jackson.core.type.TypeReference;
 import com.fasterxml.jackson.databind.JsonMappingException;
 import com.google.common.collect.Lists;
-import com.metamx.common.lifecycle.Lifecycle;
-import com.metamx.http.client.HttpClient;
-import com.metamx.http.client.HttpClientConfig;
-import com.metamx.http.client.HttpClientInit;
 import com.metamx.http.client.Request;
 
 import io.druid.query.BaseQuery;
@@ -308,7 +302,7 @@ public class DruidQueryBasedInputFormat extends InputFormat<NullWritable, DruidW
     // following the Select threshold configuration property
     final List<Interval> intervals = new ArrayList<>();
     if (query.getIntervals().size() == 1 && query.getIntervals().get(0).withChronology(
-            ISOChronology.getInstanceUTC()).equals(DruidTable.DEFAULT_INTERVAL)) {
+            ISOChronology.getInstanceUTC()).equals(DruidStorageHandlerUtils.DEFAULT_INTERVAL)) {
       // Default max and min, we should execute a time boundary query to get a
       // more precise range
       TimeBoundaryQueryBuilder timeBuilder = new Druids.TimeBoundaryQueryBuilder();

http://git-wip-us.apache.org/repos/asf/hive/blob/5f26f393/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidRecordWriter.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidRecordWriter.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidRecordWriter.java
index e97f588..cf4dad6 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidRecordWriter.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidRecordWriter.java
@@ -41,7 +41,6 @@ import io.druid.segment.realtime.appenderator.SegmentsAndMetadata;
 import io.druid.segment.realtime.plumber.Committers;
 import io.druid.timeline.DataSegment;
 import io.druid.timeline.partition.LinearShardSpec;
-import org.apache.calcite.adapter.druid.DruidTable;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -239,7 +238,7 @@ public class DruidRecordWriter implements RecordWriter<NullWritable, DruidWritab
   @Override
   public void write(Writable w) throws IOException {
     DruidWritable record = (DruidWritable) w;
-    final long timestamp = (long) record.getValue().get(DruidTable.DEFAULT_TIMESTAMP_COLUMN);
+    final long timestamp = (long) record.getValue().get(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN);
     final long truncatedTime = (long) record.getValue()
             .get(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/5f26f393/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java
index fddabf7..b5b254a 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidGroupByQueryRecordReader.java
@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.List;
 
-import org.apache.calcite.adapter.druid.DruidTable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
@@ -141,7 +140,7 @@ public class DruidGroupByQueryRecordReader
     // Create new value
     DruidWritable value = new DruidWritable();
     // 1) The timestamp column
-    value.getValue().put(DruidTable.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
+    value.getValue().put(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
     // 2) The dimension columns
     for (int i = 0; i < query.getDimensions().size(); i++) {
       DimensionSpec ds = query.getDimensions().get(i);
@@ -193,7 +192,7 @@ public class DruidGroupByQueryRecordReader
       // Update value
       value.getValue().clear();
       // 1) The timestamp column
-      value.getValue().put(DruidTable.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
+      value.getValue().put(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
       // 2) The dimension columns
       for (int i = 0; i < query.getDimensions().size(); i++) {
         DimensionSpec ds = query.getDimensions().get(i);

http://git-wip-us.apache.org/repos/asf/hive/blob/5f26f393/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSelectQueryRecordReader.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSelectQueryRecordReader.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSelectQueryRecordReader.java
index 8a41e91..82eec5d 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSelectQueryRecordReader.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSelectQueryRecordReader.java
@@ -22,7 +22,6 @@ import java.io.InputStream;
 import java.util.Iterator;
 import java.util.List;
 
-import org.apache.calcite.adapter.druid.DruidTable;
 import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
 import org.apache.hadoop.io.NullWritable;
 
@@ -81,7 +80,7 @@ public class DruidSelectQueryRecordReader
     // Create new value
     DruidWritable value = new DruidWritable();
     EventHolder e = values.next();
-    value.getValue().put(DruidTable.DEFAULT_TIMESTAMP_COLUMN, e.getTimestamp().getMillis());
+    value.getValue().put(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, e.getTimestamp().getMillis());
     value.getValue().putAll(e.getEvent());
     return value;
   }
@@ -92,7 +91,7 @@ public class DruidSelectQueryRecordReader
       // Update value
       value.getValue().clear();
       EventHolder e = values.next();
-      value.getValue().put(DruidTable.DEFAULT_TIMESTAMP_COLUMN, e.getTimestamp().getMillis());
+      value.getValue().put(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, e.getTimestamp().getMillis());
       value.getValue().putAll(e.getEvent());
       return true;
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/5f26f393/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
index 8d98b3b..8750285 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
@@ -27,7 +27,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Properties;
 
-import org.apache.calcite.adapter.druid.DruidTable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.type.HiveChar;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
@@ -127,8 +126,8 @@ public class DruidSerDe extends AbstractSerDe {
               && !org.apache.commons.lang3.StringUtils
               .isEmpty(properties.getProperty(serdeConstants.LIST_COLUMN_TYPES))) {
         columnNames.addAll(Utilities.getColumnNames(properties));
-        if (!columnNames.contains(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
-          throw new SerDeException("Timestamp column (' " + DruidTable.DEFAULT_TIMESTAMP_COLUMN +
+        if (!columnNames.contains(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN)) {
+          throw new SerDeException("Timestamp column (' " + DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN +
                   "') not specified in create table; list of columns is : " +
                   properties.getProperty(serdeConstants.LIST_COLUMNS));
         }
@@ -181,7 +180,7 @@ public class DruidSerDe extends AbstractSerDe {
           throw new SerDeException(e);
         }
         for (Entry<String, ColumnAnalysis> columnInfo : schemaInfo.getColumns().entrySet()) {
-          if (columnInfo.getKey().equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
+          if (columnInfo.getKey().equals(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN)) {
             // Special handling for timestamp column
             columnNames.add(columnInfo.getKey()); // field name
             PrimitiveTypeInfo type = TypeInfoFactory.timestampTypeInfo; // field type
@@ -308,7 +307,7 @@ public class DruidSerDe extends AbstractSerDe {
           List<String> columnNames, List<PrimitiveTypeInfo> columnTypes,
           Map<String, PrimitiveTypeInfo> mapColumnNamesTypes) {
     // Timestamp column
-    columnNames.add(DruidTable.DEFAULT_TIMESTAMP_COLUMN);
+    columnNames.add(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN);
     columnTypes.add(TypeInfoFactory.timestampTypeInfo);
     // Aggregator columns
     for (AggregatorFactory af : query.getAggregatorSpecs()) {
@@ -336,7 +335,7 @@ public class DruidSerDe extends AbstractSerDe {
           List<String> columnNames, List<PrimitiveTypeInfo> columnTypes,
           Map<String, PrimitiveTypeInfo> mapColumnNamesTypes) {
     // Timestamp column
-    columnNames.add(DruidTable.DEFAULT_TIMESTAMP_COLUMN);
+    columnNames.add(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN);
     columnTypes.add(TypeInfoFactory.timestampTypeInfo);
     // Dimension column
     columnNames.add(query.getDimensionSpec().getOutputName());
@@ -368,7 +367,7 @@ public class DruidSerDe extends AbstractSerDe {
           String address, Map<String, PrimitiveTypeInfo> mapColumnNamesTypes)
                   throws SerDeException {
     // Timestamp column
-    columnNames.add(DruidTable.DEFAULT_TIMESTAMP_COLUMN);
+    columnNames.add(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN);
     columnTypes.add(TypeInfoFactory.timestampTypeInfo);
     // Dimension columns
     for (DimensionSpec ds : query.getDimensions()) {
@@ -410,7 +409,7 @@ public class DruidSerDe extends AbstractSerDe {
           List<String> columnNames, List<PrimitiveTypeInfo> columnTypes,
           Map<String, PrimitiveTypeInfo> mapColumnNamesTypes) {
     // Timestamp column
-    columnNames.add(DruidTable.DEFAULT_TIMESTAMP_COLUMN);
+    columnNames.add(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN);
     columnTypes.add(TypeInfoFactory.timestampTypeInfo);
     // Dimension columns
     for (DimensionSpec ds : query.getDimensions()) {

http://git-wip-us.apache.org/repos/asf/hive/blob/5f26f393/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTimeseriesQueryRecordReader.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTimeseriesQueryRecordReader.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTimeseriesQueryRecordReader.java
index 8c2fb10..a1c8488 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTimeseriesQueryRecordReader.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTimeseriesQueryRecordReader.java
@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.List;
 
-import org.apache.calcite.adapter.druid.DruidTable;
 import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
 import org.apache.hadoop.io.NullWritable;
 
@@ -71,7 +70,7 @@ public class DruidTimeseriesQueryRecordReader
   public DruidWritable getCurrentValue() throws IOException, InterruptedException {
     // Create new value
     DruidWritable value = new DruidWritable();
-    value.getValue().put(DruidTable.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
+    value.getValue().put(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
     value.getValue().putAll(current.getValue().getBaseObject());
     return value;
   }
@@ -81,7 +80,7 @@ public class DruidTimeseriesQueryRecordReader
     if (nextKeyValue()) {
       // Update value
       value.getValue().clear();
-      value.getValue().put(DruidTable.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
+      value.getValue().put(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
       value.getValue().putAll(current.getValue().getBaseObject());
       return true;
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/5f26f393/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTopNQueryRecordReader.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTopNQueryRecordReader.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTopNQueryRecordReader.java
index d431925..afdf670 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTopNQueryRecordReader.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidTopNQueryRecordReader.java
@@ -22,7 +22,6 @@ import java.io.InputStream;
 import java.util.Iterator;
 import java.util.List;
 
-import org.apache.calcite.adapter.druid.DruidTable;
 import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
 import org.apache.hadoop.io.NullWritable;
 
@@ -80,7 +79,7 @@ public class DruidTopNQueryRecordReader
   public DruidWritable getCurrentValue() throws IOException, InterruptedException {
     // Create new value
     DruidWritable value = new DruidWritable();
-    value.getValue().put(DruidTable.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
+    value.getValue().put(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
     if (values.hasNext()) {
       value.getValue().putAll(values.next().getBaseObject());
       return value;
@@ -93,7 +92,7 @@ public class DruidTopNQueryRecordReader
     if (nextKeyValue()) {
       // Update value
       value.getValue().clear();
-      value.getValue().put(DruidTable.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
+      value.getValue().put(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, current.getTimestamp().getMillis());
       if (values.hasNext()) {
         value.getValue().putAll(values.next().getBaseObject());
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/5f26f393/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java
index 2aeb279..fb15830 100644
--- a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java
+++ b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java
@@ -139,7 +139,7 @@ public class TestHiveDruidQueryBasedInputFormat extends TestCase {
   private static final String TIMESERIES_QUERY =
       "{  \"queryType\": \"timeseries\", "
           + " \"dataSource\": \"sample_datasource\", "
-          + " \"granularity\": \"day\", "
+          + " \"granularity\": \"DAY\", "
           + " \"descending\": \"true\", "
           + " \"intervals\": [ 
\"2012-01-01T00:00:00.000/2012-01-03T00:00:00.000\" ]}";
   private static final String TIMESERIES_QUERY_SPLIT =
@@ -149,7 +149,7 @@ public class TestHiveDruidQueryBasedInputFormat extends TestCase {
           + "\"descending\":true,"
           + "\"virtualColumns\":[],"
           + "\"filter\":null,"
-          + "\"granularity\":{\"type\":\"period\",\"period\":\"P1D\",\"timeZone\":\"America/Los_Angeles\",\"origin\":null},"
+          + "\"granularity\":\"DAY\","
           + "\"aggregations\":[],"
           + "\"postAggregations\":[],"
           + "\"context\":null}, [localhost:8082]}]";
@@ -213,7 +213,7 @@ public class TestHiveDruidQueryBasedInputFormat extends TestCase {
           + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2012-01-01T00:00:00.000-08:00/2012-01-03T00:00:00.000-08:00\"]},"
           + "\"virtualColumns\":[],"
           + "\"filter\":null,"
-          + "\"granularity\":{\"type\":\"period\",\"period\":\"P1D\",\"timeZone\":\"America/Los_Angeles\",\"origin\":null},"
+          + "\"granularity\":\"DAY\","
           + "\"dimensions\":[{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"country\",\"outputName\":\"country\",\"outputType\":\"STRING\"},"
           + "{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"device\",\"outputName\":\"device\",\"outputType\":\"STRING\"}],"
           + "\"aggregations\":[{\"type\":\"longSum\",\"name\":\"total_usage\",\"fieldName\":\"user_count\",\"expression\":null},"

http://git-wip-us.apache.org/repos/asf/hive/blob/5f26f393/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java b/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java
index 4962e0b..af75bfb 100644
--- a/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java
+++ b/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java
@@ -87,21 +87,21 @@ public class TestDruidRecordWriter {
 
   final List<ImmutableMap<String, Object>> expectedRows = ImmutableList.of(
           ImmutableMap.<String, Object>of(
-                  DruidTable.DEFAULT_TIMESTAMP_COLUMN,
+                  DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN,
                   DateTime.parse("2014-10-22T00:00:00.000Z").getMillis(),
                   "host", ImmutableList.of("a.example.com"),
                   "visited_sum", 190L,
                   "unique_hosts", 1.0d
           ),
           ImmutableMap.<String, Object>of(
-                  DruidTable.DEFAULT_TIMESTAMP_COLUMN,
+                  DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN,
                   DateTime.parse("2014-10-22T01:00:00.000Z").getMillis(),
                   "host", ImmutableList.of("b.example.com"),
                   "visited_sum", 175L,
                   "unique_hosts", 1.0d
           ),
           ImmutableMap.<String, Object>of(
-                  DruidTable.DEFAULT_TIMESTAMP_COLUMN,
+                  DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN,
                   DateTime.parse("2014-10-22T02:00:00.000Z").getMillis(),
                   "host", ImmutableList.of("c.example.com"),
                   "visited_sum", 270L,
@@ -109,6 +109,13 @@ public class TestDruidRecordWriter {
           )
   );
 
+
+  @Test
+  public void testTimeStampColumnName() {
+    Assert.assertEquals("Time column name need to match to ensure serdeser 
compatibility",
+            DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, 
DruidTable.DEFAULT_TIMESTAMP_COLUMN
+    );
+  }
   // This test fails due to conflict of guava classes with hive-exec jar.
   @Ignore
   @Test
@@ -120,7 +127,7 @@ public class TestDruidRecordWriter {
     Configuration config = new Configuration();
 
     final InputRowParser inputRowParser = new MapInputRowParser(new TimeAndDimsParseSpec(
-            new TimestampSpec(DruidTable.DEFAULT_TIMESTAMP_COLUMN, "auto", null),
+            new TimestampSpec(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN, "auto", null),
             new DimensionsSpec(ImmutableList.<DimensionSchema>of(new StringDimensionSchema("host")),
                     null, null
             )
@@ -169,7 +176,7 @@ public class TestDruidRecordWriter {
                         .put(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME,
                                 Granularities.DAY.bucketStart(
                                         new DateTime((long) input
-                                                .get(DruidTable.DEFAULT_TIMESTAMP_COLUMN)))
+                                                .get(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN)))
                                         .getMillis()
                         ).build());
               }
@@ -217,7 +224,7 @@ public class TestDruidRecordWriter {
 
       Assert.assertEquals(ImmutableList.of("host"), actual.getDimensions());
 
-      Assert.assertEquals(expected.get(DruidTable.DEFAULT_TIMESTAMP_COLUMN),
+      Assert.assertEquals(expected.get(DruidStorageHandlerUtils.DEFAULT_TIMESTAMP_COLUMN),
               actual.getTimestamp().getMillis()
       );
       Assert.assertEquals(expected.get("host"), actual.getDimension("host"));
