Repository: hive
Updated Branches:
  refs/heads/master 466f51034 -> 464a3f61a


HIVE-18518 : Upgrade druid version to 0.11.0 (Nishant Bangarwa via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <hashut...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/464a3f61
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/464a3f61
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/464a3f61

Branch: refs/heads/master
Commit: 464a3f61a0c4a1c4e44a1ce427f604295534e969
Parents: 466f510
Author: Nishant Bangarwa <nishant.mon...@gmail.com>
Authored: Tue Jan 23 08:27:00 2018 -0800
Committer: Ashutosh Chauhan <hashut...@apache.org>
Committed: Fri Feb 2 10:53:17 2018 -0800

----------------------------------------------------------------------
 druid-handler/pom.xml                           |  2 +-
 .../hive/druid/DruidStorageHandlerUtils.java    |  2 +-
 .../serde/HiveDruidSerializationModule.java     |  3 ++
 .../hive/druid/TestDruidStorageHandler.java     | 45 ++++++++++----------
 .../TestHiveDruidQueryBasedInputFormat.java     | 16 +++----
 .../hive/ql/io/TestDruidRecordWriter.java       |  2 +-
 itests/qtest-druid/pom.xml                      |  4 +-
 pom.xml                                         |  4 +-
 8 files changed, 41 insertions(+), 37 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/464a3f61/druid-handler/pom.xml
----------------------------------------------------------------------
diff --git a/druid-handler/pom.xml b/druid-handler/pom.xml
index 2a62b90..670d82b 100644
--- a/druid-handler/pom.xml
+++ b/druid-handler/pom.xml
@@ -29,7 +29,7 @@
 
   <properties>
     <hive.path.to.root>..</hive.path.to.root>
-    <druid.metamx.util.version>0.27.10</druid.metamx.util.version>
+    <druid.metamx.util.version>1.3.2</druid.metamx.util.version>
     <druid.guava.version>16.0.1</druid.guava.version>
   </properties>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/464a3f61/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
index 9de0097..2f956b1 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
@@ -24,12 +24,12 @@ import com.fasterxml.jackson.dataformat.smile.SmileFactory;
 import com.google.common.base.Throwables;
 import com.google.common.collect.Interner;
 import com.google.common.collect.Interners;
+import com.metamx.common.JodaUtils;
 import com.metamx.emitter.EmittingLogger;
 import com.metamx.emitter.core.NoopEmitter;
 import com.metamx.emitter.service.ServiceEmitter;
 import com.metamx.http.client.HttpClient;
 import com.metamx.http.client.response.InputStreamResponseHandler;
-import io.druid.common.utils.JodaUtils;
 import io.druid.jackson.DefaultObjectMapper;
 import io.druid.math.expr.ExprMacroTable;
 import io.druid.metadata.MetadataStorageTablesConfig;

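Note on the import change above: in Druid 0.11.0 the JodaUtils helpers consumed by DruidStorageHandlerUtils come from the com.metamx java-util artifact rather than io.druid.common.utils; only the import moves, the call sites stay the same. A minimal, hypothetical sketch of the helper after the move (the intervals below are illustrative and not taken from this patch):

    import com.metamx.common.JodaUtils;
    import org.joda.time.Interval;
    import java.util.Arrays;

    public class JodaUtilsExample {
      public static void main(String[] args) {
        // Collapse two overlapping intervals into the single interval that covers both.
        Interval umbrella = JodaUtils.umbrellaInterval(Arrays.asList(
            Interval.parse("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"),
            Interval.parse("2012-01-01T12:00:00.000Z/2012-01-03T00:00:00.000Z")));
        System.out.println(umbrella);  // 2012-01-01T00:00:00.000Z/2012-01-03T00:00:00.000Z
      }
    }
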
http://git-wip-us.apache.org/repos/asf/hive/blob/464a3f61/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java
index f72fd0d..8a110ae 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/HiveDruidSerializationModule.java
@@ -18,10 +18,13 @@
 package org.apache.hadoop.hive.druid.serde;
 
 import io.druid.java.util.common.granularity.PeriodGranularity;
+import io.druid.query.spec.LegacySegmentSpec;
 
 import com.fasterxml.jackson.core.util.VersionUtil;
 import com.fasterxml.jackson.databind.module.SimpleModule;
 
+import org.joda.time.Interval;
+
 /**
  * This class is used to define/override any serde behavior for classes from druid.
  * Currently it is used to override the default behavior when serializing PeriodGranularity to include user timezone.

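The added imports suggest this module now registers interval-related serializers as well; the registration itself falls outside the visible hunk. As a rough sketch only (class name and behavior are assumptions, not the committed code), a Jackson SimpleModule can pin Joda Interval serialization to UTC like this:

    import com.fasterxml.jackson.core.JsonGenerator;
    import com.fasterxml.jackson.databind.JsonSerializer;
    import com.fasterxml.jackson.databind.SerializerProvider;
    import com.fasterxml.jackson.databind.module.SimpleModule;
    import org.joda.time.DateTimeZone;
    import org.joda.time.Interval;
    import java.io.IOException;

    public class IntervalUtcModule extends SimpleModule {
      public IntervalUtcModule() {
        addSerializer(Interval.class, new JsonSerializer<Interval>() {
          @Override
          public void serialize(Interval value, JsonGenerator gen, SerializerProvider provider) throws IOException {
            // Write both endpoints in UTC so the textual form does not depend on the JVM time zone.
            gen.writeString(value.getStart().withZone(DateTimeZone.UTC) + "/" + value.getEnd().withZone(DateTimeZone.UTC));
          }
        });
      }
    }
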
http://git-wip-us.apache.org/repos/asf/hive/blob/464a3f61/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
index 6f7fc78..6a496c2 100644
--- a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
+++ b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java
@@ -45,6 +45,7 @@ import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 
+import org.joda.time.DateTimeZone;
 import org.joda.time.Interval;
 import org.junit.Assert;
 import org.junit.Before;
@@ -88,7 +89,7 @@ public class TestDruidStorageHandler {
   private DruidStorageHandler druidStorageHandler;
 
   private DataSegment createSegment(String location) throws IOException {
-    return createSegment(location, new Interval(100, 170), "v1", new LinearShardSpec(0));
+    return createSegment(location, new Interval(100, 170, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
   }
 
   private DataSegment createSegment(String location, Interval interval, String version,
@@ -321,7 +322,7 @@ public class TestDruidStorageHandler {
     // This create and publish the segment to be overwritten
     List<DataSegment> existingSegments = Arrays
             .asList(createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(),
-                    new Interval(100, 150), "v0", new LinearShardSpec(0)));
+                    new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0)));
     DruidStorageHandlerUtils
             .publishSegmentsAndCommit(connector, metadataStorageTablesConfig, DATA_SOURCE_NAME,
                     existingSegments,
@@ -332,7 +333,7 @@ public class TestDruidStorageHandler {
 
     // This creates and publish new segment
     DataSegment dataSegment = createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(),
-            new Interval(180, 250), "v1", new LinearShardSpec(0));
+            new Interval(180, 250, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
 
     Path descriptorPath = DruidStorageHandlerUtils.makeSegmentDescriptorOutputPath(dataSegment,
             new Path(taskDirPath, DruidStorageHandler.SEGMENTS_DESCRIPTOR_DIR_NAME)
@@ -374,7 +375,7 @@ public class TestDruidStorageHandler {
     // This create and publish the segment to be overwritten
     List<DataSegment> existingSegments = Arrays
             .asList(createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(),
-                    new Interval(100, 150), "v0", new LinearShardSpec(0)));
+                    new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0)));
     DruidStorageHandlerUtils
             .publishSegmentsAndCommit(connector, metadataStorageTablesConfig, DATA_SOURCE_NAME,
                     existingSegments,
@@ -425,7 +426,7 @@ public class TestDruidStorageHandler {
 
     // #5
     DataSegment dataSegment1 = createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(),
-            new Interval(180, 250), "v1", new LinearShardSpec(0));
+            new Interval(180, 250, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
     Path descriptorPath1 = DruidStorageHandlerUtils.makeSegmentDescriptorOutputPath(dataSegment1,
             new Path(taskDirPath, DruidStorageHandler.SEGMENTS_DESCRIPTOR_DIR_NAME)
     );
@@ -440,7 +441,7 @@ public class TestDruidStorageHandler {
 
     // #6
     DataSegment dataSegment2 = createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(),
-            new Interval(200, 250), "v1", new LinearShardSpec(0));
+            new Interval(200, 250, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
     Path descriptorPath2 = DruidStorageHandlerUtils.makeSegmentDescriptorOutputPath(dataSegment2,
             new Path(taskDirPath, DruidStorageHandler.SEGMENTS_DESCRIPTOR_DIR_NAME)
     );
@@ -455,7 +456,7 @@ public class TestDruidStorageHandler {
 
     // #7
     DataSegment dataSegment3 = createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(),
-            new Interval(100, 200), "v1", new LinearShardSpec(0));
+            new Interval(100, 200, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
     Path descriptorPath3 = DruidStorageHandlerUtils.makeSegmentDescriptorOutputPath(dataSegment3,
             new Path(taskDirPath, DruidStorageHandler.SEGMENTS_DESCRIPTOR_DIR_NAME)
     );
@@ -514,7 +515,7 @@ public class TestDruidStorageHandler {
     Path taskDirPath = new Path(tableWorkingPath, druidStorageHandler.makeStagingName());
     List<DataSegment> existingSegments = Arrays
             .asList(createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(),
-                    new Interval(100, 150), "v0", new LinearShardSpec(1)));
+                    new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(1)));
     HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig();
     pusherConfig.setStorageDirectory(config.get(String.valueOf(HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY)));
     DataSegmentPusher dataSegmentPusher = new HdfsDataSegmentPusher(pusherConfig, config, DruidStorageHandlerUtils.JSON_MAPPER);
@@ -527,7 +528,7 @@ public class TestDruidStorageHandler {
                     dataSegmentPusher
             );
     DataSegment dataSegment = createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(),
-            new Interval(100, 150), "v1", new LinearShardSpec(0));
+            new Interval(100, 150, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
     Path descriptorPath = DruidStorageHandlerUtils.makeSegmentDescriptorOutputPath(dataSegment,
             new Path(taskDirPath, DruidStorageHandler.SEGMENTS_DESCRIPTOR_DIR_NAME)
     );
@@ -571,7 +572,7 @@ public class TestDruidStorageHandler {
 
     List<DataSegment> existingSegments = Arrays
             .asList(createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(),
-                    new Interval(100, 150), "v0", new LinearShardSpec(0)));
+                    new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0)));
     DruidStorageHandlerUtils
             .publishSegmentsAndCommit(connector, metadataStorageTablesConfig, DATA_SOURCE_NAME,
                     existingSegments,
@@ -581,7 +582,7 @@ public class TestDruidStorageHandler {
             );
 
     DataSegment dataSegment = createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(),
-            new Interval(100, 150), "v0", new LinearShardSpec(0));
+            new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0));
     Path descriptorPath = DruidStorageHandlerUtils.makeSegmentDescriptorOutputPath(dataSegment,
             new Path(taskDirPath, DruidStorageHandler.SEGMENTS_DESCRIPTOR_DIR_NAME)
     );
@@ -621,7 +622,7 @@ public class TestDruidStorageHandler {
     Path taskDirPath = new Path(tableWorkingPath, druidStorageHandler.makeStagingName());
     List<DataSegment> existingSegments = Arrays
             .asList(createSegment(new Path(taskDirPath, "index_old.zip").toString(),
-                    new Interval(100, 150), "v0", new LinearShardSpec(1)));
+                    new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(1)));
     HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig();
     pusherConfig.setStorageDirectory(config.get(String.valueOf(HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY)));
     DataSegmentPusher dataSegmentPusher = new HdfsDataSegmentPusher(pusherConfig, config, DruidStorageHandlerUtils.JSON_MAPPER);
@@ -633,7 +634,7 @@ public class TestDruidStorageHandler {
                     dataSegmentPusher
             );
     DataSegment dataSegment = createSegment(new Path(taskDirPath, "index.zip").toString(),
-            new Interval(100, 150), "v1", new LinearShardSpec(0));
+            new Interval(100, 150, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
     Path descriptorPath = DruidStorageHandlerUtils.makeSegmentDescriptorOutputPath(dataSegment,
             new Path(taskDirPath, DruidStorageHandler.SEGMENTS_DESCRIPTOR_DIR_NAME)
     );
@@ -641,7 +642,7 @@ public class TestDruidStorageHandler {
 
     // Create segment file at the destination location with LinearShardSpec(2)
     DataSegment segment = createSegment(new Path(taskDirPath, "index_conflict.zip").toString(),
-            new Interval(100, 150), "v1", new LinearShardSpec(1));
+            new Interval(100, 150, DateTimeZone.UTC), "v1", new LinearShardSpec(1));
     Path segmentPath = new Path(dataSegmentPusher.getPathForHadoop(), dataSegmentPusher.makeIndexPathName(segment, DruidStorageHandlerUtils.INDEX_ZIP));
     FileUtils.writeStringToFile(new File(segmentPath.toUri()), "dummy");
 
@@ -682,13 +683,13 @@ public class TestDruidStorageHandler {
     Path taskDirPath = new Path(tableWorkingPath, druidStorageHandler.makeStagingName());
     List<DataSegment> existingSegments = Arrays.asList(
             createSegment(new Path(taskDirPath, "index_old_1.zip").toString(),
-                    new Interval(100, 150),
+                    new Interval(100, 150, DateTimeZone.UTC),
                     "v0", new LinearShardSpec(0)),
             createSegment(new Path(taskDirPath, "index_old_2.zip").toString(),
-                    new Interval(150, 200),
+                    new Interval(150, 200, DateTimeZone.UTC),
                     "v0", new LinearShardSpec(0)),
             createSegment(new Path(taskDirPath, "index_old_3.zip").toString(),
-                    new Interval(200, 300),
+                    new Interval(200, 300, DateTimeZone.UTC),
                     "v0", new LinearShardSpec(0)));
     HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig();
     pusherConfig.setStorageDirectory(taskDirPath.toString());
@@ -703,7 +704,7 @@ public class TestDruidStorageHandler {
 
     // Try appending segment with conflicting interval
     DataSegment conflictingSegment = createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(),
-            new Interval(100, 300), "v1", new LinearShardSpec(0));
+            new Interval(100, 300, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
     Path descriptorPath = DruidStorageHandlerUtils
             .makeSegmentDescriptorOutputPath(conflictingSegment,
                     new Path(taskDirPath, DruidStorageHandler.SEGMENTS_DESCRIPTOR_DIR_NAME)
@@ -723,11 +724,11 @@ public class TestDruidStorageHandler {
     Path taskDirPath = new Path(tableWorkingPath, druidStorageHandler.makeStagingName());
     List<DataSegment> existingSegments = Arrays
             .asList(createSegment(new Path(taskDirPath, "index_old_1.zip").toString(),
-                    new Interval(100, 150), "v0", new NoneShardSpec()),
+                    new Interval(100, 150, DateTimeZone.UTC), "v0", new NoneShardSpec()),
                     createSegment(new Path(taskDirPath, "index_old_2.zip").toString(),
-                            new Interval(200, 250), "v0", new LinearShardSpec(0)),
+                            new Interval(200, 250, DateTimeZone.UTC), "v0", new LinearShardSpec(0)),
                     createSegment(new Path(taskDirPath, "index_old_3.zip").toString(),
-                            new Interval(250, 300), "v0", new LinearShardSpec(0)));
+                            new Interval(250, 300, DateTimeZone.UTC), "v0", new LinearShardSpec(0)));
     HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig();
     pusherConfig.setStorageDirectory(taskDirPath.toString());
     DataSegmentPusher dataSegmentPusher = new HdfsDataSegmentPusher(pusherConfig, config, DruidStorageHandlerUtils.JSON_MAPPER);
@@ -741,7 +742,7 @@ public class TestDruidStorageHandler {
 
     // Try appending to non extendable shard spec
     DataSegment conflictingSegment = createSegment(new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(),
-            new Interval(100, 150), "v1", new LinearShardSpec(0));
+            new Interval(100, 150, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
     Path descriptorPath = DruidStorageHandlerUtils
             .makeSegmentDescriptorOutputPath(conflictingSegment,
                     new Path(taskDirPath, DruidStorageHandler.SEGMENTS_DESCRIPTOR_DIR_NAME)

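The test changes above all follow one pattern: Joda's two-argument Interval(long, long) constructor interprets the instants in the JVM default time zone, so the interval's printed (and JSON-serialized) form varies by machine, while the three-argument form pins it to UTC. A small, hypothetical illustration (not part of this patch):

    import org.joda.time.DateTimeZone;
    import org.joda.time.Interval;

    public class IntervalZoneExample {
      public static void main(String[] args) {
        Interval defaultZone = new Interval(100, 170);            // rendered in the JVM default zone
        Interval utc = new Interval(100, 170, DateTimeZone.UTC);  // rendered in UTC everywhere
        System.out.println(defaultZone);  // e.g. 1969-12-31T16:00:00.100-08:00/... on a PST host
        System.out.println(utc);          // 1970-01-01T00:00:00.100Z/1970-01-01T00:00:00.170Z
      }
    }
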
http://git-wip-us.apache.org/repos/asf/hive/blob/464a3f61/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java
index 514dba3..bb43d51 100644
--- a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java
+++ b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java
@@ -38,11 +38,11 @@ public class TestHiveDruidQueryBasedInputFormat extends TestCase {
           + " \"dataSource\": \"sample_datasource\", "
           + " \"granularity\": \"DAY\", "
           + " \"descending\": \"true\", "
-          + " \"intervals\": [ \"2012-01-01T00:00:00.000/2012-01-03T00:00:00.000\" ]}";
+          + " \"intervals\": [ \"2012-01-01T00:00:00.000-08:00/2012-01-03T00:00:00.000-08:00\" ]}";
   private static final String TIMESERIES_QUERY_SPLIT =
       "[HiveDruidSplit{{\"queryType\":\"timeseries\","
           + "\"dataSource\":{\"type\":\"table\",\"name\":\"sample_datasource\"},"
-          + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2012-01-01T00:00:00.000-08:00/2012-01-03T00:00:00.000-08:00\"]},"
+          + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2012-01-01T08:00:00.000Z/2012-01-03T08:00:00.000Z\"]},"
           + "\"descending\":true,"
           + "\"virtualColumns\":[],"
           + "\"filter\":null,"
@@ -71,7 +71,7 @@ public class TestHiveDruidQueryBasedInputFormat extends TestCase {
           + " ], "
           + " \"granularity\": \"all\", "
           + " \"intervals\": [  "
-          + "  \"2013-08-31T00:00:00.000/2013-09-03T00:00:00.000\" "
+          + "  \"2013-08-31T00:00:00.000-07:00/2013-09-03T00:00:00.000-07:00\" "
           + " ]}";
   private static final String TOPN_QUERY_SPLIT =
       "[HiveDruidSplit{{\"queryType\":\"topN\","
@@ -80,7 +80,7 @@ public class TestHiveDruidQueryBasedInputFormat extends TestCase {
           + "\"dimension\":{\"type\":\"LegacyDimensionSpec\",\"dimension\":\"sample_dim\",\"outputName\":\"sample_dim\",\"outputType\":\"STRING\"},"
           + "\"metric\":{\"type\":\"LegacyTopNMetricSpec\",\"metric\":\"count\"},"
           + "\"threshold\":5,"
-          + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2013-08-31T00:00:00.000-07:00/2013-09-03T00:00:00.000-07:00\"]},"
+          + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2013-08-31T07:00:00.000Z/2013-09-03T07:00:00.000Z\"]},"
           + "\"filter\":null,"
           + "\"granularity\":{\"type\":\"all\"},"
           + "\"aggregations\":[{\"type\":\"longSum\",\"name\":\"count\",\"fieldName\":\"count\",\"expression\":null},"
@@ -102,12 +102,12 @@ public class TestHiveDruidQueryBasedInputFormat extends TestCase {
           + "  { \"type\": \"longSum\", \"name\": \"total_usage\", \"fieldName\": \"user_count\" },  "
           + "  { \"type\": \"doubleSum\", \"name\": \"data_transfer\", \"fieldName\": \"data_transfer\" } "
           + " ], "
-          + " \"intervals\": [ \"2012-01-01T00:00:00.000/2012-01-03T00:00:00.000\" ]"
+          + " \"intervals\": [ \"2012-01-01T00:00:00.000-08:00/2012-01-03T00:00:00.000-08:00\" ]"
           + " }";
   private static final String GROUP_BY_QUERY_SPLIT =
       "[HiveDruidSplit{{\"queryType\":\"groupBy\","
           + "\"dataSource\":{\"type\":\"table\",\"name\":\"sample_datasource\"},"
-          + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2012-01-01T00:00:00.000-08:00/2012-01-03T00:00:00.000-08:00\"]},"
+          + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2012-01-01T08:00:00.000Z/2012-01-03T08:00:00.000Z\"]},"
           + "\"virtualColumns\":[],"
           + "\"filter\":null,"
           + "\"granularity\":\"DAY\","
@@ -128,13 +128,13 @@ public class TestHiveDruidQueryBasedInputFormat extends TestCase {
           + " \"dimensions\":[\"robot\",\"namespace\",\"anonymous\",\"unpatrolled\",\"page\",\"language\",\"newpage\",\"user\"],  "
           + " \"metrics\":[\"count\",\"added\",\"delta\",\"variation\",\"deleted\"],  "
           + " \"granularity\": \"all\",  "
-          + " \"intervals\": [     \"2013-01-01/2013-01-02\"   ],  "
+          + " \"intervals\": [     \"2013-01-01T00:00:00.000-08:00/2013-01-02T00:00:00.000-08:00\"   ],  "
           + " \"pagingSpec\":{\"pagingIdentifiers\": {}, \"threshold\":5}, "
           + " \"context\":{\"druid.query.fetch\":true}}";
   private static final String SELECT_QUERY_SPLIT =
       "[HiveDruidSplit{{\"queryType\":\"select\","
           + "\"dataSource\":{\"type\":\"table\",\"name\":\"wikipedia\"},"
-          + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2013-01-01T00:00:00.000-08:00/2013-01-02T00:00:00.000-08:00\"]},"
+          + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2013-01-01T08:00:00.000Z/2013-01-02T08:00:00.000Z\"]},"
           + "\"descending\":false,"
           + "\"filter\":null,"
           + "\"granularity\":{\"type\":\"all\"},"

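The expected split strings change only in how interval endpoints are rendered: the upgraded Druid query serialization emits them in UTC, so offsets such as -08:00 become the equivalent Z-suffixed instants. A quick hypothetical check of that equivalence (not from this patch):

    import org.joda.time.DateTime;
    import org.joda.time.DateTimeZone;

    public class UtcNormalizationExample {
      public static void main(String[] args) {
        DateTime pacific = DateTime.parse("2012-01-01T00:00:00.000-08:00");
        System.out.println(pacific.withZone(DateTimeZone.UTC));  // 2012-01-01T08:00:00.000Z
      }
    }
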
http://git-wip-us.apache.org/repos/asf/hive/blob/464a3f61/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java b/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java
index e0a11e9..8fca03b 100644
--- a/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java
+++ b/druid-handler/src/test/org/apache/hadoop/hive/ql/io/TestDruidRecordWriter.java
@@ -230,7 +230,7 @@ public class TestDruidRecordWriter {
       Assert.assertEquals(
               (Double) expected.get("unique_hosts"),
               (Double) HyperUniquesAggregatorFactory
-                      .estimateCardinality(actual.getRaw("unique_hosts")),
+                      .estimateCardinality(actual.getRaw("unique_hosts"), false),
               0.001
       );
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/464a3f61/itests/qtest-druid/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest-druid/pom.xml b/itests/qtest-druid/pom.xml
index a807d03..870e365 100644
--- a/itests/qtest-druid/pom.xml
+++ b/itests/qtest-druid/pom.xml
@@ -37,7 +37,7 @@
   <!-- test intra-project -->
   <properties>
     <hive.path.to.root>../..</hive.path.to.root>
-    <druid.curator.version>2.11.0</druid.curator.version>
+    <druid.curator.version>4.0.0</druid.curator.version>
     <druid.jersey.version>1.19.3</druid.jersey.version>
     <druid.jetty.version>9.3.19.v20170502</druid.jetty.version>
     <druid.derby.version>10.11.1.1</druid.derby.version>
@@ -252,4 +252,4 @@
   </build>
 
 
-</project>
\ No newline at end of file
+</project>

http://git-wip-us.apache.org/repos/asf/hive/blob/464a3f61/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index fdc331b..bd19ca3 100644
--- a/pom.xml
+++ b/pom.xml
@@ -140,7 +140,7 @@
     <derby.version>10.11.1.1</derby.version>
     <dropwizard.version>3.1.0</dropwizard.version>
     <dropwizard-metrics-hadoop-metrics2-reporter.version>0.1.2</dropwizard-metrics-hadoop-metrics2-reporter.version>
-    <druid.version>0.10.1</druid.version>
+    <druid.version>0.11.0</druid.version>
     <guava.version>19.0</guava.version>
     <groovy.version>2.4.11</groovy.version>
     <h2database.version>1.3.166</h2database.version>
@@ -173,7 +173,7 @@
     <glassfish.jersey.version>2.22.2</glassfish.jersey.version>
     <jline.version>2.12</jline.version>
     <jms.version>1.1</jms.version>
-    <joda.version>2.8.1</joda.version>
+    <joda.version>2.9.9</joda.version>
     <jodd.version>3.5.2</jodd.version>
     <json.version>1.8</json.version>
     <junit.version>4.11</junit.version>
