http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
index a43d2be..eab8f6f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
@@ -18,15 +18,15 @@
 package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
 
 import java.math.BigDecimal;
-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
-import java.util.Calendar;
 
 import org.apache.calcite.adapter.druid.DruidQuery;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.JoinRelType;
 import org.apache.calcite.rex.RexLiteral;
 import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.util.DateString;
+import org.apache.calcite.util.TimeString;
+import org.apache.calcite.util.TimestampString;
 import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
 import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
 import org.apache.hadoop.hive.conf.Constants;
@@ -37,8 +37,6 @@ import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
 import org.apache.hadoop.hive.ql.parse.ParseDriver;
 import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
 
 public class ASTBuilder {
 
@@ -270,26 +268,17 @@ public class ASTBuilder {
       val = literal.getValue3();
       type = ((Boolean) val).booleanValue() ? HiveParser.KW_TRUE : HiveParser.KW_FALSE;
       break;
-    case DATE: {
-      //Calcite Calendar is always GMT, Hive atm uses JVM local
-      final Calendar c = (Calendar) literal.getValue();
-      final DateTime dt = new DateTime(c.getTimeInMillis(), DateTimeZone.forTimeZone(c.getTimeZone()));
+    case DATE:
+      val = "'" + literal.getValueAs(DateString.class).toString() + "'";
       type = HiveParser.TOK_DATELITERAL;
-      DateFormat df = new SimpleDateFormat("yyyy-MM-dd");
-      val = df.format(dt.toDateTime(DateTimeZone.getDefault()).toDate());
-      val = "'" + val + "'";
-    }
       break;
     case TIME:
-    case TIMESTAMP: {
-      //Calcite Calendar is always GMT, Hive atm uses JVM local
-      final Calendar c = (Calendar) literal.getValue();
-      final DateTime dt = new DateTime(c.getTimeInMillis(), DateTimeZone.forTimeZone(c.getTimeZone()));
+      val = "'" + literal.getValueAs(TimeString.class).toString() + "'";
+      type = HiveParser.TOK_TIMESTAMPLITERAL;
+      break;
+    case TIMESTAMP:
+      val = "'" + literal.getValueAs(TimestampString.class).toString() + "'";
       type = HiveParser.TOK_TIMESTAMPLITERAL;
-      DateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
-      val = df.format(dt.toDateTime(DateTimeZone.getDefault()).toDate());
-      val = "'" + val + "'";
-    }
       break;
     case INTERVAL_YEAR:
     case INTERVAL_MONTH:

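The ASTBuilder hunk above replaces the Joda-Time/Calendar round trip with Calcite 1.12's string-backed literal classes. A minimal sketch of that API, separate from the patch and with invented literal values, showing what RexLiteral.getValueAs(...) now hands back:

import org.apache.calcite.util.DateString;
import org.apache.calcite.util.TimestampString;

public class LiteralStringSketch {
  public static void main(String[] args) {
    // DateString/TimestampString keep the literal as a canonical string,
    // so no JVM-timezone arithmetic can shift the value in transit.
    DateString d = new DateString("2017-03-31");
    TimestampString ts = new TimestampString("2017-03-31 01:02:03");
    // toString() is exactly what ASTBuilder now quotes into the AST node:
    System.out.println("'" + d.toString() + "'");   // '2017-03-31'
    System.out.println("'" + ts.toString() + "'");  // '2017-03-31 01:02:03'
  }
}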
http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
index b1efbbd..f974cc9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
@@ -18,9 +18,9 @@
 package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
 
 import java.math.BigDecimal;
+import java.sql.Date;
 import java.sql.Timestamp;
 import java.util.ArrayList;
-import java.util.Calendar;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Set;
@@ -42,6 +42,9 @@ import org.apache.calcite.rex.RexWindow;
 import org.apache.calcite.rex.RexWindowBound;
 import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.type.SqlTypeUtil;
+import org.apache.calcite.util.DateString;
+import org.apache.calcite.util.TimeString;
+import org.apache.calcite.util.TimestampString;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
 import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
@@ -74,8 +77,6 @@ import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -292,18 +293,15 @@ public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
       case DOUBLE:
         return new ExprNodeConstantDesc(TypeInfoFactory.doubleTypeInfo,
             Double.valueOf(((Number) literal.getValue3()).doubleValue()));
-      case DATE: {
-        final Calendar c = (Calendar) literal.getValue();
+      case DATE:
         return new ExprNodeConstantDesc(TypeInfoFactory.dateTypeInfo,
-            new java.sql.Date(c.getTimeInMillis()));
-      }
+            Date.valueOf(literal.getValueAs(DateString.class).toString()));
       case TIME:
-      case TIMESTAMP: {
-        final Calendar c = (Calendar) literal.getValue();
-        final DateTime dt = new DateTime(c.getTimeInMillis(), DateTimeZone.forTimeZone(c.getTimeZone()));
         return new ExprNodeConstantDesc(TypeInfoFactory.timestampTypeInfo,
-            new Timestamp(dt.getMillis()));
-      }
+            Timestamp.valueOf(literal.getValueAs(TimeString.class).toString()));
+      case TIMESTAMP:
+        return new ExprNodeConstantDesc(TypeInfoFactory.timestampTypeInfo,
+            Timestamp.valueOf(literal.getValueAs(TimestampString.class).toString()));
       case BINARY:
         return new ExprNodeConstantDesc(TypeInfoFactory.binaryTypeInfo, literal.getValue3());
       case DECIMAL:

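The ExprNodeConverter hunk above leans on a string round trip: java.sql.Date.valueOf and java.sql.Timestamp.valueOf parse exactly the canonical form that Calcite's DateString/TimeString/TimestampString print, so the constant reaches ExprNodeConstantDesc without any millis-since-epoch timezone math. A small sketch with invented values:

import java.sql.Date;
import java.sql.Timestamp;

public class ValueOfSketch {
  public static void main(String[] args) {
    // Both parsers interpret the string via local calendar fields, matching
    // how Hive reads DATE/TIMESTAMP constants.
    Date d = Date.valueOf("2017-03-31");
    Timestamp ts = Timestamp.valueOf("2017-03-31 01:02:03.123456789");
    System.out.println(d);   // 2017-03-31
    System.out.println(ts);  // 2017-03-31 01:02:03.123456789
  }
}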
http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
index 52ca3b0..7665f56 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
@@ -28,7 +28,6 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 
-import org.apache.calcite.avatica.util.DateTimeUtils;
 import org.apache.calcite.avatica.util.TimeUnit;
 import org.apache.calcite.avatica.util.TimeUnitRange;
 import org.apache.calcite.plan.RelOptCluster;
@@ -50,7 +49,9 @@ import org.apache.calcite.sql.fun.SqlStdOperatorTable;
 import org.apache.calcite.sql.parser.SqlParserPos;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.calcite.util.ConversionUtil;
+import org.apache.calcite.util.DateString;
 import org.apache.calcite.util.NlsString;
+import org.apache.calcite.util.TimestampString;
 import org.apache.hadoop.hive.common.type.Decimal128;
 import org.apache.hadoop.hive.common.type.HiveChar;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
@@ -99,8 +100,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectIn
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableList.Builder;
@@ -659,40 +658,29 @@ public class RexNodeConverter {
      calciteLiteral = rexBuilder.makeCharLiteral(asUnicodeString((String) value));
       break;
     case DATE:
-        // The Calcite literal is in GMT, this will be converted back to JVM locale
-        // by ASTBuilder.literal during Calcite->Hive plan conversion
-        final Calendar cal = Calendar.getInstance(DateTimeUtils.GMT_ZONE, Locale.getDefault());
-        cal.setTime((Date) value);
-        calciteLiteral = rexBuilder.makeDateLiteral(cal);
-        break;
-      case TIMESTAMP:
-        // The Calcite literal is in GMT, this will be converted back to JVM locale
-        // by ASTBuilder.literal during Calcite->Hive plan conversion
-        final Calendar calt = Calendar.getInstance(DateTimeUtils.GMT_ZONE, Locale.getDefault());
-        if (value instanceof Calendar) {
-          final Calendar c = (Calendar) value;
-          long timeMs = c.getTimeInMillis();
-          calt.setTimeInMillis(timeMs);
-        } else {
-          final Timestamp ts = (Timestamp) value;
-          // CALCITE-1690
-          // Calcite cannot represent TIMESTAMP literals with precision higher than 3
-          if (ts.getNanos() % 1000000 != 0) {
-            throw new CalciteSemanticException(
-              "High Precision Timestamp: " + String.valueOf(ts),
-              UnsupportedFeature.HighPrecissionTimestamp);
-          }
-          calt.setTimeInMillis(ts.getTime());
-        }
-        // Must call makeLiteral, not makeTimestampLiteral 
-        // to have the RexBuilder.roundTime logic kick in
-        calciteLiteral = rexBuilder.makeLiteral(
-          calt,
-          rexBuilder.getTypeFactory().createSqlType(
-            SqlTypeName.TIMESTAMP,
-            rexBuilder.getTypeFactory().getTypeSystem().getDefaultPrecision(SqlTypeName.TIMESTAMP)),
-          false);
-        break;
+      final Calendar cal = Calendar.getInstance(Locale.getDefault());
+      cal.setTime((Date) value);
+      calciteLiteral = rexBuilder.makeDateLiteral(DateString.fromCalendarFields(cal));
+      break;
+    case TIMESTAMP:
+      final TimestampString tsString;
+      if (value instanceof Calendar) {
+        tsString = TimestampString.fromCalendarFields((Calendar) value);
+      } else {
+        final Timestamp ts = (Timestamp) value;
+        final Calendar calt = Calendar.getInstance(Locale.getDefault());
+        calt.setTimeInMillis(ts.getTime());
+        tsString = TimestampString.fromCalendarFields(calt).withNanos(ts.getNanos());
+      }
+      // Must call makeLiteral, not makeTimestampLiteral
+      // to have the RexBuilder.roundTime logic kick in
+      calciteLiteral = rexBuilder.makeLiteral(
+        tsString,
+        rexBuilder.getTypeFactory().createSqlType(
+          SqlTypeName.TIMESTAMP,
+          rexBuilder.getTypeFactory().getTypeSystem().getDefaultPrecision(SqlTypeName.TIMESTAMP)),
+        false);
+      break;
     case INTERVAL_YEAR_MONTH:
       // Calcite year-month literal value is months as BigDecimal
      BigDecimal totalMonths = BigDecimal.valueOf(((HiveIntervalYearMonth) value).getTotalMonths());

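One subtlety in the TIMESTAMP branch of the RexNodeConverter hunk above: TimestampString.fromCalendarFields copies the Calendar's fields rather than its epoch millis, and withNanos then re-attaches the sub-millisecond digits a Calendar cannot carry. A minimal sketch with an invented value:

import java.sql.Timestamp;
import java.util.Calendar;
import java.util.Locale;
import org.apache.calcite.util.TimestampString;

public class TimestampStringSketch {
  public static void main(String[] args) {
    Timestamp ts = Timestamp.valueOf("2017-03-31 01:02:03.123456789");
    Calendar calt = Calendar.getInstance(Locale.getDefault());
    calt.setTimeInMillis(ts.getTime());
    // Field-by-field copy (millisecond precision), then restore full nanos.
    TimestampString tsString =
        TimestampString.fromCalendarFields(calt).withNanos(ts.getNanos());
    System.out.println(tsString);  // 2017-03-31 01:02:03.123456789
  }
}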
http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 88054e7..97bdee0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -172,7 +172,6 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveAggregateJoinTransp
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveAggregateProjectMergeRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveAggregatePullUpConstantsRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveAggregateReduceRule;
-import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveDruidProjectFilterTransposeRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveExceptRewriteRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveExpandDistinctAggregatesRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveFilterAggregateTransposeRule;
@@ -212,7 +211,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSubQueryRemoveRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveUnionMergeRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveUnionPullUpConstantsRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveWindowingFixRule;
-import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewFilterScanRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTBuilder;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.HiveOpConverter;
@@ -1488,7 +1487,8 @@ public class CalcitePlanner extends SemanticAnalyzer {
             planner.addMaterialization(materialization);
           }
           // Add view-based rewriting rules to planner
-          planner.addRule(HiveMaterializedViewFilterScanRule.INSTANCE);
+          planner.addRule(HiveMaterializedViewRule.INSTANCE_PROJECT_FILTER);
+          planner.addRule(HiveMaterializedViewRule.INSTANCE_FILTER);
           // Optimize plan
           planner.setRoot(calciteOptimizedPlan);
           calciteOptimizedPlan = planner.findBestExp();
@@ -1544,7 +1544,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
       calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, false, mdProvider.getMetadataProvider(), null,
               HepMatchOrder.BOTTOM_UP,
               DruidRules.FILTER,
-              HiveDruidProjectFilterTransposeRule.INSTANCE,
+              DruidRules.PROJECT_FILTER_TRANSPOSE,
               DruidRules.AGGREGATE_FILTER_TRANSPOSE,
               DruidRules.AGGREGATE_PROJECT,
               DruidRules.PROJECT,
@@ -3451,7 +3451,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
 
         w = cluster.getRexBuilder().makeOver(calciteAggFnRetType, calciteAggFn, calciteAggFnArgs,
             partitionKeys, ImmutableList.<RexFieldCollation> copyOf(orderKeys), lowerBound,
-            upperBound, isRows, true, false);
+            upperBound, isRows, true, false, hiveAggInfo.m_distinct);
       } else {
         // TODO: Convert to Semantic Exception
         throw new RuntimeException("Unsupported window Spec");

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/queries/clientpositive/materialized_view_create.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_create.q b/ql/src/test/queries/clientpositive/materialized_view_create.q
index 57f8bb7..bb50dbb 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_create.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_create.q
@@ -29,3 +29,9 @@ create materialized view cmv_mat_view5 tblproperties ('key'='value') as select a
 select * from cmv_mat_view5;
 
 show tblproperties cmv_mat_view5;
+
+drop materialized view cmv_mat_view;
+drop materialized view cmv_mat_view2;
+drop materialized view cmv_mat_view3;
+drop materialized view cmv_mat_view4;
+drop materialized view cmv_mat_view5;

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q
index e95a868..b17517f 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q
@@ -57,3 +57,5 @@ select * from (
   join
   (select a, c from cmv_basetable where d = 3) table2
   on table1.a = table2.a);
+
+drop materialized view cmv_mat_view;

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/auto_join12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join12.q.out b/ql/src/test/results/clientpositive/auto_join12.q.out
index 2fdd39f..0e9fa98 100644
--- a/ql/src/test/results/clientpositive/auto_join12.q.out
+++ b/ql/src/test/results/clientpositive/auto_join12.q.out
@@ -41,12 +41,12 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) < 80.0)) (type: boolean)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string)
@@ -57,12 +57,12 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) < 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string)
@@ -76,12 +76,12 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) < 80.0)) (type: boolean)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator
                   condition map:
                        Inner Join 0 to 1
@@ -91,11 +91,11 @@ STAGE PLANS:
                     1 _col0 (type: string)
                     2 _col0 (type: string)
                   outputColumnNames: _col0, _col3
-                  Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 365 Data size: 3878 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: hash(_col0,_col3) (type: int)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 365 Data size: 3878 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(_col0)
                       mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/auto_join16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join16.q.out b/ql/src/test/results/clientpositive/auto_join16.q.out
index c4d654a..f05de41 100644
--- a/ql/src/test/results/clientpositive/auto_join16.q.out
+++ b/ql/src/test/results/clientpositive/auto_join16.q.out
@@ -32,12 +32,12 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
-              Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string), _col1 (type: string)
@@ -50,12 +50,12 @@ STAGE PLANS:
             alias: tab
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
-              Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator
                   condition map:
                        Inner Join 0 to 1
@@ -63,11 +63,11 @@ STAGE PLANS:
                     0 _col0 (type: string), _col1 (type: string)
                     1 _col0 (type: string), _col1 (type: string)
                   outputColumnNames: _col0, _col3
-                  Statistics: Num rows: 19 Data size: 210 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: hash(_col0,_col3) (type: int)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 19 Data size: 210 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(_col0)
                       mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/auto_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join4.q.out b/ql/src/test/results/clientpositive/auto_join4.q.out
index d4fb977..6c0fccd 100644
--- a/ql/src/test/results/clientpositive/auto_join4.q.out
+++ b/ql/src/test/results/clientpositive/auto_join4.q.out
@@ -55,12 +55,12 @@ STAGE PLANS:
             alias: src2
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/auto_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join5.q.out b/ql/src/test/results/clientpositive/auto_join5.q.out
index c3b562d..4103061 100644
--- a/ql/src/test/results/clientpositive/auto_join5.q.out
+++ b/ql/src/test/results/clientpositive/auto_join5.q.out
@@ -55,12 +55,12 @@ STAGE PLANS:
             alias: src1
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/auto_join8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join8.q.out b/ql/src/test/results/clientpositive/auto_join8.q.out
index 5ca6798..2689578 100644
--- a/ql/src/test/results/clientpositive/auto_join8.q.out
+++ b/ql/src/test/results/clientpositive/auto_join8.q.out
@@ -55,12 +55,12 @@ STAGE PLANS:
             alias: src2
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out b/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out
index 041621f..35e9a5d 100644
--- a/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out
+++ b/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out
@@ -320,3 +320,11 @@ POSTHOOK: Input: default@cmv_basetable
 #### A masked pattern was here ####
 3      9.80    3       978.76
 3      978.76  3       978.76
+PREHOOK: query: drop materialized view cmv_mat_view
+PREHOOK: type: DROP_MATERIALIZED_VIEW
+PREHOOK: Input: default@cmv_mat_view
+PREHOOK: Output: default@cmv_mat_view
+POSTHOOK: query: drop materialized view cmv_mat_view
+POSTHOOK: type: DROP_MATERIALIZED_VIEW
+POSTHOOK: Input: default@cmv_mat_view
+POSTHOOK: Output: default@cmv_mat_view

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out b/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
index a2f5dbf..afc4fd9 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
@@ -715,12 +715,12 @@ STAGE PLANS:
             alias: subq2:subq1:a
             Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: ((key < 8) and (key < 6)) (type: boolean)
-              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+              predicate: (key < 6) (type: boolean)
+              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int)
                 outputColumnNames: key
-                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
                 Sorted Merge Bucket Map Join Operator
                   condition map:
                        Inner Join 0 to 1
@@ -832,12 +832,12 @@ STAGE PLANS:
             alias: subq2:subq1:a
             Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: ((key < 8) and (key < 6)) (type: boolean)
-              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+              predicate: (key < 6) (type: boolean)
+              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int)
                 outputColumnNames: key
-                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
                 Sorted Merge Bucket Map Join Operator
                   condition map:
                        Inner Join 0 to 1
@@ -1393,12 +1393,12 @@ STAGE PLANS:
             alias: a:subq2:subq1:a
             Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: ((key < 8) and (key < 6)) (type: boolean)
-              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+              predicate: (key < 6) (type: boolean)
+              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int)
                 outputColumnNames: key
-                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
                 Sorted Merge Bucket Map Join Operator
                   condition map:
                        Inner Join 0 to 1

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out b/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out
index 6d6a38a..a4c7f77 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out
@@ -30,18 +30,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: key, value
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: key (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: key (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: value (type: string)
                   auto parallelism: false
@@ -51,18 +51,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: key, value
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: key (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: key (type: string)
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: value (type: string)
                   auto parallelism: false
@@ -224,17 +224,17 @@ STAGE PLANS:
             0 key (type: string)
             1 key (type: string)
           outputColumnNames: key, value, key0, value0
-          Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string), value (type: string), key0 (type: string), value0 (type: string)
             outputColumnNames: key, value, key1, value1
-            Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               GlobalTableId: 0
 #### A masked pattern was here ####
               NumFilesPerFileSink: 1
-              Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -328,18 +328,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: key, value
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: key (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: key (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: value (type: string)
                   auto parallelism: false
@@ -349,18 +349,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: key, value
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: key (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: key (type: string)
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: value (type: string)
                   auto parallelism: false
@@ -522,17 +522,17 @@ STAGE PLANS:
             0 key (type: string)
             1 key (type: string)
           outputColumnNames: key, value, key0, value0
-          Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string), value (type: string), key0 (type: string), value0 (type: string)
             outputColumnNames: key, value, key1, value1
-            Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               GlobalTableId: 0
 #### A masked pattern was here ####
               NumFilesPerFileSink: 1
-              Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/druid_basic2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_basic2.q.out b/ql/src/test/results/clientpositive/druid_basic2.q.out
index 38b07be..2e9c340 100644
--- a/ql/src/test/results/clientpositive/druid_basic2.q.out
+++ b/ql/src/test/results/clientpositive/druid_basic2.q.out
@@ -166,7 +166,7 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":["robot"],"limitSpec":{"type":"default"},"filter":{"type":"selector","dimension":"language","value":"en"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"}],"limitSpec":{"type":"default"},"filter":{"type":"selector","dimension":"language","value":"en"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
             druid.query.type groupBy
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           GatherStats: false
@@ -554,12 +554,12 @@ STAGE PLANS:
           TableScan
             alias: druid_table_1
             properties:
-              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"day","dimensions":["robot","language"],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
               druid.query.type groupBy
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             GatherStats: false
             Select Operator
-              expressions: robot (type: string), __time (type: timestamp), $f3 (type: float), $f4 (type: float), UDFToInteger(robot) (type: int)
+              expressions: robot (type: string), floor_day (type: timestamp), $f3 (type: float), $f4 (type: float), UDFToInteger(robot) (type: int)
               outputColumnNames: _col0, _col1, _col2, _col3, _col5
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Reduce Output Operator
@@ -589,7 +589,7 @@ STAGE PLANS:
              columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
              columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
              druid.datasource wikipedia
-              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"day","dimensions":["robot","language"],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
               druid.query.type groupBy
 #### A masked pattern was here ####
               name default.druid_table_1
@@ -615,7 +615,7 @@ STAGE PLANS:
                columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
                columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
                druid.datasource wikipedia
-                druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"day","dimensions":["robot","language"],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+                druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
                 druid.query.type groupBy
 #### A masked pattern was here ####
                 name default.druid_table_1
@@ -832,16 +832,16 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: druid_table_1
-            filterExpr: floor_day(__time) BETWEEN 1999-11-01 00:00:00.0 AND 1999-11-10 00:00:00.0 (type: boolean)
+            filterExpr: floor_day(extract) BETWEEN 1999-11-01 00:00:00.0 AND 1999-11-10 00:00:00.0 (type: boolean)
             properties:
-              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"none","dimensions":["robot"],"limitSpec":{"type":"default"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}},{"type":"default","dimension":"robot"}],"limitSpec":{"type":"default"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
               druid.query.type groupBy
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: floor_day(__time) BETWEEN 1999-11-01 00:00:00.0 AND 1999-11-10 00:00:00.0 (type: boolean)
+              predicate: floor_day(extract) BETWEEN 1999-11-01 00:00:00.0 AND 1999-11-10 00:00:00.0 (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
-                expressions: robot (type: string), __time (type: timestamp)
+                expressions: robot (type: string), extract (type: timestamp)
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
@@ -889,83 +889,24 @@ ORDER BY robot
 LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: druid_table_1
-            filterExpr: __time BETWEEN 1999-11-01 00:00:00.0 AND 1999-11-10 00:00:00.0 (type: boolean)
-            properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-              druid.query.type select
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Filter Operator
-              predicate: __time BETWEEN 1999-11-01 00:00:00.0 AND 1999-11-10 00:00:00.0 (type: boolean)
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Select Operator
-                expressions: robot (type: string), floor_day(__time) (type: timestamp)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string), _col1 (type: timestamp)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: timestamp)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: timestamp)
-                    Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                    TopN Hash Memory Usage: 0.1
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string), KEY._col1 (type: timestamp)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              TopN Hash Memory Usage: 0.1
-              value expressions: _col1 (type: timestamp)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: timestamp)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
-      limit: 10
+      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"alphanumeric"}]},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1999-11-01T00:00:00.000/1999-11-10T00:00:00.001"]}
+            druid.query.type groupBy
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: robot (type: string), floor_day (type: timestamp)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN EXTENDED
 SELECT robot, floor_day(`__time`), max(added) as m, sum(delta) as s

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/druid_intervals.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_intervals.q.out b/ql/src/test/results/clientpositive/druid_intervals.q.out
index 90013c6..433f15e 100644
--- a/ql/src/test/results/clientpositive/druid_intervals.q.out
+++ b/ql/src/test/results/clientpositive/druid_intervals.q.out
@@ -109,7 +109,7 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/2012-03-01T08:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/2012-03-01T00:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
             druid.query.type select
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
@@ -139,7 +139,7 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T08:00:00.000/2012-03-01T08:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2012-03-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
             druid.query.type select
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
@@ -171,7 +171,7 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T08:00:00.000/2011-01-01T08:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2011-01-01T00:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
             druid.query.type select
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
@@ -191,40 +191,24 @@ FROM druid_table_1
 WHERE `__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: druid_table_1
-            filterExpr: __time BETWEEN 2010-01-01 00:00:00.0 AND 2011-01-01 00:00:00.0 (type: boolean)
-            properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-              druid.query.type select
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Filter Operator
-              predicate: __time BETWEEN 2010-01-01 00:00:00.0 AND 2011-01-01 00:00:00.0 (type: boolean)
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Select Operator
-                expressions: __time (type: timestamp)
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2011-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+            druid.query.type select
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN
 SELECT `__time`
@@ -249,7 +233,7 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":[],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2011-01-01T00:00:00.001","2012-01-01T00:00:00.000/2013-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
             druid.query.type select
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
@@ -281,7 +265,7 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":[],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2012-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
             druid.query.type select
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
@@ -301,40 +285,24 @@ FROM druid_table_1
 WHERE `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00')
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: druid_table_1
-            filterExpr: (__time) IN (2010-01-01 00:00:00.0, 2011-01-01 00:00:00.0) (type: boolean)
-            properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-              druid.query.type select
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Filter Operator
-              predicate: (__time) IN (2010-01-01 00:00:00.0, 2011-01-01 00:00:00.0) (type: boolean)
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Select Operator
-                expressions: __time (type: timestamp)
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2010-01-01T00:00:00.001","2011-01-01T00:00:00.000/2011-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+            druid.query.type select
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN
 SELECT `__time`, robot
@@ -347,40 +315,24 @@ FROM druid_table_1
 WHERE robot = 'user1' AND `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00')
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: druid_table_1
-            filterExpr: ((__time) IN (2010-01-01 00:00:00.0, 2011-01-01 00:00:00.0) and (robot = 'user1')) (type: boolean)
-            properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-              druid.query.type select
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Filter Operator
-              predicate: ((__time) IN (2010-01-01 00:00:00.0, 2011-01-01 00:00:00.0) and (robot = 'user1')) (type: boolean)
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Select Operator
-                expressions: __time (type: timestamp), 'user1' (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2010-01-01T00:00:00.001","2011-01-01T00:00:00.000/2011-01-01T00:00:00.001"],"filter":{"type":"selector","dimension":"robot","value":"user1"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+            druid.query.type select
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp), 'user1' (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN
 SELECT `__time`, robot

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/druid_timeseries.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_timeseries.q.out b/ql/src/test/results/clientpositive/druid_timeseries.q.out
index bd4d6de..0ce1abe 100644
--- a/ql/src/test/results/clientpositive/druid_timeseries.q.out
+++ b/ql/src/test/results/clientpositive/druid_timeseries.q.out
@@ -109,11 +109,11 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"none","aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
-            druid.query.type timeseries
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+            druid.query.type groupBy
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
-            expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+            expressions: extract (type: timestamp), $f1 (type: float), $f2 (type: float)
             outputColumnNames: _col0, _col1, _col2
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             ListSink

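A note on the interval rewriting visible in the druid_intervals.q.out hunks above: point predicates on the Druid `__time` column (equality, IN) are pushed into the query's "intervals" list as half-open ranges whose exclusive upper bound sits one millisecond past the instant, and an inclusive BETWEEN upper bound likewise gains a trailing millisecond (".001"), while a strict upper bound keeps a ".000" end. A minimal sketch of that rendering, assuming java.time and a hypothetical pointInterval helper (neither is Hive's actual code):

    import java.time.LocalDateTime;
    import java.time.format.DateTimeFormatter;

    public class DruidIntervalSketch {
      // Hypothetical helper, not Hive's implementation: format a point-in-time
      // predicate on __time as a Druid interval whose exclusive upper bound is
      // one millisecond past the instant, matching the plans above.
      static String pointInterval(LocalDateTime t) {
        DateTimeFormatter f = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS");
        return f.format(t) + "/" + f.format(t.plusNanos(1_000_000));
      }

      public static void main(String[] args) {
        // __time IN ('2010-01-01 00:00:00','2011-01-01 00:00:00') maps to two
        // point intervals, as in the druid_intervals.q.out diff:
        System.out.println(pointInterval(LocalDateTime.of(2010, 1, 1, 0, 0)));
        // -> 2010-01-01T00:00:00.000/2010-01-01T00:00:00.001
        System.out.println(pointInterval(LocalDateTime.of(2011, 1, 1, 0, 0)));
        // -> 2011-01-01T00:00:00.000/2011-01-01T00:00:00.001
      }
    }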