HIVE-19298 : Fix operator tree of CTAS for Druid Storage Handler (Slim Bouguerra via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <hashut...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b1357240
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b1357240
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b1357240

Branch: refs/heads/branch-3
Commit: b135724029272963f16b57830c630ce9a69fe8b5
Parents: 71d77ab
Author: Slim Bouguerra <slim.bougue...@gmail.com>
Authored: Mon May 7 22:56:19 2018 -0700
Committer: Vineet Garg <vg...@apache.org>
Committed: Tue May 8 16:05:12 2018 -0700

----------------------------------------------------------------------
 .../results/positive/accumulo_queries.q.out     |  22 ++--
 .../accumulo_single_sourced_multi_insert.q.out  |  35 +++---
 .../src/test/results/positive/hbase_ddl.q.out   |   9 +-
 .../test/results/positive/hbase_queries.q.out   |  22 ++--
 .../hbase_single_sourced_multi_insert.q.out     |  35 +++---
 .../src/test/results/positive/hbasestats.q.out  |   9 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  62 ++++++++---
 .../org/apache/hadoop/hive/ql/exec/Task.java    |   1 -
 .../ql/optimizer/QueryPlanPostProcessor.java    |   6 +-
 .../hadoop/hive/ql/parse/GenTezProcContext.java |  20 ++--
 .../hadoop/hive/ql/parse/ParseContext.java      |  23 ++--
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  | 106 +++++++++++--------
 .../hadoop/hive/ql/parse/TaskCompiler.java      |  30 +++---
 .../org/apache/hadoop/hive/ql/plan/DDLWork.java |  21 ++--
 .../hive/ql/plan/InsertCommitHookDesc.java      |  41 +++++++
 .../hadoop/hive/ql/plan/InsertTableDesc.java    |  40 -------
 .../druid/druidmini_dynamic_partition.q.out     |  28 ++---
 .../clientpositive/druid/druidmini_mv.q.out     |  18 ++--
 .../hive/metastore/DefaultHiveMetaHook.java     |   2 +-
 19 files changed, 269 insertions(+), 261 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b1357240/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/results/positive/accumulo_queries.q.out b/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
index 78a2a8a..f7e7699 100644
--- a/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
+++ b/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
@@ -40,9 +40,8 @@ POSTHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE accumulo_table_1 SELECT
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
-  Stage-2
   Stage-1 is a root stage
-  Stage-3 is a root stage
+  Stage-2 is a root stage
 
 STAGE PLANS:
   Stage: Stage-0
@@ -53,15 +52,11 @@ STAGE PLANS:
           properties:
             COLUMN_STATS_ACCURATE 
 
-  Stage: Stage-2
-      Insert operator:
-        Insert
-
   Stage: Stage-1
       Pre Insert operator:
         Pre-Insert task
 
-  Stage: Stage-3
+  Stage: Stage-2
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -506,10 +501,9 @@ ON (x.key = Y.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
-  Stage-2
   Stage-1 is a root stage
-  Stage-4 is a root stage
-  Stage-3 depends on stages: Stage-4
+  Stage-3 is a root stage
+  Stage-2 depends on stages: Stage-3
 
 STAGE PLANS:
   Stage: Stage-0
@@ -520,15 +514,11 @@ STAGE PLANS:
           properties:
             COLUMN_STATS_ACCURATE 
 
-  Stage: Stage-2
-      Insert operator:
-        Insert
-
   Stage: Stage-1
       Pre Insert operator:
         Pre-Insert task
 
-  Stage: Stage-4
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -564,7 +554,7 @@ STAGE PLANS:
                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
-  Stage: Stage-3
+  Stage: Stage-2
     Map Reduce
       Map Operator Tree:
           TableScan

http://git-wip-us.apache.org/repos/asf/hive/blob/b1357240/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out b/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out
index 0efe121..5905ecd 100644
--- a/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out
+++ b/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out
@@ -34,16 +34,15 @@ select value,"" where a.key > 50 AND a.key < 100
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
-  Stage-2
-  Stage-5 depends on stages: Stage-3, Stage-2, Stage-1
+  Stage-4 depends on stages: Stage-2, Stage-0, Stage-1
   Stage-1 is a root stage
-  Stage-4 is a root stage
-  Stage-10 depends on stages: Stage-4 , consists of Stage-7, Stage-6, Stage-8
-  Stage-7
-  Stage-3 depends on stages: Stage-7, Stage-6, Stage-9
+  Stage-3 is a root stage
+  Stage-9 depends on stages: Stage-3 , consists of Stage-6, Stage-5, Stage-7
   Stage-6
-  Stage-8
-  Stage-9 depends on stages: Stage-8
+  Stage-2 depends on stages: Stage-6, Stage-5, Stage-8
+  Stage-5
+  Stage-7
+  Stage-8 depends on stages: Stage-7
 
 STAGE PLANS:
   Stage: Stage-0
@@ -54,11 +53,7 @@ STAGE PLANS:
           properties:
             COLUMN_STATS_ACCURATE 
 
-  Stage: Stage-2
-      Insert operator:
-        Insert
-
-  Stage: Stage-5
+  Stage: Stage-4
     Stats Work
       Basic Stats Work:
       Column Stats Desc:
@@ -70,7 +65,7 @@ STAGE PLANS:
       Pre Insert operator:
         Pre-Insert task
 
-  Stage: Stage-4
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -133,16 +128,16 @@ STAGE PLANS:
                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
-  Stage: Stage-10
+  Stage: Stage-9
     Conditional Operator
 
-  Stage: Stage-7
+  Stage: Stage-6
     Move Operator
       files:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-3
+  Stage: Stage-2
     Move Operator
       tables:
           replace: true
@@ -152,7 +147,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.src_x1
 
-  Stage: Stage-6
+  Stage: Stage-5
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -164,7 +159,7 @@ STAGE PLANS:
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: default.src_x1
 
-  Stage: Stage-8
+  Stage: Stage-7
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -176,7 +171,7 @@ STAGE PLANS:
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: default.src_x1
 
-  Stage: Stage-9
+  Stage: Stage-8
     Move Operator
       files:
           hdfs directory: true

http://git-wip-us.apache.org/repos/asf/hive/blob/b1357240/hbase-handler/src/test/results/positive/hbase_ddl.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_ddl.q.out b/hbase-handler/src/test/results/positive/hbase_ddl.q.out
index bf7da98..9d16034 100644
--- a/hbase-handler/src/test/results/positive/hbase_ddl.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_ddl.q.out
@@ -40,9 +40,8 @@ POSTHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE hbase_table_1 SELECT *
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
-  Stage-2
   Stage-1 is a root stage
-  Stage-3 is a root stage
+  Stage-2 is a root stage
 
 STAGE PLANS:
   Stage: Stage-0
@@ -53,15 +52,11 @@ STAGE PLANS:
           properties:
             COLUMN_STATS_ACCURATE 
 
-  Stage: Stage-2
-      Insert operator:
-        Insert
-
   Stage: Stage-1
       Pre Insert operator:
         Pre-Insert task
 
-  Stage: Stage-3
+  Stage: Stage-2
     Map Reduce
       Map Operator Tree:
           TableScan

http://git-wip-us.apache.org/repos/asf/hive/blob/b1357240/hbase-handler/src/test/results/positive/hbase_queries.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_queries.q.out b/hbase-handler/src/test/results/positive/hbase_queries.q.out
index 8a91b66..eb91bf9 100644
--- a/hbase-handler/src/test/results/positive/hbase_queries.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_queries.q.out
@@ -40,9 +40,8 @@ POSTHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE hbase_table_1 SELECT *
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
-  Stage-2
   Stage-1 is a root stage
-  Stage-3 is a root stage
+  Stage-2 is a root stage
 
 STAGE PLANS:
   Stage: Stage-0
@@ -53,15 +52,11 @@ STAGE PLANS:
           properties:
             COLUMN_STATS_ACCURATE 
 
-  Stage: Stage-2
-      Insert operator:
-        Insert
-
   Stage: Stage-1
       Pre Insert operator:
         Pre-Insert task
 
-  Stage: Stage-3
+  Stage: Stage-2
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -506,10 +501,9 @@ ON (x.key = Y.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
-  Stage-2
   Stage-1 is a root stage
-  Stage-4 is a root stage
-  Stage-3 depends on stages: Stage-4
+  Stage-3 is a root stage
+  Stage-2 depends on stages: Stage-3
 
 STAGE PLANS:
   Stage: Stage-0
@@ -520,15 +514,11 @@ STAGE PLANS:
           properties:
             COLUMN_STATS_ACCURATE 
 
-  Stage: Stage-2
-      Insert operator:
-        Insert
-
   Stage: Stage-1
       Pre Insert operator:
         Pre-Insert task
 
-  Stage: Stage-4
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -564,7 +554,7 @@ STAGE PLANS:
                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
-  Stage: Stage-3
+  Stage: Stage-2
     Map Reduce
       Map Operator Tree:
           TableScan

http://git-wip-us.apache.org/repos/asf/hive/blob/b1357240/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out b/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
index 7e47a65..86a9fea 100644
--- a/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
@@ -34,16 +34,15 @@ select value,"" where a.key > 50 AND a.key < 100
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
-  Stage-2
-  Stage-5 depends on stages: Stage-3, Stage-2, Stage-1
+  Stage-4 depends on stages: Stage-2, Stage-0, Stage-1
   Stage-1 is a root stage
-  Stage-4 is a root stage
-  Stage-10 depends on stages: Stage-4 , consists of Stage-7, Stage-6, Stage-8
-  Stage-7
-  Stage-3 depends on stages: Stage-7, Stage-6, Stage-9
+  Stage-3 is a root stage
+  Stage-9 depends on stages: Stage-3 , consists of Stage-6, Stage-5, Stage-7
   Stage-6
-  Stage-8
-  Stage-9 depends on stages: Stage-8
+  Stage-2 depends on stages: Stage-6, Stage-5, Stage-8
+  Stage-5
+  Stage-7
+  Stage-8 depends on stages: Stage-7
 
 STAGE PLANS:
   Stage: Stage-0
@@ -54,11 +53,7 @@ STAGE PLANS:
           properties:
             COLUMN_STATS_ACCURATE 
 
-  Stage: Stage-2
-      Insert operator:
-        Insert
-
-  Stage: Stage-5
+  Stage: Stage-4
     Stats Work
       Basic Stats Work:
       Column Stats Desc:
@@ -70,7 +65,7 @@ STAGE PLANS:
       Pre Insert operator:
         Pre-Insert task
 
-  Stage: Stage-4
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -133,16 +128,16 @@ STAGE PLANS:
                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
-  Stage: Stage-10
+  Stage: Stage-9
     Conditional Operator
 
-  Stage: Stage-7
+  Stage: Stage-6
     Move Operator
       files:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-3
+  Stage: Stage-2
     Move Operator
       tables:
           replace: true
@@ -152,7 +147,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.src_x1
 
-  Stage: Stage-6
+  Stage: Stage-5
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -164,7 +159,7 @@ STAGE PLANS:
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: default.src_x1
 
-  Stage: Stage-8
+  Stage: Stage-7
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -176,7 +171,7 @@ STAGE PLANS:
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: default.src_x1
 
-  Stage: Stage-9
+  Stage: Stage-8
     Move Operator
       files:
           hdfs directory: true

http://git-wip-us.apache.org/repos/asf/hive/blob/b1357240/hbase-handler/src/test/results/positive/hbasestats.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbasestats.q.out b/hbase-handler/src/test/results/positive/hbasestats.q.out
index 92cf8cc..4724ad6 100644
--- a/hbase-handler/src/test/results/positive/hbasestats.q.out
+++ b/hbase-handler/src/test/results/positive/hbasestats.q.out
@@ -63,9 +63,8 @@ POSTHOOK: query: explain INSERT OVERWRITE TABLE users SELECT 'user1', 'IA', 'USA
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
-  Stage-2
   Stage-1 is a root stage
-  Stage-3 is a root stage
+  Stage-2 is a root stage
 
 STAGE PLANS:
   Stage: Stage-0
@@ -76,15 +75,11 @@ STAGE PLANS:
           properties:
             COLUMN_STATS_ACCURATE 
 
-  Stage: Stage-2
-      Insert operator:
-        Insert
-
   Stage: Stage-1
       Pre Insert operator:
         Pre-Insert task
 
-  Stage: Stage-3
+  Stage: Stage-2
     Map Reduce
       Map Operator Tree:
           TableScan

http://git-wip-us.apache.org/repos/asf/hive/blob/b1357240/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index ed31348..b3c95eb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -53,7 +53,11 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.concurrent.ExecutionException;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.ListenableFuture;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -149,7 +153,6 @@ import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData;
 import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
-import org.apache.hadoop.hive.ql.lockmgr.LockException;
 import org.apache.hadoop.hive.ql.metadata.CheckConstraint;
 import org.apache.hadoop.hive.ql.metadata.CheckResult;
 import org.apache.hadoop.hive.ql.metadata.DefaultConstraint;
@@ -211,7 +214,7 @@ import org.apache.hadoop.hive.ql.plan.DropWMTriggerDesc;
 import org.apache.hadoop.hive.ql.plan.FileMergeDesc;
 import org.apache.hadoop.hive.ql.plan.GrantDesc;
 import org.apache.hadoop.hive.ql.plan.GrantRevokeRoleDDL;
-import org.apache.hadoop.hive.ql.plan.InsertTableDesc;
+import org.apache.hadoop.hive.ql.plan.InsertCommitHookDesc;
 import org.apache.hadoop.hive.ql.plan.KillQueryDesc;
 import org.apache.hadoop.hive.ql.plan.ListBucketingCtx;
 import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc;
@@ -291,10 +294,39 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.stringtemplate.v4.ST;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.ListenableFuture;
+import java.io.BufferedWriter;
+import java.io.DataOutputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.Serializable;
+import java.io.Writer;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.charset.StandardCharsets;
+import java.sql.SQLException;
+import java.util.AbstractList;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.concurrent.ExecutionException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import static org.apache.commons.lang.StringUtils.join;
+import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
 
 /**
  * DDLTask implementation.
@@ -602,9 +634,9 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       if (cacheMetadataDesc != null) {
         return cacheMetadata(db, cacheMetadataDesc);
       }
-      InsertTableDesc insertTableDesc = work.getInsertTableDesc();
-      if (insertTableDesc != null) {
-        return insertCommitWork(db, insertTableDesc);
+      InsertCommitHookDesc insertCommitHookDesc = work.getInsertCommitHookDesc();
+      if (insertCommitHookDesc != null) {
+        return insertCommitWork(db, insertCommitHookDesc);
       }
       PreInsertTableDesc preInsertTableDesc = work.getPreInsertTableDesc();
       if (preInsertTableDesc != null) {
@@ -860,22 +892,22 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     return 0;
   }
 
-  private int insertCommitWork(Hive db, InsertTableDesc insertTableDesc) throws MetaException {
+  private int insertCommitWork(Hive db, InsertCommitHookDesc insertCommitHookDesc) throws MetaException {
     boolean failed = true;
-    HiveMetaHook hook = insertTableDesc.getTable().getStorageHandler().getMetaHook();
+    HiveMetaHook hook = insertCommitHookDesc.getTable().getStorageHandler().getMetaHook();
     if (hook == null || !(hook instanceof DefaultHiveMetaHook)) {
       return 0;
     }
     DefaultHiveMetaHook hiveMetaHook = (DefaultHiveMetaHook) hook;
     try {
-      hiveMetaHook.commitInsertTable(insertTableDesc.getTable().getTTable(),
-              insertTableDesc.isOverwrite()
+      hiveMetaHook.commitInsertTable(insertCommitHookDesc.getTable().getTTable(),
+              insertCommitHookDesc.isOverwrite()
       );
       failed = false;
     } finally {
       if (failed) {
-        hiveMetaHook.rollbackInsertTable(insertTableDesc.getTable().getTTable(),
-                insertTableDesc.isOverwrite()
+        hiveMetaHook.rollbackInsertTable(insertCommitHookDesc.getTable().getTTable(),
+                insertCommitHookDesc.isOverwrite()
         );
       }
     }
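
[Editor's note] For storage-handler authors, the contract driven by insertCommitWork() above is: the handler's DefaultHiveMetaHook receives commitInsertTable() once the insert has succeeded, and rollbackInsertTable() from the finally-block if the commit path failed. A minimal hypothetical hook is sketched below; MyHandlerMetaHook and the publish/discard helpers are illustrative names, not part of this patch, and the class is left abstract so the remaining HiveMetaHook create/drop callbacks need not be spelled out:

import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Table;

public abstract class MyHandlerMetaHook extends DefaultHiveMetaHook {
  @Override
  public void commitInsertTable(Table table, boolean overwrite) throws MetaException {
    // Invoked by DDLTask.insertCommitWork() after the data-moving stages finish.
    publishData(table, overwrite);
  }

  @Override
  public void rollbackInsertTable(Table table, boolean overwrite) throws MetaException {
    // Invoked from the finally-block when the commit path did not complete.
    discardData(table, overwrite);
  }

  // Illustrative extension points for the concrete handler.
  protected abstract void publishData(Table table, boolean overwrite) throws MetaException;
  protected abstract void discardData(Table table, boolean overwrite) throws MetaException;
}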

http://git-wip-us.apache.org/repos/asf/hive/blob/b1357240/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
index 1e8857b..e16411b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
@@ -68,7 +68,6 @@ public abstract class Task<T extends Serializable> implements Serializable, Node
   protected static transient Logger LOG = LoggerFactory.getLogger(Task.class);
   protected int taskTag;
   private boolean isLocalMode =false;
-  private boolean retryCmdWhenFail = false;
 
   public static final int NO_TAG = 0;
   public static final int COMMON_JOIN = 1;

http://git-wip-us.apache.org/repos/asf/hive/blob/b1357240/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java
index c0ce684..a91f45e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java
@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hive.ql.optimizer;
 
-import java.util.List;
-import java.util.Set;
-
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.OperatorUtils;
@@ -53,6 +50,9 @@ import org.apache.hadoop.hive.ql.plan.TezWork;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.List;
+import java.util.Set;
+
 /**
  * Finds Acid FileSinkDesc objects which can be created in the physical (disconnected) plan, e.g.
  * {@link org.apache.hadoop.hive.ql.parse.GenTezUtils#removeUnionOperators(GenTezProcContext, BaseWork, int)}

http://git-wip-us.apache.org/repos/asf/hive/blob/b1357240/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java
index 0df581a..f977fc1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java
@@ -18,16 +18,6 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
-import java.io.Serializable;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicInteger;
-
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.AppMasterEventOperator;
@@ -55,6 +45,16 @@ import org.apache.hadoop.hive.ql.plan.TezEdgeProperty;
 import org.apache.hadoop.hive.ql.plan.TezWork;
 import org.apache.hadoop.hive.ql.plan.UnionWork;
 
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
 /**
  * GenTezProcContext. GenTezProcContext maintains information
  * about the tasks and operators as we walk the operator tree

http://git-wip-us.apache.org/repos/asf/hive/blob/b1357240/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
index d890b31..89121e3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
@@ -18,16 +18,6 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.QueryProperties;
@@ -61,6 +51,16 @@ import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
 import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 /**
  * Parse Context: The current parse context. This is passed to the optimizer
  * which then transforms the operator tree using the parse context. All the
@@ -122,7 +122,7 @@ public class ParseContext {
   private MaterializedViewDesc materializedViewUpdateDesc;
   private boolean reduceSinkAddedBySortedDynPartition;
 
-  private Map<SelectOperator, Table> viewProjectToViewSchema;  
+  private Map<SelectOperator, Table> viewProjectToViewSchema;
   private ColumnAccessInfo columnAccessInfo;
   private boolean needViewColumnAuthorization;
   private Set<FileSinkDesc> acidFileSinks = Collections.emptySet();
@@ -141,7 +141,6 @@ public class ParseContext {
   }
 
   /**
-   * @param conf
    * @param opToPartPruner
    *          map from table scan operator to partition pruner
    * @param opToPartList

http://git-wip-us.apache.org/repos/asf/hive/blob/b1357240/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 1ab7158..ff952b6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -18,33 +18,12 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
-import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESTATSDBCLASS;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.Serializable;
-import java.security.AccessControlException;
-import java.util.ArrayDeque;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Deque;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Queue;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.UUID;
-import java.util.function.Supplier;
-import java.util.regex.Pattern;
-import java.util.regex.PatternSyntaxException;
-import java.util.stream.Collectors;
-
+import com.google.common.base.Splitter;
+import com.google.common.base.Strings;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Sets;
+import com.google.common.math.IntMath;
+import com.google.common.math.LongMath;
 import org.antlr.runtime.ClassicToken;
 import org.antlr.runtime.CommonToken;
 import org.antlr.runtime.Token;
@@ -65,9 +44,9 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.common.StatsSetupConst.StatDB;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
-import org.apache.hadoop.hive.common.StatsSetupConst.StatDB;
 import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -121,6 +100,7 @@ import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.UnionOperator;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.exec.tez.TezTask;
 import org.apache.hadoop.hive.ql.hooks.Entity;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
@@ -185,6 +165,7 @@ import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowType;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
+import org.apache.hadoop.hive.ql.plan.BaseWork;
 import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
 import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc;
 import org.apache.hadoop.hive.ql.plan.CreateViewDesc;
@@ -203,7 +184,7 @@ import org.apache.hadoop.hive.ql.plan.FilterDesc.SampleDesc;
 import org.apache.hadoop.hive.ql.plan.ForwardDesc;
 import org.apache.hadoop.hive.ql.plan.GroupByDesc;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
-import org.apache.hadoop.hive.ql.plan.InsertTableDesc;
+import org.apache.hadoop.hive.ql.plan.InsertCommitHookDesc;
 import org.apache.hadoop.hive.ql.plan.JoinCondDesc;
 import org.apache.hadoop.hive.ql.plan.JoinDesc;
 import org.apache.hadoop.hive.ql.plan.LateralViewForwardDesc;
@@ -230,9 +211,9 @@ import org.apache.hadoop.hive.ql.plan.ptf.PartitionedTableFunctionDef;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.ResourceType;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFArray;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFArray;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFCardinalityViolation;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFHash;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFMurmurHash;
@@ -270,12 +251,33 @@ import org.apache.hadoop.mapred.OutputFormat;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.security.UserGroupInformation;
 
-import com.google.common.base.Splitter;
-import com.google.common.base.Strings;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Sets;
-import com.google.common.math.IntMath;
-import com.google.common.math.LongMath;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.Serializable;
+import java.security.AccessControlException;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Deque;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Optional;
+import java.util.Queue;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.UUID;
+import java.util.function.Supplier;
+import java.util.regex.Pattern;
+import java.util.regex.PatternSyntaxException;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESTATSDBCLASS;
 
 /**
  * Implementation of the semantic analyzer. It generates the query plan.
@@ -7293,7 +7295,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         // true if it is insert overwrite.
         boolean overwrite = !qb.getParseInfo().isInsertIntoTable(
             String.format("%s.%s", dest_tab.getDbName(), dest_tab.getTableName()));
-        createInsertDesc(dest_tab, overwrite);
+        createPreInsertDesc(dest_tab, overwrite);
       }
 
       if (dest_tab.isMaterializedView()) {
@@ -7571,7 +7573,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         ltd.setInsertOverwrite(true);
       }
     }
-
     if (SessionState.get().isHiveServerQuery() &&
         null != table_desc &&
         table_desc.getSerdeClassName().equalsIgnoreCase(ThriftJDBCBinarySerDe.class.getName()) &&
@@ -7886,17 +7887,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     return dpCtx;
   }
 
-  private void createInsertDesc(Table table, boolean overwrite) {
-    Task<? extends Serializable>[] tasks = new Task[this.rootTasks.size()];
-    tasks = this.rootTasks.toArray(tasks);
+  private void createPreInsertDesc(Table table, boolean overwrite) {
     PreInsertTableDesc preInsertTableDesc = new PreInsertTableDesc(table, overwrite);
-    InsertTableDesc insertTableDesc = new InsertTableDesc(table, overwrite);
     this.rootTasks
         .add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), preInsertTableDesc)));
-    TaskFactory
-        .getAndMakeChild(new DDLWork(getInputs(), getOutputs(), insertTableDesc), conf, tasks);
+
   }
 
+
   private void genAutoColumnStatsGatheringPipeline(QB qb, TableDesc table_desc,
                                                    Map<String, String> partSpec, Operator curr, boolean isInsertInto) throws SemanticException {
     String tableName = table_desc.getTableName();
@@ -12232,9 +12230,27 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     }
     //find all Acid FileSinkOperatorS
     QueryPlanPostProcessor qp = new QueryPlanPostProcessor(rootTasks, acidFileSinks, ctx.getExecutionId());
+
+    // 10. Attach CTAS/Insert-Commit-hooks for Storage Handlers
+    final Optional<TezTask> optionalTezTask =
+        rootTasks.stream().filter(task -> task instanceof TezTask).map(task -> (TezTask) task)
+            .findFirst();
+    if (optionalTezTask.isPresent()) {
+      final TezTask tezTask = optionalTezTask.get();
+      rootTasks.stream()
+          .filter(task -> task.getWork() instanceof DDLWork)
+          .map(task -> (DDLWork) task.getWork())
+          .filter(ddlWork -> ddlWork.getPreInsertTableDesc() != null)
+          .map(ddlWork -> ddlWork.getPreInsertTableDesc())
+          .map(ddlPreInsertTask -> new InsertCommitHookDesc(ddlPreInsertTask.getTable(),
+              ddlPreInsertTask.isOverwrite()))
+          .forEach(insertCommitHookDesc -> tezTask.addDependentTask(
+              TaskFactory.get(new DDLWork(getInputs(), getOutputs(), insertCommitHookDesc), conf)));
+    }
+
     LOG.info("Completed plan generation");
 
-    // 10. put accessed columns to readEntity
+    // 11. put accessed columns to readEntity
     if (HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS)) {
       putAccessedColumnsToReadEntity(inputs, columnAccessInfo);
     }
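
[Editor's note] The @@ -12232 hunk above is the heart of the fix: instead of registering the commit task as a disconnected root (the old createInsertDesc() path), the analyzer now hangs an InsertCommitHookDesc task off the Tez task, so the hook fires only after the data is written. Restated below as a standalone sketch for readability; attachInsertCommitHooks is a hypothetical helper name, and rootTasks, getInputs(), getOutputs() and conf come from the enclosing SemanticAnalyzer.

import java.io.Serializable;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.exec.tez.TezTask;
import org.apache.hadoop.hive.ql.plan.DDLWork;
import org.apache.hadoop.hive.ql.plan.InsertCommitHookDesc;

private void attachInsertCommitHooks(List<Task<? extends Serializable>> rootTasks) {
  // Find the Tez task that actually writes the data, if the query has one.
  Optional<TezTask> optionalTezTask = rootTasks.stream()
      .filter(task -> task instanceof TezTask)
      .map(task -> (TezTask) task)
      .findFirst();
  if (optionalTezTask.isPresent()) {
    TezTask tezTask = optionalTezTask.get();
    // Turn each PreInsertTableDesc into a commit hook that depends on the Tez work,
    // rather than a free-floating root task disconnected from the operator tree.
    rootTasks.stream()
        .filter(task -> task.getWork() instanceof DDLWork)
        .map(task -> (DDLWork) task.getWork())
        .filter(ddlWork -> ddlWork.getPreInsertTableDesc() != null)
        .map(DDLWork::getPreInsertTableDesc)
        .map(pre -> new InsertCommitHookDesc(pre.getTable(), pre.isOverwrite()))
        .forEach(hook -> tezTask.addDependentTask(
            TaskFactory.get(new DDLWork(getInputs(), getOutputs(), hook), conf)));
  }
}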

http://git-wip-us.apache.org/repos/asf/hive/blob/b1357240/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index df1d9cb..95e1c31 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@ -18,21 +18,6 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.hive.ql.exec.DDLTask;
-import org.apache.hadoop.hive.ql.exec.MaterializedViewDesc;
-import org.apache.hadoop.hive.ql.io.AcidUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import com.google.common.collect.Interner;
 import com.google.common.collect.Interners;
 import org.apache.hadoop.fs.Path;
@@ -43,7 +28,9 @@ import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.exec.DDLTask;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
+import org.apache.hadoop.hive.ql.exec.MaterializedViewDesc;
 import org.apache.hadoop.hive.ql.exec.StatsTask;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
@@ -51,6 +38,7 @@ import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -82,6 +70,18 @@ import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.serde2.thrift.ThriftFormatter;
 import org.apache.hadoop.hive.serde2.thrift.ThriftJDBCBinarySerDe;
 import org.apache.hadoop.mapred.InputFormat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 /**
  * TaskCompiler is a the base class for classes that compile

http://git-wip-us.apache.org/repos/asf/hive/blob/b1357240/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
index 98da309..8ed3b03 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
@@ -17,15 +17,15 @@
  */
 package org.apache.hadoop.hive.ql.plan;
 
-import java.io.Serializable;
-import java.util.HashSet;
-
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc;
 import org.apache.hadoop.hive.ql.parse.PreInsertTableDesc;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 
+import java.io.Serializable;
+import java.util.HashSet;
+
 /**
  * DDLWork.
  *
@@ -35,7 +35,7 @@ public class DDLWork implements Serializable {
 
   // TODO: this can probably be replaced with much less code via dynamic dispatch and/or templates.
   private PreInsertTableDesc preInsertTableDesc;
-  private InsertTableDesc insertTableDesc;
+  private InsertCommitHookDesc insertCommitHookDesc;
   private AlterMaterializedViewDesc alterMVDesc;
   private CreateDatabaseDesc createDatabaseDesc;
   private SwitchDatabaseDesc switchDatabaseDesc;
@@ -522,9 +522,10 @@ public class DDLWork implements Serializable {
   }
 
   public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-          InsertTableDesc insertTableDesc) {
+          InsertCommitHookDesc insertCommitHookDesc
+  ) {
     this(inputs, outputs);
-    this.insertTableDesc = insertTableDesc;
+    this.insertCommitHookDesc = insertCommitHookDesc;
   }
 
   public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
@@ -1241,12 +1242,12 @@ public class DDLWork implements Serializable {
   }
 
   @Explain(displayName = "Insert operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-  public InsertTableDesc getInsertTableDesc() {
-    return insertTableDesc;
+  public InsertCommitHookDesc getInsertCommitHookDesc() {
+    return insertCommitHookDesc;
   }
 
-  public void setInsertTableDesc(InsertTableDesc insertTableDesc) {
-    this.insertTableDesc = insertTableDesc;
+  public void setInsertCommitHookDesc(InsertCommitHookDesc insertCommitHookDesc) {
+    this.insertCommitHookDesc = insertCommitHookDesc;
   }
 
   @Explain(displayName = "Pre Insert operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })

http://git-wip-us.apache.org/repos/asf/hive/blob/b1357240/ql/src/java/org/apache/hadoop/hive/ql/plan/InsertCommitHookDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/InsertCommitHookDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/InsertCommitHookDesc.java
new file mode 100644
index 0000000..8136506
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/InsertCommitHookDesc.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.plan;
+
+import org.apache.hadoop.hive.ql.metadata.Table;
+
+@Explain(displayName = "Commit-Insert-Hook", explainLevels = { Explain.Level.USER,
+    Explain.Level.DEFAULT, Explain.Level.EXTENDED })
+public class InsertCommitHookDesc extends DDLDesc {
+  private final Table table;
+  private final boolean overwrite;
+
+  public InsertCommitHookDesc(Table table, boolean overwrite) {
+    this.table = table;
+    this.overwrite = overwrite;
+  }
+
+  public Table getTable() {
+    return table;
+  }
+
+  public boolean isOverwrite() {
+    return overwrite;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b1357240/ql/src/java/org/apache/hadoop/hive/ql/plan/InsertTableDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/InsertTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/InsertTableDesc.java
deleted file mode 100644
index 212bc7a..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/InsertTableDesc.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-import org.apache.hadoop.hive.ql.metadata.Table;
-
-@Explain(displayName = "Insert", explainLevels = { Explain.Level.USER, Explain.Level.DEFAULT, Explain.Level.EXTENDED })
-public class InsertTableDesc extends DDLDesc {
-  private final Table table;
-  private final boolean overwrite;
-
-  public InsertTableDesc(Table table, boolean overwrite) {
-    this.table = table;
-    this.overwrite = overwrite;
-  }
-
-  public Table getTable() {
-    return table;
-  }
-
-  public boolean isOverwrite() {
-    return overwrite;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b1357240/ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out b/ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out
index 154e504..28deb79 100644
--- a/ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidmini_dynamic_partition.q.out
@@ -323,9 +323,9 @@ SELECT cast (`ctimestamp2` as timestamp with local time zone) as `__time`,
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
-  Stage-2
   Stage-1 is a root stage
-  Stage-3 is a root stage
+  Stage-2 is a root stage
+  Stage-4 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-0
@@ -336,15 +336,11 @@ STAGE PLANS:
           properties:
             COLUMN_STATS_ACCURATE 
 
-  Stage: Stage-2
-      Insert operator:
-        Insert
-
   Stage: Stage-1
       Pre Insert operator:
         Pre-Insert task
 
-  Stage: Stage-3
+  Stage: Stage-2
     Tez
 #### A masked pattern was here ####
       Edges:
@@ -389,6 +385,10 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.druid.serde.DruidSerDe
                       name: default.druid_partitioned_table
 
+  Stage: Stage-4
+      Insert operator:
+        Commit-Insert-Hook
+
 PREHOOK: query: INSERT INTO TABLE druid_partitioned_table
 SELECT cast (`ctimestamp2` as timestamp with local time zone) as `__time`,
   cstring1,
@@ -460,9 +460,9 @@ POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE druid_partitioned_table
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
-  Stage-2
   Stage-1 is a root stage
-  Stage-3 is a root stage
+  Stage-2 is a root stage
+  Stage-4 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-0
@@ -473,15 +473,11 @@ STAGE PLANS:
           properties:
             COLUMN_STATS_ACCURATE 
 
-  Stage: Stage-2
-      Insert operator:
-        Insert
-
   Stage: Stage-1
       Pre Insert operator:
         Pre-Insert task
 
-  Stage: Stage-3
+  Stage: Stage-2
     Tez
 #### A masked pattern was here ####
       Edges:
@@ -526,6 +522,10 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.druid.serde.DruidSerDe
                       name: default.druid_partitioned_table
 
+  Stage: Stage-4
+      Insert operator:
+        Commit-Insert-Hook
+
 PREHOOK: query: INSERT OVERWRITE TABLE druid_partitioned_table
   SELECT cast (`ctimestamp1` as timestamp with local time zone) as `__time`,
     cstring1,

http://git-wip-us.apache.org/repos/asf/hive/blob/b1357240/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
index c29f547..a5d338f 100644
--- a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
@@ -422,10 +422,10 @@ ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
-  Stage-2
-  Stage-5 depends on stages: Stage-2, Stage-1, Stage-3
+  Stage-4 depends on stages: Stage-0, Stage-1, Stage-2
   Stage-1 is a root stage
-  Stage-3 is a root stage
+  Stage-2 is a root stage
+  Stage-5 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-0
@@ -436,18 +436,14 @@ STAGE PLANS:
           properties:
             COLUMN_STATS_ACCURATE 
 
-  Stage: Stage-2
-      Insert operator:
-        Insert
-
-  Stage: Stage-5
+  Stage: Stage-4
     Materialized View Work
 
   Stage: Stage-1
       Pre Insert operator:
         Pre-Insert task
 
-  Stage: Stage-3
+  Stage: Stage-2
     Tez
 #### A masked pattern was here ####
       Edges:
@@ -492,6 +488,10 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.druid.serde.DruidSerDe
                       name: default.cmv_mat_view2
 
+  Stage: Stage-5
+      Insert operator:
+        Commit-Insert-Hook
+
 PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view2 REBUILD
 PREHOOK: type: QUERY
 PREHOOK: Input: default@cmv_basetable

http://git-wip-us.apache.org/repos/asf/hive/blob/b1357240/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultHiveMetaHook.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultHiveMetaHook.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultHiveMetaHook.java
index 0957945..4fc913c 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultHiveMetaHook.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultHiveMetaHook.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.hive.metastore.api.Table;
 
 public abstract class DefaultHiveMetaHook implements HiveMetaHook {
   /**
-   * Called after successfully after INSERT [OVERWRITE] statement is executed.
+   * Called after successfully INSERT [OVERWRITE] statement is executed.
    * @param table table definition
    * @param overwrite true if it is INSERT OVERWRITE
    *
