Repository: hive
Updated Branches:
  refs/heads/master 63c2025b7 -> 1fe8db618


HIVE-14988 : Support INSERT OVERWRITE into a partition on transactional tables 
(Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1fe8db61
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1fe8db61
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1fe8db61

Branch: refs/heads/master
Commit: 1fe8db618a7bbc09e041844021a2711c89355995
Parents: 63c2025
Author: Wei Zheng <w...@apache.org>
Authored: Sun Jul 16 23:11:35 2017 -0700
Committer: Wei Zheng <w...@apache.org>
Committed: Sun Jul 16 23:11:35 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/ErrorMsg.java     |   2 -
 .../hadoop/hive/ql/io/HiveFileFormatUtils.java  |   2 +-
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java    |  12 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |  12 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  23 +-
 .../hadoop/hive/ql/plan/FileSinkDesc.java       |  10 +
 .../hive/ql/txn/compactor/CompactorMR.java      |   2 +-
 .../apache/hadoop/hive/ql/TestTxnCommands2.java | 201 ++++++++++
 .../hive/ql/lockmgr/TestDbTxnManager2.java      |   6 -
 .../queries/clientnegative/acid_overwrite.q     |   8 -
 .../clientpositive/acid_insert_overwrite.q      |  73 ++++
 .../results/clientnegative/acid_overwrite.q.out |  19 -
 .../clientpositive/acid_insert_overwrite.q.out  | 395 +++++++++++++++++++
 13 files changed, 712 insertions(+), 53 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/1fe8db61/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java 
b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 2de4c7a..8642049 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -411,8 +411,6 @@ public enum ErrorMsg {
   INSERT_CANNOT_CREATE_TEMP_FILE(10293, "Unable to create temp file for insert 
values "),
   ACID_OP_ON_NONACID_TXNMGR(10294, "Attempt to do update or delete using 
transaction manager that" +
       " does not support these operations."),
-  NO_INSERT_OVERWRITE_WITH_ACID(10295, "INSERT OVERWRITE not allowed on table 
{0} with OutputFormat " +
-      "that implements AcidOutputFormat while transaction manager that 
supports ACID is in use", true),
   VALUES_TABLE_CONSTRUCTOR_NOT_SUPPORTED(10296,
       "Values clause with table constructor not yet supported"),
   ACID_OP_ON_NONACID_TABLE(10297, "Attempt to do update or delete on table {0} 
that does not use " +

http://git-wip-us.apache.org/repos/asf/hive/blob/1fe8db61/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
index cc69c7e..0070c68 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
@@ -340,7 +340,7 @@ public final class HiveFileFormatUtils {
         .isCompressed(conf.getCompressed())
         .tableProperties(tableProp)
         .reporter(reporter)
-        .writingBase(false)
+        .writingBase(conf.getInsertOverwrite())
         .minimumTransactionId(conf.getTransactionId())
         .maximumTransactionId(conf.getTransactionId())
         .bucket(bucket)

http://git-wip-us.apache.org/repos/asf/hive/blob/1fe8db61/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java 
b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index cdf2c40..03d5b09 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -420,11 +420,19 @@ public final class DbTxnManager extends 
HiveTxnManagerImpl {
          Seems much cleaner if each stmt is identified as a particular 
HiveOperation (which I'd think
          makes sense everywhere).  This however would be problematic for 
merge...*/
         case DDL_EXCLUSIVE:
-        case INSERT_OVERWRITE:
           compBuilder.setExclusive();
           compBuilder.setOperationType(DataOperationType.NO_TXN);
           break;
-
+        case INSERT_OVERWRITE:
+          t = getTable(output);
+          if (AcidUtils.isAcidTable(t)) {
+            compBuilder.setSemiShared();
+            compBuilder.setOperationType(DataOperationType.UPDATE);
+          } else {
+            compBuilder.setExclusive();
+            compBuilder.setOperationType(DataOperationType.NO_TXN);
+          }
+          break;
         case INSERT:
           assert t != null;
           if(AcidUtils.isAcidTable(t)) {

http://git-wip-us.apache.org/repos/asf/hive/blob/1fe8db61/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 7019f4c..0cfc8d2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -3424,17 +3424,19 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
       for (FileStatus origBucketStat : origBucketStats) {
         Path origBucketPath = origBucketStat.getPath();
-        moveAcidDeltaFiles(AcidUtils.DELTA_PREFIX, AcidUtils.deltaFileFilter,
+        moveAcidFiles(AcidUtils.DELTA_PREFIX, AcidUtils.deltaFileFilter,
                 fs, dst, origBucketPath, createdDeltaDirs, newFiles);
-        moveAcidDeltaFiles(AcidUtils.DELETE_DELTA_PREFIX, 
AcidUtils.deleteEventDeltaDirFilter,
+        moveAcidFiles(AcidUtils.DELETE_DELTA_PREFIX, 
AcidUtils.deleteEventDeltaDirFilter,
                 fs, dst,origBucketPath, createdDeltaDirs, newFiles);
+        moveAcidFiles(AcidUtils.BASE_PREFIX, AcidUtils.baseFileFilter,
+                fs, dst, origBucketPath, createdDeltaDirs, newFiles);
       }
     }
   }
 
-  private static void moveAcidDeltaFiles(String deltaFileType, PathFilter 
pathFilter, FileSystem fs,
-                                         Path dst, Path origBucketPath, 
Set<Path> createdDeltaDirs,
-                                         List<Path> newFiles) throws 
HiveException {
+  private static void moveAcidFiles(String deltaFileType, PathFilter 
pathFilter, FileSystem fs,
+                                    Path dst, Path origBucketPath, Set<Path> 
createdDeltaDirs,
+                                    List<Path> newFiles) throws HiveException {
     LOG.debug("Acid move looking for " + deltaFileType + " files in bucket " + 
origBucketPath);
 
     FileStatus[] deltaStats = null;

http://git-wip-us.apache.org/repos/asf/hive/blob/1fe8db61/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index d7f7d48..56e785e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -1500,7 +1500,7 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
                 && ast.getChild(0).getType() == HiveParser.TOK_TAB) {
               String fullTableName = getUnescapedName((ASTNode) 
ast.getChild(0).getChild(0),
                   SessionState.get().getCurrentDatabase());
-              qbp.getInsertOverwriteTables().put(fullTableName, ast);
+              qbp.getInsertOverwriteTables().put(fullTableName.toLowerCase(), 
ast);
             }
           }
         }
@@ -2153,7 +2153,7 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
           boolean isTableWrittenTo = 
qb.getParseInfo().isInsertIntoTable(ts.tableHandle.getDbName(),
             ts.tableHandle.getTableName());
           isTableWrittenTo |= (qb.getParseInfo().getInsertOverwriteTables().
-            get(getUnescapedName((ASTNode) ast.getChild(0), 
ts.tableHandle.getDbName())) != null);
+            get(getUnescapedName((ASTNode) ast.getChild(0), 
ts.tableHandle.getDbName()).toLowerCase()) != null);
           assert isTableWrittenTo :
             "Inconsistent data structure detected: we are writing to " + 
ts.tableHandle  + " in " +
               name + " but it's not in isInsertIntoTable() or 
getInsertOverwriteTables()";
@@ -6895,8 +6895,10 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
           checkAcidConstraints(qb, table_desc, dest_tab);
         }
         ltd = new LoadTableDesc(queryTmpdir, table_desc, dpCtx, acidOp);
+        // For Acid table, Insert Overwrite shouldn't replace the table 
content. We keep the old
+        // deltas and base and leave them up to the cleaner to clean up
         
ltd.setReplace(!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(),
-            dest_tab.getTableName()));
+            dest_tab.getTableName()) && !destTableIsAcid);
         ltd.setLbCtx(lbCtx);
         loadTableWork.add(ltd);
       } else {
@@ -7008,8 +7010,10 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
         checkAcidConstraints(qb, table_desc, dest_tab);
       }
       ltd = new LoadTableDesc(queryTmpdir, table_desc, dest_part.getSpec(), 
acidOp);
+      // For Acid table, Insert Overwrite shouldn't replace the table content. 
We keep the old
+      // deltas and base and leave them up to the cleaner to clean up
       ltd.setReplace(!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(),
-          dest_tab.getTableName()));
+          dest_tab.getTableName()) && !destTableIsAcid);
       ltd.setLbCtx(lbCtx);
 
       loadTableWork.add(ltd);
@@ -7239,6 +7243,12 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
       AcidUtils.Operation wt = updating(dest) ? AcidUtils.Operation.UPDATE :
           (deleting(dest) ? AcidUtils.Operation.DELETE : 
AcidUtils.Operation.INSERT);
       fileSinkDesc.setWriteType(wt);
+
+      String destTableFullName = dest_tab.getCompleteName().replace('@', '.');
+      Map<String, ASTNode> iowMap = 
qb.getParseInfo().getInsertOverwriteTables();
+      if (iowMap.containsKey(destTableFullName)) {
+        fileSinkDesc.setInsertOverwrite(true);
+      }
       acidFileSinks.add(fileSinkDesc);
     }
 
@@ -7373,11 +7383,6 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
   // that isn't true.
   private void checkAcidConstraints(QB qb, TableDesc tableDesc,
                                     Table table) throws SemanticException {
-    String tableName = tableDesc.getTableName();
-    if (!qb.getParseInfo().isInsertIntoTable(tableName)) {
-      LOG.debug("Couldn't find table " + tableName + " in insertIntoTable");
-      throw new SemanticException(ErrorMsg.NO_INSERT_OVERWRITE_WITH_ACID, 
tableName);
-    }
     /*
     LOG.info("Modifying config values for ACID write");
     conf.setBoolVar(ConfVars.HIVEOPTREDUCEDEDUPLICATION, true);

http://git-wip-us.apache.org/repos/asf/hive/blob/1fe8db61/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java 
b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
index 4716adc..fd27f53 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
@@ -103,6 +103,8 @@ public class FileSinkDesc extends AbstractOperatorDesc {
    */
   private boolean isUsingThriftJDBCBinarySerDe = false;
 
+  private boolean isInsertOverwrite = false;
+
   public FileSinkDesc() {
   }
 
@@ -509,4 +511,12 @@ public class FileSinkDesc extends AbstractOperatorDesc {
     }
     return new FileSinkOperatorExplainVectorization(vectorDesc);
   }
+
+  public void setInsertOverwrite(boolean isInsertOverwrite) {
+    this.isInsertOverwrite = isInsertOverwrite;
+  }
+
+  public boolean getInsertOverwrite() {
+    return isInsertOverwrite;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/1fe8db61/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
index bafed9e..5e2146e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
@@ -265,7 +265,7 @@ public class CompactorMR {
       }
     }
 
-    if (parsedDeltas.size() == 0 && dir.getOriginalFiles() == null) {
+    if (parsedDeltas.size() == 0 && dir.getOriginalFiles().size() == 0) {
       // Skip compaction if there's no delta files AND there's no original 
files
       LOG.error("No delta files or original files found to compact in " + 
sd.getLocation() + " for compactionId=" + ci.id);
       return;

http://git-wip-us.apache.org/repos/asf/hive/blob/1fe8db61/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java 
b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index ed1a328..408c089 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -68,6 +68,8 @@ import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import javax.validation.constraints.AssertTrue;
+
 /**
  * TODO: this should be merged with TestTxnCommands once that is checked in
  * specifically the tests; the supporting code here is just a clone of 
TestTxnCommands
@@ -1761,6 +1763,205 @@ public class TestTxnCommands2 {
     Assert.assertEquals(stringifyValues(rExpected), r);
   }
   /**
+   * Test the scenario when IOW comes in before a MAJOR compaction happens
+   * @throws Exception
+   */
+  @Test
+  public void testInsertOverwrite1() throws Exception {
+    FileSystem fs = FileSystem.get(hiveConf);
+    FileStatus[] status;
+
+    // 1. Insert two rows to an ACID table
+    runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(1,2)");
+    runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(3,4)");
+    status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+        (Table.ACIDTBL).toString().toLowerCase()), 
FileUtils.STAGING_DIR_PATH_FILTER);
+    // There should be 2 delta dirs in the location
+    Assert.assertEquals(2, status.length);
+    for (int i = 0; i < status.length; i++) {
+      Assert.assertTrue(status[i].getPath().getName().matches("delta_.*"));
+    }
+
+    // 2. INSERT OVERWRITE
+    // Prepare data for the source table
+    runStatementOnDriver("insert into " + Table.NONACIDORCTBL + "(a,b) 
values(5,6),(7,8)");
+    // Insert overwrite ACID table from source table
+    runStatementOnDriver("insert overwrite table " + Table.ACIDTBL + " select 
a,b from " + Table.NONACIDORCTBL);
+    status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+        (Table.ACIDTBL).toString().toLowerCase()), 
FileUtils.STAGING_DIR_PATH_FILTER);
+    // There should be 2 delta dirs, plus a base dir in the location
+    Assert.assertEquals(3, status.length);
+    boolean sawBase = false;
+    String baseDir = "";
+    int deltaCount = 0;
+    for (int i = 0; i < status.length; i++) {
+      String dirName = status[i].getPath().getName();
+      if (dirName.matches("delta_.*")) {
+        deltaCount++;
+      } else {
+        sawBase = true;
+        baseDir = dirName;
+        Assert.assertTrue(baseDir.matches("base_.*"));
+      }
+    }
+    Assert.assertEquals(2, deltaCount);
+    Assert.assertTrue(sawBase);
+    // Verify query result
+    List<String> rs = runStatementOnDriver("select a,b from " + Table.ACIDTBL 
+ " order by a");
+    int [][] resultData = new int[][] {{5,6},{7,8}};
+    Assert.assertEquals(stringifyValues(resultData), rs);
+
+    // 3. Perform a major compaction. Nothing should change. Both deltas and 
base dirs should have the same name.
+    // Re-verify directory layout and query result by using the same logic as 
above
+    runStatementOnDriver("alter table "+ Table.ACIDTBL + " compact 'MAJOR'");
+    runWorker(hiveConf);
+    status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+        (Table.ACIDTBL).toString().toLowerCase()), 
FileUtils.STAGING_DIR_PATH_FILTER);
+    // There should be 2 delta dirs, plus a base dir in the location
+    Assert.assertEquals(3, status.length);
+    sawBase = false;
+    deltaCount = 0;
+    for (int i = 0; i < status.length; i++) {
+      String dirName = status[i].getPath().getName();
+      if (dirName.matches("delta_.*")) {
+        deltaCount++;
+      } else {
+        sawBase = true;
+        Assert.assertTrue(dirName.matches("base_.*"));
+        Assert.assertEquals(baseDir, dirName);
+      }
+    }
+    Assert.assertEquals(2, deltaCount);
+    Assert.assertTrue(sawBase);
+    // Verify query result
+    rs = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by 
a");
+    Assert.assertEquals(stringifyValues(resultData), rs);
+
+    // 4. Run Cleaner. It should remove the 2 delta dirs.
+    runCleaner(hiveConf);
+    // There should be only 1 directory left: base_xxxxxxx.
+    // The delta dirs should have been cleaned up.
+    status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+        (Table.ACIDTBL).toString().toLowerCase()), 
FileUtils.STAGING_DIR_PATH_FILTER);
+    Assert.assertEquals(1, status.length);
+    Assert.assertTrue(status[0].getPath().getName().matches("base_.*"));
+    Assert.assertEquals(baseDir, status[0].getPath().getName());
+    // Verify query result
+    rs = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by 
a");
+    Assert.assertEquals(stringifyValues(resultData), rs);
+  }
+
+  /**
+   * Test the scenario when IOW comes in after a MAJOR compaction happens
+   * @throws Exception
+   */
+  @Test
+  public void testInsertOverwrite2() throws Exception {
+    FileSystem fs = FileSystem.get(hiveConf);
+    FileStatus[] status;
+
+    // 1. Insert two rows to an ACID table
+    runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(1,2)");
+    runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(3,4)");
+    status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+        (Table.ACIDTBL).toString().toLowerCase()), 
FileUtils.STAGING_DIR_PATH_FILTER);
+    // There should be 2 delta dirs in the location
+    Assert.assertEquals(2, status.length);
+    for (int i = 0; i < status.length; i++) {
+      Assert.assertTrue(status[i].getPath().getName().matches("delta_.*"));
+    }
+
+    // 2. Perform a major compaction. There should be an extra base dir now.
+    runStatementOnDriver("alter table "+ Table.ACIDTBL + " compact 'MAJOR'");
+    runWorker(hiveConf);
+    status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+        (Table.ACIDTBL).toString().toLowerCase()), 
FileUtils.STAGING_DIR_PATH_FILTER);
+    // There should be 2 delta dirs, plus a base dir in the location
+    Assert.assertEquals(3, status.length);
+    boolean sawBase = false;
+    int deltaCount = 0;
+    for (int i = 0; i < status.length; i++) {
+      String dirName = status[i].getPath().getName();
+      if (dirName.matches("delta_.*")) {
+        deltaCount++;
+      } else {
+        sawBase = true;
+        Assert.assertTrue(dirName.matches("base_.*"));
+      }
+    }
+    Assert.assertEquals(2, deltaCount);
+    Assert.assertTrue(sawBase);
+    // Verify query result
+    int [][] resultData = new int[][] {{1,2},{3,4}};
+    List<String> rs = runStatementOnDriver("select a,b from " + Table.ACIDTBL 
+ " order by a");
+    Assert.assertEquals(stringifyValues(resultData), rs);
+
+    // 3. INSERT OVERWRITE
+    // Prepare data for the source table
+    runStatementOnDriver("insert into " + Table.NONACIDORCTBL + "(a,b) 
values(5,6),(7,8)");
+    // Insert overwrite ACID table from source table
+    runStatementOnDriver("insert overwrite table " + Table.ACIDTBL + " select 
a,b from " + Table.NONACIDORCTBL);
+    status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+        (Table.ACIDTBL).toString().toLowerCase()), 
FileUtils.STAGING_DIR_PATH_FILTER);
+    // There should be 2 delta dirs, plus 2 base dirs in the location
+    Assert.assertEquals(4, status.length);
+    int baseCount = 0;
+    deltaCount = 0;
+    for (int i = 0; i < status.length; i++) {
+      String dirName = status[i].getPath().getName();
+      if (dirName.matches("delta_.*")) {
+        deltaCount++;
+      } else {
+        baseCount++;
+      }
+    }
+    Assert.assertEquals(2, deltaCount);
+    Assert.assertEquals(2, baseCount);
+    // Verify query result
+    resultData = new int[][] {{5,6},{7,8}};
+    rs = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by 
a");
+    Assert.assertEquals(stringifyValues(resultData), rs);
+
+    // 4. Perform another major compaction. Nothing should change. Both deltas 
and both base dirs
+    // should have the same name.
+    // Re-verify directory layout and query result by using the same logic as 
above
+    runStatementOnDriver("alter table "+ Table.ACIDTBL + " compact 'MAJOR'");
+    runWorker(hiveConf);
+    status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+        (Table.ACIDTBL).toString().toLowerCase()), 
FileUtils.STAGING_DIR_PATH_FILTER);
+    // There should be 2 delta dirs, plus 2 base dirs in the location
+    Assert.assertEquals(4, status.length);
+    baseCount = 0;
+    deltaCount = 0;
+    for (int i = 0; i < status.length; i++) {
+      String dirName = status[i].getPath().getName();
+      if (dirName.matches("delta_.*")) {
+        deltaCount++;
+      } else {
+        Assert.assertTrue(dirName.matches("base_.*"));
+        baseCount++;
+      }
+    }
+    Assert.assertEquals(2, deltaCount);
+    Assert.assertEquals(2, baseCount);
+    // Verify query result
+    rs = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by 
a");
+    Assert.assertEquals(stringifyValues(resultData), rs);
+
+    // 5. Run Cleaner. It should remove the 2 delta dirs and 1 old base dir.
+    runCleaner(hiveConf);
+    // There should be only 1 directory left: base_xxxxxxx.
+    // The delta dirs should have been cleaned up.
+    status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
+        (Table.ACIDTBL).toString().toLowerCase()), 
FileUtils.STAGING_DIR_PATH_FILTER);
+    Assert.assertEquals(1, status.length);
+    Assert.assertTrue(status[0].getPath().getName().matches("base_.*"));
+    // Verify query result
+    rs = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " order by 
a");
+    Assert.assertEquals(stringifyValues(resultData), rs);
+  }
+
+  /**
    * takes raw data and turns it into a string as if from Driver.getResults()
    * sorts rows in dictionary order
    */

http://git-wip-us.apache.org/repos/asf/hive/blob/1fe8db61/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
----------------------------------------------------------------------
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java 
b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index c31241a..6d1cdcb 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -376,12 +376,6 @@ public class TestDbTxnManager2 {
     Assert.assertTrue(cpr.getErrorMessage().contains("This command is not 
allowed on an ACID table"));
 
     useDummyTxnManagerTemporarily(conf);
-    cpr = driver.compileAndRespond("insert overwrite table T10 select a, b 
from T11");
-    Assert.assertEquals(ErrorMsg.NO_INSERT_OVERWRITE_WITH_ACID.getErrorCode(), 
cpr.getResponseCode());
-    Assert.assertTrue(cpr.getErrorMessage().contains("INSERT OVERWRITE not 
allowed on table default.t10 with OutputFormat" +
-        " that implements AcidOutputFormat while transaction manager that 
supports ACID is in use"));
-
-    useDummyTxnManagerTemporarily(conf);
     cpr = driver.compileAndRespond("update T10 set a=0 where b=1");
     Assert.assertEquals(ErrorMsg.ACID_OP_ON_NONACID_TXNMGR.getErrorCode(), 
cpr.getResponseCode());
     Assert.assertTrue(cpr.getErrorMessage().contains("Attempt to do update or 
delete using transaction manager that does not support these operations."));

http://git-wip-us.apache.org/repos/asf/hive/blob/1fe8db61/ql/src/test/queries/clientnegative/acid_overwrite.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/acid_overwrite.q 
b/ql/src/test/queries/clientnegative/acid_overwrite.q
deleted file mode 100644
index 9ccf31e..0000000
--- a/ql/src/test/queries/clientnegative/acid_overwrite.q
+++ /dev/null
@@ -1,8 +0,0 @@
-set hive.support.concurrency=true;
-set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-
-
-create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets 
stored as orc TBLPROPERTIES ('transactional'='true');
-
-insert into table acid_uanp select cint, cast(cstring1 as varchar(128)) from 
alltypesorc where cint < 0 order by cint limit 10;
-insert overwrite table acid_uanp select cint, cast(cstring1 as varchar(128)) 
from alltypesorc;

http://git-wip-us.apache.org/repos/asf/hive/blob/1fe8db61/ql/src/test/queries/clientpositive/acid_insert_overwrite.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_insert_overwrite.q 
b/ql/src/test/queries/clientpositive/acid_insert_overwrite.q
new file mode 100644
index 0000000..ba73369
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/acid_insert_overwrite.q
@@ -0,0 +1,73 @@
+set hive.mapred.mode=nonstrict;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+-- create a source table where the IOW data select from
+create table srctbl (key char(1), value int);
+insert into table srctbl values ('d', 4), ('e', 5), ('f', 6), ('i', 9), ('j', 
10);
+select * from srctbl;
+
+-- insert overwrite on non-partitioned acid table
+drop table if exists acidtbl1;
+create table acidtbl1 (key char(1), value int) clustered by (value) into 2 
buckets stored as orc TBLPROPERTIES ("transactional"="true");
+
+insert into table acidtbl1 values ('a', 1), ('b', 2), ('c', 3);
+select * from acidtbl1 order by key;
+
+insert overwrite table acidtbl1 select key, value from srctbl where key in 
('d', 'e', 'f');
+select * from acidtbl1 order by key;
+
+insert into table acidtbl1 values ('g', 7), ('h', 8);
+select * from acidtbl1 order by key;
+
+insert overwrite table acidtbl1 select key, value from srctbl where key in 
('i', 'j');
+select * from acidtbl1 order by key;
+
+insert into table acidtbl1 values ('k', 11);
+insert into table acidtbl1 values ('l', 12);
+select * from acidtbl1 order by key;
+
+
+-- insert overwrite with multi table insert
+drop table if exists acidtbl2;
+create table acidtbl2 (key char(1), value int) clustered by (value) into 2 
buckets stored as orc TBLPROPERTIES ("transactional"="true");
+
+drop table if exists acidtbl3;
+create table acidtbl3 (key char(1), value int) clustered by (value) into 2 
buckets stored as orc TBLPROPERTIES ("transactional"="true");
+
+insert into table acidtbl2 values ('m', 13), ('n', 14);
+select * from acidtbl2 order by key;
+
+insert into table acidtbl3 values ('o', 15), ('p', 16);
+select * from acidtbl3 order by key;
+
+from acidtbl1
+insert overwrite table acidtbl2 select key, value
+insert into table acidtbl3 select key, value;
+
+select * from acidtbl2 order by key;
+select * from acidtbl3 order by key;
+
+drop table acidtbl1;
+drop table acidtbl2;
+drop table acidtbl3;
+
+
+-- insert overwrite on partitioned acid table
+drop table if exists acidparttbl;
+create table acidparttbl (key char(1), value int) partitioned by (p int) 
clustered by (value) into 2 buckets stored as orc TBLPROPERTIES 
("transactional"="true");
+
+insert into table acidparttbl partition(p=100) values ('a', 1), ('b', 2), 
('c', 3);
+select p, key, value from acidparttbl order by p, key;
+
+insert overwrite table acidparttbl partition(p=100) select key, value from 
srctbl where key in ('d', 'e', 'f');
+select p, key, value from acidparttbl order by p, key;
+
+insert into table acidparttbl partition(p) values ('g', 7, 100), ('h', 8, 200);
+select p, key, value from acidparttbl order by p, key;
+
+insert overwrite table acidparttbl partition(p) values ('i', 9, 100), ('j', 
10, 200);
+select p, key, value from acidparttbl order by p, key;
+
+drop table acidparttbl;

http://git-wip-us.apache.org/repos/asf/hive/blob/1fe8db61/ql/src/test/results/clientnegative/acid_overwrite.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/acid_overwrite.q.out 
b/ql/src/test/results/clientnegative/acid_overwrite.q.out
deleted file mode 100644
index 15070fa..0000000
--- a/ql/src/test/results/clientnegative/acid_overwrite.q.out
+++ /dev/null
@@ -1,19 +0,0 @@
-PREHOOK: query: create table acid_uanp(a int, b varchar(128)) clustered by (a) 
into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@acid_uanp
-POSTHOOK: query: create table acid_uanp(a int, b varchar(128)) clustered by 
(a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@acid_uanp
-PREHOOK: query: insert into table acid_uanp select cint, cast(cstring1 as 
varchar(128)) from alltypesorc where cint < 0 order by cint limit 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@alltypesorc
-PREHOOK: Output: default@acid_uanp
-POSTHOOK: query: insert into table acid_uanp select cint, cast(cstring1 as 
varchar(128)) from alltypesorc where cint < 0 order by cint limit 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@alltypesorc
-POSTHOOK: Output: default@acid_uanp
-POSTHOOK: Lineage: acid_uanp.a SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
-POSTHOOK: Lineage: acid_uanp.b EXPRESSION 
[(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, 
comment:null), ]
-FAILED: SemanticException [Error 10295]: INSERT OVERWRITE not allowed on table 
default.acid_uanp with OutputFormat that implements AcidOutputFormat while 
transaction manager that supports ACID is in use

http://git-wip-us.apache.org/repos/asf/hive/blob/1fe8db61/ql/src/test/results/clientpositive/acid_insert_overwrite.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/acid_insert_overwrite.q.out 
b/ql/src/test/results/clientpositive/acid_insert_overwrite.q.out
new file mode 100644
index 0000000..53ab0f5
--- /dev/null
+++ b/ql/src/test/results/clientpositive/acid_insert_overwrite.q.out
@@ -0,0 +1,395 @@
+PREHOOK: query: create table srctbl (key char(1), value int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srctbl
+POSTHOOK: query: create table srctbl (key char(1), value int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srctbl
+PREHOOK: query: insert into table srctbl values ('d', 4), ('e', 5), ('f', 6), 
('i', 9), ('j', 10)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@srctbl
+POSTHOOK: query: insert into table srctbl values ('d', 4), ('e', 5), ('f', 6), 
('i', 9), ('j', 10)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@srctbl
+POSTHOOK: Lineage: srctbl.key EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
+POSTHOOK: Lineage: srctbl.value EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
+PREHOOK: query: select * from srctbl
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srctbl
+#### A masked pattern was here ####
+POSTHOOK: query: select * from srctbl
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srctbl
+#### A masked pattern was here ####
+d      4
+e      5
+f      6
+i      9
+j      10
+PREHOOK: query: drop table if exists acidtbl1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists acidtbl1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table acidtbl1 (key char(1), value int) clustered by 
(value) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acidtbl1
+POSTHOOK: query: create table acidtbl1 (key char(1), value int) clustered by 
(value) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acidtbl1
+PREHOOK: query: insert into table acidtbl1 values ('a', 1), ('b', 2), ('c', 3)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@acidtbl1
+POSTHOOK: query: insert into table acidtbl1 values ('a', 1), ('b', 2), ('c', 3)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@acidtbl1
+POSTHOOK: Lineage: acidtbl1.key EXPRESSION 
[(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
+POSTHOOK: Lineage: acidtbl1.value EXPRESSION 
[(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
+PREHOOK: query: select * from acidtbl1 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acidtbl1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acidtbl1 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acidtbl1
+#### A masked pattern was here ####
+a      1
+b      2
+c      3
+PREHOOK: query: insert overwrite table acidtbl1 select key, value from srctbl 
where key in ('d', 'e', 'f')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srctbl
+PREHOOK: Output: default@acidtbl1
+POSTHOOK: query: insert overwrite table acidtbl1 select key, value from srctbl 
where key in ('d', 'e', 'f')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srctbl
+POSTHOOK: Output: default@acidtbl1
+POSTHOOK: Lineage: acidtbl1.key SIMPLE [(srctbl)srctbl.FieldSchema(name:key, 
type:char(1), comment:null), ]
+POSTHOOK: Lineage: acidtbl1.value SIMPLE 
[(srctbl)srctbl.FieldSchema(name:value, type:int, comment:null), ]
+PREHOOK: query: select * from acidtbl1 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acidtbl1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acidtbl1 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acidtbl1
+#### A masked pattern was here ####
+d      4
+e      5
+f      6
+PREHOOK: query: insert into table acidtbl1 values ('g', 7), ('h', 8)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@acidtbl1
+POSTHOOK: query: insert into table acidtbl1 values ('g', 7), ('h', 8)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@acidtbl1
+POSTHOOK: Lineage: acidtbl1.key EXPRESSION 
[(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
+POSTHOOK: Lineage: acidtbl1.value EXPRESSION 
[(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
+PREHOOK: query: select * from acidtbl1 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acidtbl1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acidtbl1 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acidtbl1
+#### A masked pattern was here ####
+d      4
+e      5
+f      6
+g      7
+h      8
+PREHOOK: query: insert overwrite table acidtbl1 select key, value from srctbl 
where key in ('i', 'j')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srctbl
+PREHOOK: Output: default@acidtbl1
+POSTHOOK: query: insert overwrite table acidtbl1 select key, value from srctbl 
where key in ('i', 'j')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srctbl
+POSTHOOK: Output: default@acidtbl1
+POSTHOOK: Lineage: acidtbl1.key SIMPLE [(srctbl)srctbl.FieldSchema(name:key, 
type:char(1), comment:null), ]
+POSTHOOK: Lineage: acidtbl1.value SIMPLE 
[(srctbl)srctbl.FieldSchema(name:value, type:int, comment:null), ]
+PREHOOK: query: select * from acidtbl1 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acidtbl1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acidtbl1 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acidtbl1
+#### A masked pattern was here ####
+i      9
+j      10
+PREHOOK: query: insert into table acidtbl1 values ('k', 11)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@acidtbl1
+POSTHOOK: query: insert into table acidtbl1 values ('k', 11)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@acidtbl1
+POSTHOOK: Lineage: acidtbl1.key EXPRESSION 
[(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
+POSTHOOK: Lineage: acidtbl1.value EXPRESSION 
[(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
+PREHOOK: query: insert into table acidtbl1 values ('l', 12)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@acidtbl1
+POSTHOOK: query: insert into table acidtbl1 values ('l', 12)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@acidtbl1
+POSTHOOK: Lineage: acidtbl1.key EXPRESSION 
[(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
+POSTHOOK: Lineage: acidtbl1.value EXPRESSION 
[(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
+PREHOOK: query: select * from acidtbl1 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acidtbl1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acidtbl1 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acidtbl1
+#### A masked pattern was here ####
+i      9
+j      10
+k      11
+l      12
+PREHOOK: query: drop table if exists acidtbl2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists acidtbl2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table acidtbl2 (key char(1), value int) clustered by 
(value) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acidtbl2
+POSTHOOK: query: create table acidtbl2 (key char(1), value int) clustered by 
(value) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acidtbl2
+PREHOOK: query: drop table if exists acidtbl3
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists acidtbl3
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table acidtbl3 (key char(1), value int) clustered by 
(value) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acidtbl3
+POSTHOOK: query: create table acidtbl3 (key char(1), value int) clustered by 
(value) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acidtbl3
+PREHOOK: query: insert into table acidtbl2 values ('m', 13), ('n', 14)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@acidtbl2
+POSTHOOK: query: insert into table acidtbl2 values ('m', 13), ('n', 14)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@acidtbl2
+POSTHOOK: Lineage: acidtbl2.key EXPRESSION 
[(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
+POSTHOOK: Lineage: acidtbl2.value EXPRESSION 
[(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
+PREHOOK: query: select * from acidtbl2 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acidtbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acidtbl2 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acidtbl2
+#### A masked pattern was here ####
+m      13
+n      14
+PREHOOK: query: insert into table acidtbl3 values ('o', 15), ('p', 16)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@acidtbl3
+POSTHOOK: query: insert into table acidtbl3 values ('o', 15), ('p', 16)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@acidtbl3
+POSTHOOK: Lineage: acidtbl3.key EXPRESSION 
[(values__tmp__table__7)values__tmp__table__7.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
+POSTHOOK: Lineage: acidtbl3.value EXPRESSION 
[(values__tmp__table__7)values__tmp__table__7.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
+PREHOOK: query: select * from acidtbl3 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acidtbl3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acidtbl3 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acidtbl3
+#### A masked pattern was here ####
+o      15
+p      16
+PREHOOK: query: from acidtbl1
+insert overwrite table acidtbl2 select key, value
+insert into table acidtbl3 select key, value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acidtbl1
+PREHOOK: Output: default@acidtbl2
+PREHOOK: Output: default@acidtbl3
+POSTHOOK: query: from acidtbl1
+insert overwrite table acidtbl2 select key, value
+insert into table acidtbl3 select key, value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acidtbl1
+POSTHOOK: Output: default@acidtbl2
+POSTHOOK: Output: default@acidtbl3
+POSTHOOK: Lineage: acidtbl2.key SIMPLE 
[(acidtbl1)acidtbl1.FieldSchema(name:key, type:char(1), comment:null), ]
+POSTHOOK: Lineage: acidtbl2.value SIMPLE 
[(acidtbl1)acidtbl1.FieldSchema(name:value, type:int, comment:null), ]
+POSTHOOK: Lineage: acidtbl3.key SIMPLE 
[(acidtbl1)acidtbl1.FieldSchema(name:key, type:char(1), comment:null), ]
+POSTHOOK: Lineage: acidtbl3.value SIMPLE 
[(acidtbl1)acidtbl1.FieldSchema(name:value, type:int, comment:null), ]
+PREHOOK: query: select * from acidtbl2 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acidtbl2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acidtbl2 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acidtbl2
+#### A masked pattern was here ####
+i      9
+j      10
+k      11
+l      12
+PREHOOK: query: select * from acidtbl3 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acidtbl3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acidtbl3 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acidtbl3
+#### A masked pattern was here ####
+i      9
+j      10
+k      11
+l      12
+o      15
+p      16
+PREHOOK: query: drop table acidtbl1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@acidtbl1
+PREHOOK: Output: default@acidtbl1
+POSTHOOK: query: drop table acidtbl1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@acidtbl1
+POSTHOOK: Output: default@acidtbl1
+PREHOOK: query: drop table acidtbl2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@acidtbl2
+PREHOOK: Output: default@acidtbl2
+POSTHOOK: query: drop table acidtbl2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@acidtbl2
+POSTHOOK: Output: default@acidtbl2
+PREHOOK: query: drop table acidtbl3
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@acidtbl3
+PREHOOK: Output: default@acidtbl3
+POSTHOOK: query: drop table acidtbl3
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@acidtbl3
+POSTHOOK: Output: default@acidtbl3
+PREHOOK: query: drop table if exists acidparttbl
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists acidparttbl
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table acidparttbl (key char(1), value int) partitioned 
by (p int) clustered by (value) into 2 buckets stored as orc TBLPROPERTIES 
("transactional"="true")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acidparttbl
+POSTHOOK: query: create table acidparttbl (key char(1), value int) partitioned 
by (p int) clustered by (value) into 2 buckets stored as orc TBLPROPERTIES 
("transactional"="true")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acidparttbl
+PREHOOK: query: insert into table acidparttbl partition(p=100) values ('a', 
1), ('b', 2), ('c', 3)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@acidparttbl@p=100
+POSTHOOK: query: insert into table acidparttbl partition(p=100) values ('a', 
1), ('b', 2), ('c', 3)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@acidparttbl@p=100
+POSTHOOK: Lineage: acidparttbl PARTITION(p=100).key EXPRESSION 
[(values__tmp__table__8)values__tmp__table__8.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
+POSTHOOK: Lineage: acidparttbl PARTITION(p=100).value EXPRESSION 
[(values__tmp__table__8)values__tmp__table__8.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
+PREHOOK: query: select p, key, value from acidparttbl order by p, key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acidparttbl
+PREHOOK: Input: default@acidparttbl@p=100
+#### A masked pattern was here ####
+POSTHOOK: query: select p, key, value from acidparttbl order by p, key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acidparttbl
+POSTHOOK: Input: default@acidparttbl@p=100
+#### A masked pattern was here ####
+100    a       1
+100    b       2
+100    c       3
+PREHOOK: query: insert overwrite table acidparttbl partition(p=100) select 
key, value from srctbl where key in ('d', 'e', 'f')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srctbl
+PREHOOK: Output: default@acidparttbl@p=100
+POSTHOOK: query: insert overwrite table acidparttbl partition(p=100) select 
key, value from srctbl where key in ('d', 'e', 'f')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srctbl
+POSTHOOK: Output: default@acidparttbl@p=100
+POSTHOOK: Lineage: acidparttbl PARTITION(p=100).key SIMPLE 
[(srctbl)srctbl.FieldSchema(name:key, type:char(1), comment:null), ]
+POSTHOOK: Lineage: acidparttbl PARTITION(p=100).value SIMPLE 
[(srctbl)srctbl.FieldSchema(name:value, type:int, comment:null), ]
+PREHOOK: query: select p, key, value from acidparttbl order by p, key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acidparttbl
+PREHOOK: Input: default@acidparttbl@p=100
+#### A masked pattern was here ####
+POSTHOOK: query: select p, key, value from acidparttbl order by p, key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acidparttbl
+POSTHOOK: Input: default@acidparttbl@p=100
+#### A masked pattern was here ####
+100    d       4
+100    e       5
+100    f       6
+PREHOOK: query: insert into table acidparttbl partition(p) values ('g', 7, 
100), ('h', 8, 200)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@acidparttbl
+POSTHOOK: query: insert into table acidparttbl partition(p) values ('g', 7, 
100), ('h', 8, 200)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@acidparttbl@p=100
+POSTHOOK: Output: default@acidparttbl@p=200
+POSTHOOK: Lineage: acidparttbl PARTITION(p=100).key EXPRESSION 
[(values__tmp__table__9)values__tmp__table__9.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
+POSTHOOK: Lineage: acidparttbl PARTITION(p=100).value EXPRESSION 
[(values__tmp__table__9)values__tmp__table__9.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
+POSTHOOK: Lineage: acidparttbl PARTITION(p=200).key EXPRESSION 
[(values__tmp__table__9)values__tmp__table__9.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
+POSTHOOK: Lineage: acidparttbl PARTITION(p=200).value EXPRESSION 
[(values__tmp__table__9)values__tmp__table__9.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
+PREHOOK: query: select p, key, value from acidparttbl order by p, key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acidparttbl
+PREHOOK: Input: default@acidparttbl@p=100
+PREHOOK: Input: default@acidparttbl@p=200
+#### A masked pattern was here ####
+POSTHOOK: query: select p, key, value from acidparttbl order by p, key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acidparttbl
+POSTHOOK: Input: default@acidparttbl@p=100
+POSTHOOK: Input: default@acidparttbl@p=200
+#### A masked pattern was here ####
+100    d       4
+100    e       5
+100    f       6
+100    g       7
+200    h       8
+PREHOOK: query: insert overwrite table acidparttbl partition(p) values ('i', 
9, 100), ('j', 10, 200)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@acidparttbl
+POSTHOOK: query: insert overwrite table acidparttbl partition(p) values ('i', 
9, 100), ('j', 10, 200)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@acidparttbl@p=100
+POSTHOOK: Output: default@acidparttbl@p=200
+POSTHOOK: Lineage: acidparttbl PARTITION(p=100).key EXPRESSION 
[(values__tmp__table__10)values__tmp__table__10.FieldSchema(name:tmp_values_col1,
 type:string, comment:), ]
+POSTHOOK: Lineage: acidparttbl PARTITION(p=100).value EXPRESSION 
[(values__tmp__table__10)values__tmp__table__10.FieldSchema(name:tmp_values_col2,
 type:string, comment:), ]
+POSTHOOK: Lineage: acidparttbl PARTITION(p=200).key EXPRESSION 
[(values__tmp__table__10)values__tmp__table__10.FieldSchema(name:tmp_values_col1,
 type:string, comment:), ]
+POSTHOOK: Lineage: acidparttbl PARTITION(p=200).value EXPRESSION 
[(values__tmp__table__10)values__tmp__table__10.FieldSchema(name:tmp_values_col2,
 type:string, comment:), ]
+PREHOOK: query: select p, key, value from acidparttbl order by p, key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acidparttbl
+PREHOOK: Input: default@acidparttbl@p=100
+PREHOOK: Input: default@acidparttbl@p=200
+#### A masked pattern was here ####
+POSTHOOK: query: select p, key, value from acidparttbl order by p, key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acidparttbl
+POSTHOOK: Input: default@acidparttbl@p=100
+POSTHOOK: Input: default@acidparttbl@p=200
+#### A masked pattern was here ####
+100    i       9
+200    j       10
+PREHOOK: query: drop table acidparttbl
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@acidparttbl
+PREHOOK: Output: default@acidparttbl
+POSTHOOK: query: drop table acidparttbl
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@acidparttbl
+POSTHOOK: Output: default@acidparttbl

Reply via email to