This is an automated email from the ASF dual-hosted git repository.

anishek pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 78d8825  HIVE-24117: Fix for not setting managed table location in incremental load (Aasha Medhi, reviewed by Pravin Kumar Sinha)
78d8825 is described below

commit 78d882535a1c8797565e24b2b0360dcab851cb29
Author: Anishek Agarwal <[email protected]>
AuthorDate: Mon Sep 14 11:30:32 2020 +0530

    HIVE-24117: Fix for not setting managed table location in incremental load (Aasha Medhi, reviewed by Pravin Kumar Sinha)
---
 .../parse/TestReplicationScenariosAcidTables.java  | 52 ++++++++++++++++++++++
 .../hive/ql/parse/ImportSemanticAnalyzer.java      |  4 ++
 2 files changed, 56 insertions(+)

diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
index c03b252..95ad047 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
@@ -278,6 +278,58 @@ public class TestReplicationScenariosAcidTables extends BaseReplicationScenarios
   }
 
   @Test
+  public void testAcidTablesCreateTableIncremental() throws Throwable {
+    // Create 2 tables, one partitioned and other not.
+    primary.run("use " + primaryDbName)
+      .run("create table t1 (id int) clustered by(id) into 3 buckets stored as orc " +
+        "tblproperties (\"transactional\"=\"true\")")
+      .run("insert into t1 values(1)")
+      .run("create table t2 (rank int) partitioned by (name string) tblproperties(\"transactional\"=\"true\", " +
+        "\"transactional_properties\"=\"insert_only\")")
+      .run("insert into t2 partition(name='Bob') values(11)")
+      .run("insert into t2 partition(name='Carl') values(10)");
+
+    WarehouseInstance.Tuple bootstrapDump = primary
+      .run("use " + primaryDbName)
+      .dump(primaryDbName);
+
+    replica.load(replicatedDbName, primaryDbName)
+      .run("use " + replicatedDbName)
+      .run("show tables")
+      .verifyResults(new String[] {"t1", "t2"})
+      .run("repl status " + replicatedDbName)
+      .verifyResult(bootstrapDump.lastReplicationId)
+      .run("select id from t1")
+      .verifyResults(new String[]{"1"})
+      .run("select rank from t2 order by rank")
+      .verifyResults(new String[] {"10", "11"});
+
+    WarehouseInstance.Tuple incrDump = primary.run("use "+ primaryDbName)
+      .run("create table t3 (id int)")
+      .run("insert into t3 values (99)")
+      .run("create table t4 (standard int) partitioned by (name string) stored as orc " +
+        "tblproperties (\"transactional\"=\"true\")")
+      .run("insert into t4 partition(name='Tom') values(11)")
+      .dump(primaryDbName);
+
+    replica.load(replicatedDbName, primaryDbName)
+      .run("use " + replicatedDbName)
+      .run("show tables")
+      .verifyResults(new String[] {"t1", "t2", "t3", "t4"})
+      .run("repl status " + replicatedDbName)
+      .verifyResult(incrDump.lastReplicationId)
+      .run("select id from t1")
+      .verifyResults(new String[]{"1"})
+      .run("select rank from t2 order by rank")
+      .verifyResults(new String[] {"10", "11"})
+      .run("select id from t3")
+      .verifyResults(new String[]{"99"})
+      .run("select standard from t4 order by standard")
+      .verifyResults(new String[] {"11"});
+  }
+
+
+  @Test
   public void testAcidTablesBootstrapWithOpenTxnsDiffDb() throws Throwable {
     int numTxns = 5;
     HiveConf primaryConf = primary.getConf();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index 614453b..3b9bc6f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -1192,6 +1192,10 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
      // have been already created when replaying previous events. So no need to create table
      // again.
       if (x.getEventType() != DumpType.EVENT_COMMIT_TXN) {
+        //Don't set location for managed tables while creating the table.
+        if (x.getEventType() == DumpType.EVENT_CREATE_TABLE && !tblDesc.isExternal()) {
+          tblDesc.setLocation(null);
+        }
         Task t = createTableTask(tblDesc, x);
         if (dependentTasks != null) {
           dependentTasks.forEach(task -> t.addDependentTask(task));

Reply via email to