http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
index b1f731f..7d2b616 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
@@ -18,36 +18,51 @@
 package org.apache.hadoop.hive.ql.exec.repl.util;
 
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.exec.repl.ReplStateLogWork;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.repl.ReplLogger;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
+import org.apache.hadoop.hive.ql.plan.ReplTxnWork;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.ql.plan.ImportTableDesc;
 import org.apache.hadoop.hive.ql.stats.StatsUtils;
+import org.apache.hadoop.hive.ql.util.HiveStrictManagedMigration;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.hive.ql.parse.repl.load.UpdatedMetaDataTracker;
+import org.apache.thrift.TException;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.io.Serializable;
+
+import static 
org.apache.hadoop.hive.ql.util.HiveStrictManagedMigration.TableMigrationOption.MANAGED;
 
 
 public class ReplUtils {
 
   public static final String REPL_CHECKPOINT_KEY = "hive.repl.ckpt.key";
 
+  // Write id allocated in the current execution context; it is passed 
through the config to be used by
+  // different tasks.
+  public static final String REPL_CURRENT_TBL_WRITE_ID = 
"hive.repl.current.table.write.id";
+
+
   /**
    * Bootstrap REPL LOAD operation type on the examined object based on ckpt 
state.
    */
@@ -121,4 +136,31 @@ public class ReplUtils {
     }
     return false;
   }
+
+  public static List<Task<? extends Serializable>> 
addOpenTxnTaskForMigration(String actualDbName,
+                                                                  String 
actualTblName, HiveConf conf,
+                                                                  
UpdatedMetaDataTracker updatedMetaDataTracker,
+                                                                  Task<? 
extends Serializable> childTask,
+                                                                  
org.apache.hadoop.hive.metastore.api.Table tableObj)
+          throws IOException, TException {
+    List<Task<? extends Serializable>> taskList = new ArrayList<>();
+    taskList.add(childTask);
+    if (conf.getBoolVar(HiveConf.ConfVars.HIVE_STRICT_MANAGED_TABLES) && 
updatedMetaDataTracker != null &&
+            !AcidUtils.isTransactionalTable(tableObj) &&
+            TableType.valueOf(tableObj.getTableType()) == 
TableType.MANAGED_TABLE) {
+      //TODO : isPathOwnedByHive is hard-coded to true; it needs to be read 
from the repl dump metadata.
+      HiveStrictManagedMigration.TableMigrationOption migrationOption =
+              
HiveStrictManagedMigration.determineMigrationTypeAutomatically(tableObj, 
TableType.MANAGED_TABLE,
+                      null, conf, null, true);
+      if (migrationOption == MANAGED) {
+        //if conversion to managed table.
+        Task<? extends Serializable> replTxnTask = TaskFactory.get(new 
ReplTxnWork(actualDbName, actualTblName,
+                        ReplTxnWork.OperationType.REPL_MIGRATION_OPEN_TXN), 
conf);
+        replTxnTask.addDependentTask(childTask);
+        updatedMetaDataTracker.setNeedCommitTxn(true);
+        taskList.add(replTxnTask);
+      }
+    }
+    return taskList;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java 
b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index 46c51eb..9ddd30c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -442,9 +442,28 @@ public final class DbTxnManager extends HiveTxnManagerImpl 
{
     }
   }
 
+  private void clearLocksAndHB() throws LockException {
+    lockMgr.clearLocalLockRecords();
+    stopHeartbeat();
+  }
+
+  private void resetTxnInfo() {
+    txnId = 0;
+    stmtId = -1;
+    numStatements = 0;
+    tableWriteIds.clear();
+  }
+
   @Override
   public void replCommitTxn(CommitTxnRequest rqst) throws LockException {
     try {
+      if (rqst.isSetReplLastIdInfo()) {
+        if (!isTxnOpen()) {
+          throw new RuntimeException("Attempt to commit before opening a 
transaction");
+        }
+        // For a transaction started internally by the repl load command, the 
heartbeat needs to be stopped.
+        clearLocksAndHB();
+      }
       getMS().replCommitTxn(rqst);
     } catch (NoSuchTxnException e) {
       LOG.error("Metastore could not find " + 
JavaUtils.txnIdToString(rqst.getTxnid()));
@@ -456,6 +475,11 @@ public final class DbTxnManager extends HiveTxnManagerImpl 
{
       throw le;
     } catch (TException e) {
       throw new 
LockException(ErrorMsg.METASTORE_COMMUNICATION_FAILED.getMsg(), e);
+    } finally {
+      if (rqst.isSetReplLastIdInfo()) {
+        // For a transaction started internally by the repl load command, the 
txn info needs to be cleared.
+        resetTxnInfo();
+      }
     }
   }
 
@@ -465,8 +489,8 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
       throw new RuntimeException("Attempt to commit before opening a 
transaction");
     }
     try {
-      lockMgr.clearLocalLockRecords();
-      stopHeartbeat();
+      // Clear locks and stop the heartbeat via clearLocksAndHB so the same 
code is shared with the replCommitTxn flow.
+      clearLocksAndHB();
       LOG.debug("Committing txn " + JavaUtils.txnIdToString(txnId));
       getMS().commitTxn(txnId);
     } catch (NoSuchTxnException e) {
@@ -480,10 +504,8 @@ public final class DbTxnManager extends HiveTxnManagerImpl 
{
       throw new LockException(ErrorMsg.METASTORE_COMMUNICATION_FAILED.getMsg(),
           e);
     } finally {
-      txnId = 0;
-      stmtId = -1;
-      numStatements = 0;
-      tableWriteIds.clear();
+      // Reset the txn state via resetTxnInfo so the same code is shared 
with the replCommitTxn flow.
+      resetTxnInfo();
     }
   }
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 768a73c..c017790 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -695,7 +695,7 @@ public class Hive {
       AcidUtils.TableSnapshot tableSnapshot = null;
       if (transactional) {
         if (replWriteId > 0) {
-          ValidWriteIdList writeIds = 
getMSC().getValidWriteIds(getFullTableName(dbName, tblName), replWriteId);
+          ValidWriteIdList writeIds = 
AcidUtils.getTableValidWriteIdListWithTxnList(conf, dbName, tblName);
           tableSnapshot = new TableSnapshot(replWriteId, 
writeIds.writeToString());
         } else {
           // Make sure we pass in the names, so we can get the correct 
snapshot for rename table.
@@ -1903,8 +1903,17 @@ public class Hive {
             inheritLocation, isSkewedStoreAsSubdir, isSrcLocal, 
isAcidIUDoperation,
             hasFollowingStatsTask, writeId, stmtId, isInsertOverwrite, 
isTxnTable, newFiles);
 
-    AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf,
-            newTPart.getTable(), true);
+    AcidUtils.TableSnapshot tableSnapshot = null;
+    if (isTxnTable) {
+      if ((writeId != null) && (writeId > 0)) {
+        ValidWriteIdList writeIds = 
AcidUtils.getTableValidWriteIdListWithTxnList(
+                conf, tbl.getDbName(), tbl.getTableName());
+        tableSnapshot = new TableSnapshot(writeId, writeIds.writeToString());
+      } else {
+        // Make sure we pass in the names, so we can get the correct snapshot 
for rename table.
+        tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl, tbl.getDbName(), 
tbl.getTableName(), true);
+      }
+    }
     if (tableSnapshot != null) {
       newTPart.getTPartition().setWriteId(tableSnapshot.getWriteId());
     }
@@ -2854,7 +2863,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
       environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, 
StatsSetupConst.TRUE);
     }
 
-    alterTable(tbl, false, environmentContext, true);
+    alterTable(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), tbl, 
false, environmentContext,
+            true, ((writeId == null) ? 0 : writeId));
 
     if (AcidUtils.isTransactionalTable(tbl)) {
       addWriteNotificationLog(tbl, null, newFiles, writeId);
@@ -2896,8 +2906,16 @@ private void constructOneLBLocationMap(FileStatus fSta,
     int size = addPartitionDesc.getPartitionCount();
     List<org.apache.hadoop.hive.metastore.api.Partition> in =
         new ArrayList<org.apache.hadoop.hive.metastore.api.Partition>(size);
-    AcidUtils.TableSnapshot tableSnapshot =
-        AcidUtils.getTableSnapshot(conf, tbl);
+    AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, 
tbl, true);
+    long writeId;
+    String validWriteIdList;
+    if (tableSnapshot != null && tableSnapshot.getWriteId() > 0) {
+      writeId = tableSnapshot.getWriteId();
+      validWriteIdList = tableSnapshot.getValidWriteIdList();
+    } else {
+      writeId = -1;
+      validWriteIdList = null;
+    }
     for (int i = 0; i < size; ++i) {
       org.apache.hadoop.hive.metastore.api.Partition tmpPart =
           convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i), 
conf);
@@ -2941,7 +2959,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
           out.add(new Partition(tbl, outPart));
         }
         getMSC().alter_partitions(addPartitionDesc.getDbName(), 
addPartitionDesc.getTableName(),
-            partsToAlter, new EnvironmentContext(), null, -1);
+            partsToAlter, new EnvironmentContext(), validWriteIdList, writeId);
 
         for ( org.apache.hadoop.hive.metastore.api.Partition outPart :
         getMSC().getPartitionsByNames(addPartitionDesc.getDbName(), 
addPartitionDesc.getTableName(),part_names)){
@@ -4133,7 +4151,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
   public void recycleDirToCmPath(Path dataPath, boolean isPurge) throws 
HiveException {
     try {
       CmRecycleRequest request = new CmRecycleRequest(dataPath.toString(), 
isPurge);
-      getMSC().recycleDirToCmPath(request);
+      getSynchronizedMSC().recycleDirToCmPath(request);
     } catch (Exception e) {
       throw new HiveException(e);
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
index dd75b32..156f755 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
@@ -96,6 +96,7 @@ public class EximUtil {
     private Logger LOG;
     private Context ctx;
     private DumpType eventType = DumpType.EVENT_UNKNOWN;
+    private Task<? extends Serializable> openTxnTask = null;
 
     public HiveConf getConf() {
       return conf;
@@ -146,6 +147,13 @@ public class EximUtil {
       this.LOG = LOG;
       this.ctx = ctx;
     }
+
+    public Task<? extends Serializable> getOpenTxnTask() {
+      return openTxnTask;
+    }
+    public void setOpenTxnTask(Task<? extends Serializable> openTxnTask) {
+      this.openTxnTask = openTxnTask;
+    }
   }
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index 16ce5d5..e82a102 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.ReplChangeManager;
 import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.exec.ReplCopyTask;
 import org.apache.hadoop.hive.ql.exec.Task;
@@ -60,9 +61,12 @@ import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
 import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc.LoadFileType;
 import org.apache.hadoop.hive.ql.plan.MoveWork;
+import org.apache.hadoop.hive.ql.plan.ReplTxnWork;
 import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.ql.util.HiveStrictManagedMigration;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.mapred.OutputFormat;
+import org.apache.thrift.TException;
 import org.datanucleus.util.StringUtils;
 import org.slf4j.Logger;
 
@@ -80,6 +84,7 @@ import java.util.Map;
 import java.util.TreeMap;
 
 import static 
org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_ENABLE_MOVE_OPTIMIZATION;
+import static 
org.apache.hadoop.hive.ql.util.HiveStrictManagedMigration.getHiveUpdater;
 
 /**
  * ImportSemanticAnalyzer.
@@ -190,6 +195,22 @@ public class ImportSemanticAnalyzer extends 
BaseSemanticAnalyzer {
     }
   }
 
+  private static void 
upgradeTableDesc(org.apache.hadoop.hive.metastore.api.Table tableObj, MetaData 
rv,
+                                       EximUtil.SemanticAnalyzerWrapperContext 
x)
+          throws IOException, TException, HiveException {
+    x.getLOG().debug("Converting table " + tableObj.getTableName() + " of type 
" + tableObj.getTableType() +
+            " with para " + tableObj.getParameters());
+    //TODO : isPathOwnedByHive is hard-coded to true; it needs to be read 
from the repl dump metadata.
+    TableType tableType = TableType.valueOf(tableObj.getTableType());
+    HiveStrictManagedMigration.TableMigrationOption migrationOption =
+            
HiveStrictManagedMigration.determineMigrationTypeAutomatically(tableObj, 
tableType,
+                    null, x.getConf(), x.getHive().getMSC(), true);
+    HiveStrictManagedMigration.migrateTable(tableObj, tableType, 
migrationOption, false,
+            getHiveUpdater(x.getConf()), x.getHive().getMSC(), x.getConf());
+    x.getLOG().debug("Converted table " + tableObj.getTableName() + " of type 
" + tableObj.getTableType() +
+            " with para " + tableObj.getParameters());
+  }
+
   /**
    * The same code is used from both the "repl load" as well as "import".
    * Given that "repl load" now supports two modes "repl load dbName 
[location]" and
@@ -249,7 +270,24 @@ public class ImportSemanticAnalyzer extends 
BaseSemanticAnalyzer {
     // Executed if relevant, and used to contain all the other details about 
the table if not.
     ImportTableDesc tblDesc;
     try {
-      tblDesc = getBaseCreateTableDescFromTable(dbname, rv.getTable());
+      org.apache.hadoop.hive.metastore.api.Table tblObj = rv.getTable();
+      // The table can be non-ACID in case of replication from a cluster with 
STRICT_MANAGED set to false.
+      if (!TxnUtils.isTransactionalTable(tblObj) && 
replicationSpec.isInReplicationScope() &&
+              
x.getConf().getBoolVar(HiveConf.ConfVars.HIVE_STRICT_MANAGED_TABLES) &&
+              (TableType.valueOf(tblObj.getTableType()) == 
TableType.MANAGED_TABLE)) {
+        //TODO : dump metadata should be read to make sure that migration is 
required.
+        upgradeTableDesc(tblObj, rv, x);
+        //if the conversion is from non transactional to transactional table
+        if (TxnUtils.isTransactionalTable(tblObj)) {
+          replicationSpec.setMigratingToTxnTable();
+        }
+        tblDesc = getBaseCreateTableDescFromTable(dbname, tblObj);
+        if (TableType.valueOf(tblObj.getTableType()) == 
TableType.EXTERNAL_TABLE) {
+          tblDesc.setExternal(true);
+        }
+      } else {
+        tblDesc = getBaseCreateTableDescFromTable(dbname, tblObj);
+      }
     } catch (Exception e) {
       throw new HiveException(e);
     }
@@ -401,7 +439,7 @@ public class ImportSemanticAnalyzer extends 
BaseSemanticAnalyzer {
     boolean isAutoPurge;
     boolean needRecycle;
 
-    if (replicationSpec.isInReplicationScope() &&
+    if (replicationSpec.isInReplicationScope() && 
!replicationSpec.isMigratingToTxnTable() &&
             
x.getCtx().getConf().getBoolean(REPL_ENABLE_MOVE_OPTIMIZATION.varname, false)) {
       lft = LoadFileType.IGNORE;
       destPath = loadPath = tgtPath;
@@ -430,7 +468,8 @@ public class ImportSemanticAnalyzer extends 
BaseSemanticAnalyzer {
         lft = LoadFileType.KEEP_EXISTING;
       } else {
         destPath = loadPath = x.getCtx().getExternalTmpPath(tgtPath);
-        lft = replace ? LoadFileType.REPLACE_ALL : 
LoadFileType.OVERWRITE_EXISTING;
+        lft = replace ? LoadFileType.REPLACE_ALL :
+                replicationSpec.isMigratingToTxnTable() ? 
LoadFileType.KEEP_EXISTING : LoadFileType.OVERWRITE_EXISTING;
       }
       needRecycle = false;
       isAutoPurge = false;
@@ -457,7 +496,8 @@ public class ImportSemanticAnalyzer extends 
BaseSemanticAnalyzer {
     MoveWork moveWork = new MoveWork(x.getInputs(), x.getOutputs(), null, 
null, false);
 
 
-    if (replicationSpec.isInReplicationScope() && 
AcidUtils.isTransactionalTable(table)) {
+    if (replicationSpec.isInReplicationScope() && 
AcidUtils.isTransactionalTable(table) &&
+            !replicationSpec.isMigratingToTxnTable()) {
       LoadMultiFilesDesc loadFilesWork = new LoadMultiFilesDesc(
               Collections.singletonList(destPath),
               Collections.singletonList(tgtPath),
@@ -467,6 +507,9 @@ public class ImportSemanticAnalyzer extends 
BaseSemanticAnalyzer {
     } else {
       LoadTableDesc loadTableWork = new LoadTableDesc(
               loadPath, Utilities.getTableDesc(table), new TreeMap<>(), lft, 
writeId);
+      if (replicationSpec.isMigratingToTxnTable()) {
+        loadTableWork.setInsertOverwrite(replace);
+      }
       loadTableWork.setStmtId(stmtId);
       moveWork.setLoadTableWork(loadTableWork);
     }
@@ -543,7 +586,7 @@ public class ImportSemanticAnalyzer extends 
BaseSemanticAnalyzer {
 
       LoadFileType loadFileType;
       Path destPath;
-      if (replicationSpec.isInReplicationScope() &&
+      if (replicationSpec.isInReplicationScope() && 
!replicationSpec.isMigratingToTxnTable() &&
               
x.getCtx().getConf().getBoolean(REPL_ENABLE_MOVE_OPTIMIZATION.varname, false)) {
         loadFileType = LoadFileType.IGNORE;
         destPath =  tgtLocation;
@@ -555,7 +598,9 @@ public class ImportSemanticAnalyzer extends 
BaseSemanticAnalyzer {
           needRecycle = db != null && 
ReplChangeManager.isSourceOfReplication(db);
         }
       } else {
-        loadFileType = replicationSpec.isReplace() ? LoadFileType.REPLACE_ALL 
: LoadFileType.OVERWRITE_EXISTING;
+        loadFileType = replicationSpec.isReplace() ?
+                LoadFileType.REPLACE_ALL :
+                replicationSpec.isMigratingToTxnTable() ? 
LoadFileType.KEEP_EXISTING : LoadFileType.OVERWRITE_EXISTING;
         //Replication scope the write id will be invalid
         Boolean useStagingDirectory = 
!AcidUtils.isTransactionalTable(table.getParameters()) ||
                 replicationSpec.isInReplicationScope();
@@ -565,7 +610,8 @@ public class ImportSemanticAnalyzer extends 
BaseSemanticAnalyzer {
         needRecycle = false;
       }
 
-      Path moveTaskSrc =  
!AcidUtils.isTransactionalTable(table.getParameters()) ? destPath : tgtLocation;
+      Path moveTaskSrc =  
!AcidUtils.isTransactionalTable(table.getParameters()) ||
+              replicationSpec.isInReplicationScope() ? destPath : tgtLocation;
       if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
         Utilities.FILE_OP_LOGGER.trace("adding import work for partition with 
source location: "
           + srcLocation + "; target: " + tgtLocation + "; copy dest " + 
destPath + "; mm "
@@ -592,7 +638,8 @@ public class ImportSemanticAnalyzer extends 
BaseSemanticAnalyzer {
 
       // Note: this sets LoadFileType incorrectly for ACID; is that relevant 
for import?
       //       See setLoadFileType and setIsAcidIow calls elsewhere for an 
example.
-      if (replicationSpec.isInReplicationScope() && 
AcidUtils.isTransactionalTable(tblDesc.getTblProps())) {
+      if (replicationSpec.isInReplicationScope() && 
AcidUtils.isTransactionalTable(tblDesc.getTblProps()) &&
+              !replicationSpec.isMigratingToTxnTable()) {
         LoadMultiFilesDesc loadFilesWork = new LoadMultiFilesDesc(
                 Collections.singletonList(destPath),
                 Collections.singletonList(tgtLocation),
@@ -604,6 +651,9 @@ public class ImportSemanticAnalyzer extends 
BaseSemanticAnalyzer {
                 partSpec.getPartSpec(),
                 loadFileType,
                 writeId);
+        if (replicationSpec.isMigratingToTxnTable()) {
+          loadTableWork.setInsertOverwrite(replicationSpec.isReplace());
+        }
         loadTableWork.setStmtId(stmtId);
         loadTableWork.setInheritTableSpecs(false);
         moveWork.setLoadTableWork(loadTableWork);
@@ -1057,6 +1107,11 @@ public class ImportSemanticAnalyzer extends 
BaseSemanticAnalyzer {
                           tblDesc.getDatabaseName(),
                           tblDesc.getTableName(),
                           null);
+      if (replicationSpec.isMigratingToTxnTable()) {
+        x.setOpenTxnTask(TaskFactory.get(new 
ReplTxnWork(tblDesc.getDatabaseName(),
+                tblDesc.getTableName(), 
ReplTxnWork.OperationType.REPL_MIGRATION_OPEN_TXN), x.getConf()));
+        updatedMetadata.setNeedCommitTxn(true);
+      }
     }
 
     if (tblDesc.getLocation() == null) {
@@ -1100,7 +1155,7 @@ public class ImportSemanticAnalyzer extends 
BaseSemanticAnalyzer {
           }
         } else {
           x.getLOG().debug("adding dependent CopyWork/MoveWork for table");
-          t.addDependentTask(loadTable(fromURI, table, true, new 
Path(tblDesc.getLocation()), replicationSpec, x, writeId, stmtId));
+          t.addDependentTask(loadTable(fromURI, table, 
replicationSpec.isReplace(), new Path(tblDesc.getLocation()), replicationSpec, 
x, writeId, stmtId));
         }
       }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
index f1fcd6e..1ebbb82 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
@@ -22,6 +22,7 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.ReplChangeManager;
 import org.apache.hadoop.hive.metastore.Warehouse;
@@ -95,6 +96,9 @@ public class ReplicationSemanticAnalyzer extends 
BaseSemanticAnalyzer {
   public void analyzeInternal(ASTNode ast) throws SemanticException {
     LOG.debug("ReplicationSemanticAanalyzer: analyzeInternal");
     LOG.debug(ast.getName() + ":" + ast.getToken().getText() + "=" + 
ast.getText());
+    // Some of the txn-related configs were not set when 
ReplicationSemanticAnalyzer.conf was initialized.
+    // They should be set first.
+    setTxnConfigs();
     switch (ast.getToken().getType()) {
       case TOK_REPL_DUMP: {
         LOG.debug("ReplicationSemanticAnalyzer: analyzeInternal: dump");
@@ -124,6 +128,13 @@ public class ReplicationSemanticAnalyzer extends 
BaseSemanticAnalyzer {
     }
   }
 
+  private void setTxnConfigs() {
+    String validTxnList = 
queryState.getConf().get(ValidTxnList.VALID_TXNS_KEY);
+    if (validTxnList != null) {
+      conf.set(ValidTxnList.VALID_TXNS_KEY, validTxnList);
+    }
+  }
+
   private void initReplDump(ASTNode ast) throws HiveException {
     int numChildren = ast.getChildCount();
     boolean isMetaDataOnly = false;

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
index 3115e83..39009ce 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
@@ -47,6 +47,7 @@ public class ReplicationSpec {
   //TxnIds snapshot
   private String validTxnList = null;
   private Type specType = Type.DEFAULT; // DEFAULT means REPL_LOAD or 
BOOTSTRAP_DUMP or EXPORT
+  private boolean isMigratingToTxnTable = false;
 
   // Key definitions related to replication
   public enum KEY {
@@ -402,4 +403,11 @@ public class ReplicationSpec {
       return SCOPE.NO_REPL;
     }
   }
+
+  public boolean isMigratingToTxnTable() {
+    return isMigratingToTxnTable;
+  }
+  public void setMigratingToTxnTable() {
+    isMigratingToTxnTable = true;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/UpdatedMetaDataTracker.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/UpdatedMetaDataTracker.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/UpdatedMetaDataTracker.java
index 614e071..1f20698 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/UpdatedMetaDataTracker.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/UpdatedMetaDataTracker.java
@@ -71,12 +71,21 @@ public class UpdatedMetaDataTracker {
 
   private List<UpdateMetaData> updateMetaDataList;
   private Map<String, Integer> updateMetaDataMap;
+  private boolean needCommitTxn = false;
 
   public UpdatedMetaDataTracker() {
     updateMetaDataList = new ArrayList<>();
     updateMetaDataMap = new HashMap<>();
   }
 
+  public void setNeedCommitTxn(boolean needCommitTxn) {
+    this.needCommitTxn = needCommitTxn;
+  }
+
+  public boolean isNeedCommitTxn() {
+    return needCommitTxn;
+  }
+
   public void copyUpdatedMetadata(UpdatedMetaDataTracker other) {
     int size = updateMetaDataList.size();
     for (UpdateMetaData updateMetaDataOther : other.updateMetaDataList) {
@@ -93,6 +102,7 @@ public class UpdatedMetaDataTracker {
         }
       }
     }
+    this.needCommitTxn = other.needCommitTxn;
   }
 
   public void set(String replState, String dbName, String tableName, Map 
<String, String> partSpec)

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
index 4a2fdd2..b95a35a 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java
@@ -41,8 +41,7 @@ public class DropPartitionHandler extends 
AbstractMessageHandler {
       String actualDbName = context.isDbNameEmpty() ? msg.getDB() : 
context.dbName;
       String actualTblName = context.isTableNameEmpty() ? msg.getTable() : 
context.tableName;
       Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs =
-          ReplUtils.genPartSpecs(new Table(msg.getTableObj()),
-              msg.getPartitions());
+          ReplUtils.genPartSpecs(new Table(msg.getTableObj()), 
msg.getPartitions());
       if (partSpecs.size() > 0) {
         DropTableDesc dropPtnDesc = new DropTableDesc(actualDbName + "." + 
actualTblName,
             partSpecs, null, true, context.eventOnlyReplicationSpec());

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java
index 0035026..1125f69 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hive.ql.parse.repl.load.message;
 
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
@@ -46,10 +45,9 @@ public class RenamePartitionHandler extends 
AbstractMessageHandler {
     Map<String, String> oldPartSpec = new LinkedHashMap<>();
     String tableName = actualDbName + "." + actualTblName;
     try {
-      Table tblObj = msg.getTableObj();
       Iterator<String> beforeIterator = 
msg.getPtnObjBefore().getValuesIterator();
       Iterator<String> afterIterator = 
msg.getPtnObjAfter().getValuesIterator();
-      for (FieldSchema fs : tblObj.getPartitionKeys()) {
+      for (FieldSchema fs : msg.getTableObj().getPartitionKeys()) {
         oldPartSpec.put(fs.getName(), beforeIterator.next());
         newPartSpec.put(fs.getName(), afterIterator.next());
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java
index 83433d7..ddf2ca1 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenameTableHandler.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.parse.repl.load.message;
 
+import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
@@ -40,8 +41,10 @@ public class RenameTableHandler extends AbstractMessageHandler {
           "RENAMES of tables are not supported for table-level replication");
     }
     try {
-      String oldDbName = msg.getTableObjBefore().getDbName();
-      String newDbName = msg.getTableObjAfter().getDbName();
+      Table tableObjBefore = msg.getTableObjBefore();
+      Table tableObjAfter = msg.getTableObjAfter();
+      String oldDbName = tableObjBefore.getDbName();
+      String newDbName = tableObjAfter.getDbName();
 
       if (!context.isDbNameEmpty()) {
         // If we're loading into a db, instead of into the warehouse, then the 
oldDbName and
@@ -56,8 +59,8 @@ public class RenameTableHandler extends 
AbstractMessageHandler {
         }
       }
 
-      String oldName = StatsUtils.getFullyQualifiedTableName(oldDbName, msg.getTableObjBefore().getTableName());
-      String newName = StatsUtils.getFullyQualifiedTableName(newDbName, msg.getTableObjAfter().getTableName());
+      String oldName = StatsUtils.getFullyQualifiedTableName(oldDbName, tableObjBefore.getTableName());
+      String newName = StatsUtils.getFullyQualifiedTableName(newDbName, tableObjAfter.getTableName());
       AlterTableDesc renameTableDesc = new AlterTableDesc(
               oldName, newName, false, context.eventOnlyReplicationSpec());
       Task<DDLWork> renameTableTask = TaskFactory.get(
@@ -67,7 +70,7 @@ public class RenameTableHandler extends 
AbstractMessageHandler {
 
       // oldDbName and newDbName *will* be the same if we're here
       updatedMetadata.set(context.dmd.getEventTo().toString(), newDbName,
-              msg.getTableObjAfter().getTableName(), null);
+              tableObjAfter.getTableName(), null);
 
       // Note : edge-case here in interaction with table-level REPL LOAD, 
where that nukes out
       // tablesUpdated. However, we explicitly don't support repl of that 
sort, and error out above

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java
index 3fb18d8..f5f4459 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TableHandler.java
@@ -21,11 +21,14 @@ import 
org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage;
 import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.parse.EximUtil;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.parse.ImportSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.ReplTxnWork;
 
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 
 import static 
org.apache.hadoop.hive.ql.parse.repl.DumpType.EVENT_ALTER_PARTITION;
@@ -59,6 +62,14 @@ public class TableHandler extends AbstractMessageHandler {
           (context.precursor != null), null, context.tableName, context.dbName,
          null, context.location, x, updatedMetadata, context.getTxnMgr(), writeId);
 
+      Task<? extends Serializable> openTxnTask = x.getOpenTxnTask();
+      if (openTxnTask != null && !importTasks.isEmpty()) {
+        for (Task<? extends Serializable> t : importTasks) {
+          openTxnTask.addDependentTask(t);
+        }
+        importTasks.add(openTxnTask);
+      }
+
       return importTasks;
     } catch (Exception e) {
       throw new SemanticException(e);

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java
index 0d5ac31..dec6ed5 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java
@@ -21,12 +21,12 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
 import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;
 
 import java.io.Serializable;
-import java.util.Collections;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
@@ -40,8 +40,9 @@ public class TruncatePartitionHandler extends 
AbstractMessageHandler {
     String actualTblName = context.isTableNameEmpty() ? msg.getTable() : 
context.tableName;
 
     Map<String, String> partSpec = new LinkedHashMap<>();
+    org.apache.hadoop.hive.metastore.api.Table tblObj;
     try {
-      org.apache.hadoop.hive.metastore.api.Table tblObj = msg.getTableObj();
+      tblObj = msg.getTableObj();
       Iterator<String> afterIterator = 
msg.getPtnObjAfter().getValuesIterator();
       for (FieldSchema fs : tblObj.getPartitionKeys()) {
         partSpec.put(fs.getName(), afterIterator.next());
@@ -63,6 +64,12 @@ public class TruncatePartitionHandler extends 
AbstractMessageHandler {
     context.log.debug("Added truncate ptn task : {}:{}:{}", 
truncatePtnTask.getId(),
         truncateTableDesc.getTableName(), truncateTableDesc.getWriteId());
     updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, 
actualTblName, partSpec);
-    return Collections.singletonList(truncatePtnTask);
+
+    try {
+      return ReplUtils.addOpenTxnTaskForMigration(actualDbName, actualTblName,
+              context.hiveConf, updatedMetadata, truncatePtnTask, tblObj);
+    } catch (Exception e) {
+      throw new SemanticException(e.getMessage());
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java
index d18a9e1..f037cbb 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java
@@ -20,12 +20,12 @@ package org.apache.hadoop.hive.ql.parse.repl.load.message;
 import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.DDLWork;
 import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;
 
 import java.io.Serializable;
-import java.util.Collections;
 import java.util.List;
 
 public class TruncateTableHandler extends AbstractMessageHandler {
@@ -45,6 +45,12 @@ public class TruncateTableHandler extends 
AbstractMessageHandler {
     context.log.debug("Added truncate tbl task : {}:{}:{}", 
truncateTableTask.getId(),
         truncateTableDesc.getTableName(), truncateTableDesc.getWriteId());
     updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, 
actualTblName, null);
-    return Collections.singletonList(truncateTableTask);
+
+    try {
+      return ReplUtils.addOpenTxnTaskForMigration(actualDbName, actualTblName,
+              context.hiveConf, updatedMetadata, truncateTableTask, 
msg.getTableObjBefore());
+    } catch (Exception e) {
+      throw new SemanticException(e.getMessage());
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java 
b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
index d9333b5..bed0581 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
@@ -253,6 +253,10 @@ public class LoadTableDesc extends LoadDesc implements 
Serializable {
     return currentWriteId == null ? 0 : currentWriteId;
   }
 
+  public void setWriteId(long writeId) {
+    currentWriteId = writeId;
+  }
+
   public int getStmtId() {
     return stmtId;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplTxnWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplTxnWork.java 
b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplTxnWork.java
index a6ab836..a9f98cc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplTxnWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ReplTxnWork.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hive.ql.plan;
 
 import java.io.Serializable;
-
+import org.apache.hadoop.hive.metastore.api.ReplLastIdInfo;
 import org.apache.hadoop.hive.metastore.api.TxnToWriteId;
 import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
@@ -43,13 +43,15 @@ public class ReplTxnWork implements Serializable {
   private List<TxnToWriteId> txnToWriteIdList;
   private ReplicationSpec replicationSpec;
   private List<WriteEventInfo> writeEventInfos;
+  private ReplLastIdInfo replLastIdInfo;
 
   /**
    * OperationType.
    * Different kind of events supported for replaying.
    */
   public enum OperationType {
-    REPL_OPEN_TXN, REPL_ABORT_TXN, REPL_COMMIT_TXN, REPL_ALLOC_WRITE_ID, 
REPL_WRITEID_STATE
+    REPL_OPEN_TXN, REPL_ABORT_TXN, REPL_COMMIT_TXN, REPL_ALLOC_WRITE_ID, 
REPL_WRITEID_STATE,
+    REPL_MIGRATION_OPEN_TXN, REPL_MIGRATION_COMMIT_TXN
   }
 
   OperationType operation;
@@ -64,6 +66,7 @@ public class ReplTxnWork implements Serializable {
     this.txnToWriteIdList = txnToWriteIdList;
     this.replicationSpec = replicationSpec;
     this.writeEventInfos = null;
+    this.replLastIdInfo = null;
   }
 
   public ReplTxnWork(String replPolicy, String dbName, String tableName, 
List<Long> txnIds, OperationType type,
@@ -90,6 +93,17 @@ public class ReplTxnWork implements Serializable {
     this.operation = type;
   }
 
+  public ReplTxnWork(String dbName, String tableName, OperationType type) {
+    this(null, dbName, tableName, null, type, null, null);
+    assert type == OperationType.REPL_MIGRATION_OPEN_TXN;
+  }
+
+  public ReplTxnWork(ReplLastIdInfo replLastIdInfo, OperationType type) {
+    this(null, null, null, null, type, null, null);
+    assert type == OperationType.REPL_MIGRATION_COMMIT_TXN;
+    this.replLastIdInfo = replLastIdInfo;
+  }
+
   public void addWriteEventInfo(WriteEventInfo writeEventInfo) {
     if (this.writeEventInfos == null) {
       this.writeEventInfos = new ArrayList<>();
@@ -136,4 +150,8 @@ public class ReplTxnWork implements Serializable {
   public List<WriteEventInfo> getWriteEventInfos() {
     return writeEventInfos;
   }
+
+  public ReplLastIdInfo getReplLastIdInfo() {
+    return replLastIdInfo;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java 
b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java
index 13a8af7..9535bed 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java
@@ -77,7 +77,7 @@ public class HiveStrictManagedMigration {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(HiveStrictManagedMigration.class);
 
-  enum TableMigrationOption {
+  public enum TableMigrationOption {
     NONE,      // Do nothing
     VALIDATE,  // No migration, just validate that the tables
     AUTOMATIC, // Automatically determine if the table should be managed or 
external
@@ -428,6 +428,32 @@ public class HiveStrictManagedMigration {
     }
   }
 
+  public static boolean migrateTable(Table tableObj, TableType tableType, TableMigrationOption migrationOption,
+                                     boolean dryRun, HiveUpdater hiveUpdater, IMetaStoreClient hms, Configuration conf)
+          throws HiveException, IOException, TException {
+    switch (migrationOption) {
+      case EXTERNAL:
+        migrateToExternalTable(tableObj, tableType, dryRun, hiveUpdater);
+        break;
+      case MANAGED:
+        migrateToManagedTable(tableObj, tableType, dryRun, hiveUpdater, hms, 
conf);
+        break;
+      case NONE:
+        break;
+      case VALIDATE:
+        // Check that the table is valid under strict managed tables mode.
+        String reason = 
HiveStrictManagedUtils.validateStrictManagedTable(conf, tableObj);
+        if (reason != null) {
+          LOG.warn(reason);
+          return true;
+        }
+        break;
+      default:
+        throw new IllegalArgumentException("Unexpected table migration option 
" + migrationOption);
+    }
+    return false;
+  }
+
   void processTable(Database dbObj, String tableName, boolean 
modifyDefaultManagedLocation)
       throws HiveException, IOException, TException {
     String dbName = dbObj.getName();
@@ -435,41 +461,16 @@ public class HiveStrictManagedMigration {
 
     Table tableObj = hms.getTable(dbName, tableName);
     TableType tableType = TableType.valueOf(tableObj.getTableType());
-    boolean tableMigrated;
 
     TableMigrationOption migrationOption = runOptions.migrationOption;
     if (migrationOption == TableMigrationOption.AUTOMATIC) {
-      migrationOption = determineMigrationTypeAutomatically(tableObj, 
tableType);
+      migrationOption = determineMigrationTypeAutomatically(tableObj, 
tableType, ownerName, conf, hms, null);
     }
 
-    switch (migrationOption) {
-    case EXTERNAL:
-      tableMigrated = migrateToExternalTable(tableObj, tableType);
-      if (tableMigrated) {
-        tableType = TableType.EXTERNAL_TABLE;
-      }
-      break;
-    case MANAGED:
-      tableMigrated = migrateToManagedTable(tableObj, tableType);
-      if (tableMigrated) {
-        tableType = TableType.MANAGED_TABLE;
-      }
-      break;
-    case NONE:
-      break;
-    case VALIDATE:
-      // Check that the table is valid under strict managed tables mode.
-      String reason = HiveStrictManagedUtils.validateStrictManagedTable(conf, 
tableObj);
-      if (reason != null) {
-        LOG.warn(reason);
-        failedValidationChecks = true;
-      }
-      break;
-    default:
-      throw new IllegalArgumentException("Unexpected table migration option " 
+ runOptions.migrationOption);
-    }
+    failedValidationChecks = migrateTable(tableObj, tableType, 
migrationOption, runOptions.dryRun,
+            getHiveUpdater(), hms, conf);
 
-    if (tableType == TableType.MANAGED_TABLE) {
+    if (!failedValidationChecks && (TableType.valueOf(tableObj.getTableType()) 
== TableType.MANAGED_TABLE)) {
       Path tablePath = new Path(tableObj.getSd().getLocation());
       if (modifyDefaultManagedLocation && shouldModifyTableLocation(dbObj, 
tableObj)) {
         Path newTablePath = wh.getDnsPath(
@@ -623,7 +624,8 @@ public class HiveStrictManagedMigration {
     }
   }
 
-  void renameFilesToConformToAcid(Table tableObj) throws IOException, 
TException {
+  static void renameFilesToConformToAcid(Table tableObj, IMetaStoreClient hms, 
Configuration conf, boolean dryRun)
+          throws IOException, TException {
     if (isPartitionedTable(tableObj)) {
       String dbName = tableObj.getDbName();
       String tableName = tableObj.getTableName();
@@ -634,7 +636,7 @@ public class HiveStrictManagedMigration {
         FileSystem fs = partPath.getFileSystem(conf);
         if (fs.exists(partPath)) {
           UpgradeTool.handleRenameFiles(tableObj, partPath,
-              !runOptions.dryRun, conf, tableObj.getSd().getBucketColsSize() > 
0, null);
+              !dryRun, conf, tableObj.getSd().getBucketColsSize() > 0, null);
         }
       }
     } else {
@@ -642,12 +644,13 @@ public class HiveStrictManagedMigration {
       FileSystem fs = tablePath.getFileSystem(conf);
       if (fs.exists(tablePath)) {
         UpgradeTool.handleRenameFiles(tableObj, tablePath,
-            !runOptions.dryRun, conf, tableObj.getSd().getBucketColsSize() > 
0, null);
+            !dryRun, conf, tableObj.getSd().getBucketColsSize() > 0, null);
       }
     }
   }
 
-  TableMigrationOption determineMigrationTypeAutomatically(Table tableObj, TableType tableType)
+  public static TableMigrationOption determineMigrationTypeAutomatically(Table tableObj, TableType tableType,
+     String ownerName, Configuration conf, IMetaStoreClient hms, Boolean isPathOwnedByHive)
       throws IOException, MetaException, TException {
     TableMigrationOption result = TableMigrationOption.NONE;
     String msg;
@@ -657,7 +660,7 @@ public class HiveStrictManagedMigration {
         // Always keep transactional tables as managed tables.
         result = TableMigrationOption.MANAGED;
       } else {
-        String reason = shouldTableBeExternal(tableObj);
+        String reason = shouldTableBeExternal(tableObj, ownerName, conf, hms, 
isPathOwnedByHive);
         if (reason != null) {
           LOG.debug("Converting {} to external table. {}", 
getQualifiedName(tableObj), reason);
           result = TableMigrationOption.EXTERNAL;
@@ -697,7 +700,8 @@ public class HiveStrictManagedMigration {
     convertToMMTableProps.put("transactional_properties", "insert_only");
   }
 
-  boolean migrateToExternalTable(Table tableObj, TableType tableType) throws 
HiveException {
+  static boolean migrateToExternalTable(Table tableObj, TableType tableType, 
boolean dryRun, HiveUpdater hiveUpdater)
+          throws HiveException {
     String msg;
     switch (tableType) {
     case MANAGED_TABLE:
@@ -708,9 +712,9 @@ public class HiveStrictManagedMigration {
         return false;
       }
       LOG.info("Converting {} to external table ...", 
getQualifiedName(tableObj));
-      if (!runOptions.dryRun) {
+      if (!dryRun) {
         tableObj.setTableType(TableType.EXTERNAL_TABLE.toString());
-        getHiveUpdater().updateTableProperties(tableObj, 
convertToExternalTableProps);
+        hiveUpdater.updateTableProperties(tableObj, 
convertToExternalTableProps);
       }
       return true;
     case EXTERNAL_TABLE:
@@ -727,13 +731,13 @@ public class HiveStrictManagedMigration {
     return false;
   }
 
-  boolean canTableBeFullAcid(Table tableObj) throws MetaException {
+  static boolean canTableBeFullAcid(Table tableObj) throws MetaException {
     // Table must be acid-compatible table format, and no sorting columns.
     return TransactionalValidationListener.conformToAcid(tableObj) &&
         (tableObj.getSd().getSortColsSize() <= 0);
   }
 
-  Map<String, String> getTablePropsForConversionToTransactional(Map<String, 
String> props,
+  static Map<String, String> 
getTablePropsForConversionToTransactional(Map<String, String> props,
       boolean convertFromExternal) {
     if (convertFromExternal) {
       // Copy the properties to a new map so we can add EXTERNAL=FALSE
@@ -743,7 +747,9 @@ public class HiveStrictManagedMigration {
     return props;
   }
 
-  boolean migrateToManagedTable(Table tableObj, TableType tableType) throws 
HiveException, IOException, MetaException, TException {
+  static boolean migrateToManagedTable(Table tableObj, TableType tableType, 
boolean dryRun, HiveUpdater hiveUpdater,
+                                       IMetaStoreClient hms, Configuration 
conf)
+          throws HiveException, IOException, MetaException, TException {
 
     boolean convertFromExternal = false;
     switch (tableType) {
@@ -784,20 +790,22 @@ public class HiveStrictManagedMigration {
         // TODO: option to allow converting ORC file to insert-only 
transactional?
         LOG.info("Converting {} to full transactional table", 
getQualifiedName(tableObj));
 
-        renameFilesToConformToAcid(tableObj);
+        if (hiveUpdater.doFileRename) {
+          renameFilesToConformToAcid(tableObj, hms, conf, dryRun);
+        }
 
-        if (!runOptions.dryRun) {
+        if (!dryRun) {
           Map<String, String> props = 
getTablePropsForConversionToTransactional(
               convertToAcidTableProps, convertFromExternal);
-          getHiveUpdater().updateTableProperties(tableObj, props);
+          hiveUpdater.updateTableProperties(tableObj, props);
         }
         return true;
       } else {
         LOG.info("Converting {} to insert-only transactional table", 
getQualifiedName(tableObj));
-        if (!runOptions.dryRun) {
+        if (!dryRun) {
           Map<String, String> props = 
getTablePropsForConversionToTransactional(
               convertToMMTableProps, convertFromExternal);
-          getHiveUpdater().updateTableProperties(tableObj, props);
+          hiveUpdater.updateTableProperties(tableObj, props);
         }
         return true;
       }
@@ -809,7 +817,9 @@ public class HiveStrictManagedMigration {
     }
   }
 
-  String shouldTableBeExternal(Table tableObj) throws IOException, 
MetaException, TException {
+  static String shouldTableBeExternal(Table tableObj, String ownerName, 
Configuration conf,
+                                      IMetaStoreClient hms, Boolean 
isPathOwnedByHive)
+          throws IOException, MetaException, TException {
     if (MetaStoreUtils.isNonNativeTable(tableObj)) {
       return "Table is a non-native (StorageHandler) table";
     }
@@ -824,14 +834,19 @@ public class HiveStrictManagedMigration {
     // then assume table is using storage-based auth - set external.
     // Transactional tables should still remain transactional,
     // but we should have already checked for that before this point.
-    if (shouldTablePathBeExternal(tableObj, ownerName)) {
+    if (isPathOwnedByHive != null) {
+      // for replication flow, the path ownership must be verified at source 
cluster itself.
+      return isPathOwnedByHive ? null :
+              String.format("One or more table directories is not owned by 
hive or non-HDFS path at source cluster");
+    } else if (shouldTablePathBeExternal(tableObj, ownerName, conf, hms)) {
       return String.format("One or more table directories not owned by %s, or 
non-HDFS path", ownerName);
     }
 
     return null;
   }
 
-  boolean shouldTablePathBeExternal(Table tableObj, String userName) throws 
IOException, MetaException, TException {
+  static boolean shouldTablePathBeExternal(Table tableObj, String ownerName, 
Configuration conf, IMetaStoreClient hms)
+          throws IOException, TException {
     boolean shouldBeExternal = false;
     String dbName = tableObj.getDbName();
     String tableName = tableObj.getTableName();
@@ -876,9 +891,13 @@ public class HiveStrictManagedMigration {
     }
   }
 
+  public static HiveUpdater getHiveUpdater(HiveConf conf) throws HiveException 
{
+    return new HiveUpdater(conf, false);
+  }
+
   HiveUpdater getHiveUpdater() throws HiveException {
     if (hiveUpdater == null) {
-      hiveUpdater = new HiveUpdater();
+      hiveUpdater = new HiveUpdater(conf, true);
     }
     return hiveUpdater;
   }
@@ -895,12 +914,14 @@ public class HiveStrictManagedMigration {
     }
   }
 
-  class HiveUpdater {
+  public static class HiveUpdater {
     Hive hive;
+    boolean doFileRename;
 
-    HiveUpdater() throws HiveException {
+    HiveUpdater(HiveConf conf, boolean fileRename) throws HiveException {
       hive = Hive.get(conf);
       Hive.set(hive);
+      doFileRename = fileRename;
     }
 
     void close() {
@@ -1041,15 +1062,19 @@ public class HiveStrictManagedMigration {
     void updateTableProperties(Table table, Map<String, String> props) throws 
HiveException {
       StringBuilder sb = new StringBuilder();
       boolean isTxn = TxnUtils.isTransactionalTable(table);
-      org.apache.hadoop.hive.ql.metadata.Table modifiedTable =
-          new org.apache.hadoop.hive.ql.metadata.Table(table);
+      org.apache.hadoop.hive.ql.metadata.Table modifiedTable = doFileRename ?
+          new org.apache.hadoop.hive.ql.metadata.Table(table) : null;
       if (props.size() == 0) {
         return;
       }
       boolean first = true;
       for (String key : props.keySet()) {
         String value = props.get(key);
-        modifiedTable.getParameters().put(key, value);
+        if (modifiedTable == null) {
+          table.getParameters().put(key, value);
+        } else {
+          modifiedTable.getParameters().put(key, value);
+        }
 
         // Build properties list for logging
         if (first) {
@@ -1069,7 +1094,9 @@ public class HiveStrictManagedMigration {
 
       // Note: for now, this is always called to convert the table to either 
external, or ACID/MM,
       //       so the original table would be non-txn and the transaction 
wouldn't be opened.
-      alterTableInternal(isTxn, table, modifiedTable);
+      if (modifiedTable != null) {
+        alterTableInternal(isTxn, table, modifiedTable);
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
----------------------------------------------------------------------
diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
index 45618e7..9c33229 100644
--- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
+++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
@@ -816,13 +816,13 @@ import org.slf4j.LoggerFactory;
           case 5: // PARTITIONNAMES
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list716 = 
iprot.readListBegin();
-                struct.partitionnames = new ArrayList<String>(_list716.size);
-                String _elem717;
-                for (int _i718 = 0; _i718 < _list716.size; ++_i718)
+                org.apache.thrift.protocol.TList _list724 = 
iprot.readListBegin();
+                struct.partitionnames = new ArrayList<String>(_list724.size);
+                String _elem725;
+                for (int _i726 = 0; _i726 < _list724.size; ++_i726)
                 {
-                  _elem717 = iprot.readString();
-                  struct.partitionnames.add(_elem717);
+                  _elem725 = iprot.readString();
+                  struct.partitionnames.add(_elem725);
                 }
                 iprot.readListEnd();
               }
@@ -872,9 +872,9 @@ import org.slf4j.LoggerFactory;
         oprot.writeFieldBegin(PARTITIONNAMES_FIELD_DESC);
         {
           oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
struct.partitionnames.size()));
-          for (String _iter719 : struct.partitionnames)
+          for (String _iter727 : struct.partitionnames)
           {
-            oprot.writeString(_iter719);
+            oprot.writeString(_iter727);
           }
           oprot.writeListEnd();
         }
@@ -910,9 +910,9 @@ import org.slf4j.LoggerFactory;
       oprot.writeString(struct.tablename);
       {
         oprot.writeI32(struct.partitionnames.size());
-        for (String _iter720 : struct.partitionnames)
+        for (String _iter728 : struct.partitionnames)
         {
-          oprot.writeString(_iter720);
+          oprot.writeString(_iter728);
         }
       }
       BitSet optionals = new BitSet();
@@ -937,13 +937,13 @@ import org.slf4j.LoggerFactory;
       struct.tablename = iprot.readString();
       struct.setTablenameIsSet(true);
       {
-        org.apache.thrift.protocol.TList _list721 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
iprot.readI32());
-        struct.partitionnames = new ArrayList<String>(_list721.size);
-        String _elem722;
-        for (int _i723 = 0; _i723 < _list721.size; ++_i723)
+        org.apache.thrift.protocol.TList _list729 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
iprot.readI32());
+        struct.partitionnames = new ArrayList<String>(_list729.size);
+        String _elem730;
+        for (int _i731 = 0; _i731 < _list729.size; ++_i731)
         {
-          _elem722 = iprot.readString();
-          struct.partitionnames.add(_elem722);
+          _elem730 = iprot.readString();
+          struct.partitionnames.add(_elem730);
         }
       }
       struct.setPartitionnamesIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java
----------------------------------------------------------------------
diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java
 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java
index 5fcb98f..d05e7ba 100644
--- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java
+++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java
@@ -716,13 +716,13 @@ import org.slf4j.LoggerFactory;
           case 3: // TXN_IDS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list642 = 
iprot.readListBegin();
-                struct.txnIds = new ArrayList<Long>(_list642.size);
-                long _elem643;
-                for (int _i644 = 0; _i644 < _list642.size; ++_i644)
+                org.apache.thrift.protocol.TList _list650 = 
iprot.readListBegin();
+                struct.txnIds = new ArrayList<Long>(_list650.size);
+                long _elem651;
+                for (int _i652 = 0; _i652 < _list650.size; ++_i652)
                 {
-                  _elem643 = iprot.readI64();
-                  struct.txnIds.add(_elem643);
+                  _elem651 = iprot.readI64();
+                  struct.txnIds.add(_elem651);
                 }
                 iprot.readListEnd();
               }
@@ -742,14 +742,14 @@ import org.slf4j.LoggerFactory;
           case 5: // SRC_TXN_TO_WRITE_ID_LIST
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list645 = 
iprot.readListBegin();
-                struct.srcTxnToWriteIdList = new 
ArrayList<TxnToWriteId>(_list645.size);
-                TxnToWriteId _elem646;
-                for (int _i647 = 0; _i647 < _list645.size; ++_i647)
+                org.apache.thrift.protocol.TList _list653 = 
iprot.readListBegin();
+                struct.srcTxnToWriteIdList = new 
ArrayList<TxnToWriteId>(_list653.size);
+                TxnToWriteId _elem654;
+                for (int _i655 = 0; _i655 < _list653.size; ++_i655)
                 {
-                  _elem646 = new TxnToWriteId();
-                  _elem646.read(iprot);
-                  struct.srcTxnToWriteIdList.add(_elem646);
+                  _elem654 = new TxnToWriteId();
+                  _elem654.read(iprot);
+                  struct.srcTxnToWriteIdList.add(_elem654);
                 }
                 iprot.readListEnd();
               }
@@ -786,9 +786,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(TXN_IDS_FIELD_DESC);
           {
             oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, 
struct.txnIds.size()));
-            for (long _iter648 : struct.txnIds)
+            for (long _iter656 : struct.txnIds)
             {
-              oprot.writeI64(_iter648);
+              oprot.writeI64(_iter656);
             }
             oprot.writeListEnd();
           }
@@ -807,9 +807,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SRC_TXN_TO_WRITE_ID_LIST_FIELD_DESC);
           {
             oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
struct.srcTxnToWriteIdList.size()));
-            for (TxnToWriteId _iter649 : struct.srcTxnToWriteIdList)
+            for (TxnToWriteId _iter657 : struct.srcTxnToWriteIdList)
             {
-              _iter649.write(oprot);
+              _iter657.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -849,9 +849,9 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetTxnIds()) {
         {
           oprot.writeI32(struct.txnIds.size());
-          for (long _iter650 : struct.txnIds)
+          for (long _iter658 : struct.txnIds)
           {
-            oprot.writeI64(_iter650);
+            oprot.writeI64(_iter658);
           }
         }
       }
@@ -861,9 +861,9 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetSrcTxnToWriteIdList()) {
         {
           oprot.writeI32(struct.srcTxnToWriteIdList.size());
-          for (TxnToWriteId _iter651 : struct.srcTxnToWriteIdList)
+          for (TxnToWriteId _iter659 : struct.srcTxnToWriteIdList)
           {
-            _iter651.write(oprot);
+            _iter659.write(oprot);
           }
         }
       }
@@ -879,13 +879,13 @@ import org.slf4j.LoggerFactory;
       BitSet incoming = iprot.readBitSet(3);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list652 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, 
iprot.readI32());
-          struct.txnIds = new ArrayList<Long>(_list652.size);
-          long _elem653;
-          for (int _i654 = 0; _i654 < _list652.size; ++_i654)
+          org.apache.thrift.protocol.TList _list660 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, 
iprot.readI32());
+          struct.txnIds = new ArrayList<Long>(_list660.size);
+          long _elem661;
+          for (int _i662 = 0; _i662 < _list660.size; ++_i662)
           {
-            _elem653 = iprot.readI64();
-            struct.txnIds.add(_elem653);
+            _elem661 = iprot.readI64();
+            struct.txnIds.add(_elem661);
           }
         }
         struct.setTxnIdsIsSet(true);
@@ -896,14 +896,14 @@ import org.slf4j.LoggerFactory;
       }
       if (incoming.get(2)) {
         {
-          org.apache.thrift.protocol.TList _list655 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32());
-          struct.srcTxnToWriteIdList = new 
ArrayList<TxnToWriteId>(_list655.size);
-          TxnToWriteId _elem656;
-          for (int _i657 = 0; _i657 < _list655.size; ++_i657)
+          org.apache.thrift.protocol.TList _list663 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32());
+          struct.srcTxnToWriteIdList = new 
ArrayList<TxnToWriteId>(_list663.size);
+          TxnToWriteId _elem664;
+          for (int _i665 = 0; _i665 < _list663.size; ++_i665)
           {
-            _elem656 = new TxnToWriteId();
-            _elem656.read(iprot);
-            struct.srcTxnToWriteIdList.add(_elem656);
+            _elem664 = new TxnToWriteId();
+            _elem664.read(iprot);
+            struct.srcTxnToWriteIdList.add(_elem664);
           }
         }
         struct.setSrcTxnToWriteIdListIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java
----------------------------------------------------------------------
diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java
 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java
index 2a13eba..193179b 100644
--- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java
+++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java
@@ -354,14 +354,14 @@ import org.slf4j.LoggerFactory;
           case 1: // TXN_TO_WRITE_IDS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list658 = 
iprot.readListBegin();
-                struct.txnToWriteIds = new 
ArrayList<TxnToWriteId>(_list658.size);
-                TxnToWriteId _elem659;
-                for (int _i660 = 0; _i660 < _list658.size; ++_i660)
+                org.apache.thrift.protocol.TList _list666 = 
iprot.readListBegin();
+                struct.txnToWriteIds = new 
ArrayList<TxnToWriteId>(_list666.size);
+                TxnToWriteId _elem667;
+                for (int _i668 = 0; _i668 < _list666.size; ++_i668)
                 {
-                  _elem659 = new TxnToWriteId();
-                  _elem659.read(iprot);
-                  struct.txnToWriteIds.add(_elem659);
+                  _elem667 = new TxnToWriteId();
+                  _elem667.read(iprot);
+                  struct.txnToWriteIds.add(_elem667);
                 }
                 iprot.readListEnd();
               }
@@ -387,9 +387,9 @@ import org.slf4j.LoggerFactory;
         oprot.writeFieldBegin(TXN_TO_WRITE_IDS_FIELD_DESC);
         {
           oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
struct.txnToWriteIds.size()));
-          for (TxnToWriteId _iter661 : struct.txnToWriteIds)
+          for (TxnToWriteId _iter669 : struct.txnToWriteIds)
           {
-            _iter661.write(oprot);
+            _iter669.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -414,9 +414,9 @@ import org.slf4j.LoggerFactory;
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.txnToWriteIds.size());
-        for (TxnToWriteId _iter662 : struct.txnToWriteIds)
+        for (TxnToWriteId _iter670 : struct.txnToWriteIds)
         {
-          _iter662.write(oprot);
+          _iter670.write(oprot);
         }
       }
     }
@@ -425,14 +425,14 @@ import org.slf4j.LoggerFactory;
     public void read(org.apache.thrift.protocol.TProtocol prot, 
AllocateTableWriteIdsResponse struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
-        org.apache.thrift.protocol.TList _list663 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32());
-        struct.txnToWriteIds = new ArrayList<TxnToWriteId>(_list663.size);
-        TxnToWriteId _elem664;
-        for (int _i665 = 0; _i665 < _list663.size; ++_i665)
+        org.apache.thrift.protocol.TList _list671 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32());
+        struct.txnToWriteIds = new ArrayList<TxnToWriteId>(_list671.size);
+        TxnToWriteId _elem672;
+        for (int _i673 = 0; _i673 < _list671.size; ++_i673)
         {
-          _elem664 = new TxnToWriteId();
-          _elem664.read(iprot);
-          struct.txnToWriteIds.add(_elem664);
+          _elem672 = new TxnToWriteId();
+          _elem672.read(iprot);
+          struct.txnToWriteIds.add(_elem672);
         }
       }
       struct.setTxnToWriteIdsIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
----------------------------------------------------------------------
diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
index 4d4595a..d85dda5 100644
--- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
+++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
@@ -877,14 +877,14 @@ import org.slf4j.LoggerFactory;
           case 4: // PARTITIONS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list952 = 
iprot.readListBegin();
-                struct.partitions = new ArrayList<Partition>(_list952.size);
-                Partition _elem953;
-                for (int _i954 = 0; _i954 < _list952.size; ++_i954)
+                org.apache.thrift.protocol.TList _list960 = 
iprot.readListBegin();
+                struct.partitions = new ArrayList<Partition>(_list960.size);
+                Partition _elem961;
+                for (int _i962 = 0; _i962 < _list960.size; ++_i962)
                 {
-                  _elem953 = new Partition();
-                  _elem953.read(iprot);
-                  struct.partitions.add(_elem953);
+                  _elem961 = new Partition();
+                  _elem961.read(iprot);
+                  struct.partitions.add(_elem961);
                 }
                 iprot.readListEnd();
               }
@@ -952,9 +952,9 @@ import org.slf4j.LoggerFactory;
         oprot.writeFieldBegin(PARTITIONS_FIELD_DESC);
         {
           oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
struct.partitions.size()));
-          for (Partition _iter955 : struct.partitions)
+          for (Partition _iter963 : struct.partitions)
           {
-            _iter955.write(oprot);
+            _iter963.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -1000,9 +1000,9 @@ import org.slf4j.LoggerFactory;
       oprot.writeString(struct.tableName);
       {
         oprot.writeI32(struct.partitions.size());
-        for (Partition _iter956 : struct.partitions)
+        for (Partition _iter964 : struct.partitions)
         {
-          _iter956.write(oprot);
+          _iter964.write(oprot);
         }
       }
       BitSet optionals = new BitSet();
@@ -1041,14 +1041,14 @@ import org.slf4j.LoggerFactory;
       struct.tableName = iprot.readString();
       struct.setTableNameIsSet(true);
       {
-        org.apache.thrift.protocol.TList _list957 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32());
-        struct.partitions = new ArrayList<Partition>(_list957.size);
-        Partition _elem958;
-        for (int _i959 = 0; _i959 < _list957.size; ++_i959)
+        org.apache.thrift.protocol.TList _list965 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32());
+        struct.partitions = new ArrayList<Partition>(_list965.size);
+        Partition _elem966;
+        for (int _i967 = 0; _i967 < _list965.size; ++_i967)
         {
-          _elem958 = new Partition();
-          _elem958.read(iprot);
-          struct.partitions.add(_elem958);
+          _elem966 = new Partition();
+          _elem966.read(iprot);
+          struct.partitions.add(_elem966);
         }
       }
       struct.setPartitionsIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java
----------------------------------------------------------------------
diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java
 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java
index 3fdd295..3eb55b1 100644
--- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java
+++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java
@@ -351,13 +351,13 @@ import org.slf4j.LoggerFactory;
           case 1: // FILE_IDS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list832 = 
iprot.readListBegin();
-                struct.fileIds = new ArrayList<Long>(_list832.size);
-                long _elem833;
-                for (int _i834 = 0; _i834 < _list832.size; ++_i834)
+                org.apache.thrift.protocol.TList _list840 = 
iprot.readListBegin();
+                struct.fileIds = new ArrayList<Long>(_list840.size);
+                long _elem841;
+                for (int _i842 = 0; _i842 < _list840.size; ++_i842)
                 {
-                  _elem833 = iprot.readI64();
-                  struct.fileIds.add(_elem833);
+                  _elem841 = iprot.readI64();
+                  struct.fileIds.add(_elem841);
                 }
                 iprot.readListEnd();
               }
@@ -383,9 +383,9 @@ import org.slf4j.LoggerFactory;
         oprot.writeFieldBegin(FILE_IDS_FIELD_DESC);
         {
           oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, 
struct.fileIds.size()));
-          for (long _iter835 : struct.fileIds)
+          for (long _iter843 : struct.fileIds)
           {
-            oprot.writeI64(_iter835);
+            oprot.writeI64(_iter843);
           }
           oprot.writeListEnd();
         }
@@ -410,9 +410,9 @@ import org.slf4j.LoggerFactory;
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.fileIds.size());
-        for (long _iter836 : struct.fileIds)
+        for (long _iter844 : struct.fileIds)
         {
-          oprot.writeI64(_iter836);
+          oprot.writeI64(_iter844);
         }
       }
     }
@@ -421,13 +421,13 @@ import org.slf4j.LoggerFactory;
     public void read(org.apache.thrift.protocol.TProtocol prot, 
ClearFileMetadataRequest struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
-        org.apache.thrift.protocol.TList _list837 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, 
iprot.readI32());
-        struct.fileIds = new ArrayList<Long>(_list837.size);
-        long _elem838;
-        for (int _i839 = 0; _i839 < _list837.size; ++_i839)
+        org.apache.thrift.protocol.TList _list845 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, 
iprot.readI32());
+        struct.fileIds = new ArrayList<Long>(_list845.size);
+        long _elem846;
+        for (int _i847 = 0; _i847 < _list845.size; ++_i847)
         {
-          _elem838 = iprot.readI64();
-          struct.fileIds.add(_elem838);
+          _elem846 = iprot.readI64();
+          struct.fileIds.add(_elem846);
         }
       }
       struct.setFileIdsIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/10cfba20/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java
----------------------------------------------------------------------
diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java
 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java
index f5c9582..17f8b77 100644
--- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java
+++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java
@@ -354,13 +354,13 @@ import org.slf4j.LoggerFactory;
           case 1: // VALUES
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list848 = 
iprot.readListBegin();
-                struct.values = new ArrayList<ClientCapability>(_list848.size);
-                ClientCapability _elem849;
-                for (int _i850 = 0; _i850 < _list848.size; ++_i850)
+                org.apache.thrift.protocol.TList _list856 = 
iprot.readListBegin();
+                struct.values = new ArrayList<ClientCapability>(_list856.size);
+                ClientCapability _elem857;
+                for (int _i858 = 0; _i858 < _list856.size; ++_i858)
                 {
-                  _elem849 = 
org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32());
-                  struct.values.add(_elem849);
+                  _elem857 = 
org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32());
+                  struct.values.add(_elem857);
                 }
                 iprot.readListEnd();
               }
@@ -386,9 +386,9 @@ import org.slf4j.LoggerFactory;
         oprot.writeFieldBegin(VALUES_FIELD_DESC);
         {
           oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, 
struct.values.size()));
-          for (ClientCapability _iter851 : struct.values)
+          for (ClientCapability _iter859 : struct.values)
           {
-            oprot.writeI32(_iter851.getValue());
+            oprot.writeI32(_iter859.getValue());
           }
           oprot.writeListEnd();
         }
@@ -413,9 +413,9 @@ import org.slf4j.LoggerFactory;
       TTupleProtocol oprot = (TTupleProtocol) prot;
       {
         oprot.writeI32(struct.values.size());
-        for (ClientCapability _iter852 : struct.values)
+        for (ClientCapability _iter860 : struct.values)
         {
-          oprot.writeI32(_iter852.getValue());
+          oprot.writeI32(_iter860.getValue());
         }
       }
     }
@@ -424,13 +424,13 @@ import org.slf4j.LoggerFactory;
     public void read(org.apache.thrift.protocol.TProtocol prot, 
ClientCapabilities struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       {
-        org.apache.thrift.protocol.TList _list853 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, 
iprot.readI32());
-        struct.values = new ArrayList<ClientCapability>(_list853.size);
-        ClientCapability _elem854;
-        for (int _i855 = 0; _i855 < _list853.size; ++_i855)
+        org.apache.thrift.protocol.TList _list861 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, 
iprot.readI32());
+        struct.values = new ArrayList<ClientCapability>(_list861.size);
+        ClientCapability _elem862;
+        for (int _i863 = 0; _i863 < _list861.size; ++_i863)
         {
-          _elem854 = 
org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32());
-          struct.values.add(_elem854);
+          _elem862 = 
org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32());
+          struct.values.add(_elem862);
         }
       }
       struct.setValuesIsSet(true);

Reply via email to