This is an automated email from the ASF dual-hosted git repository.
ayushsaxena pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new 8077bb118cb HIVE-25872: Skip tracking of alterDatabase events for
replication specific properties (#2950). (Haymant Mangla, reviewed by Ayush
Saxena and Rajesh Balamohan)
8077bb118cb is described below
commit 8077bb118cb418cae24a46f7155c481bf893da99
Author: Haymant Mangla <[email protected]>
AuthorDate: Tue May 17 10:32:34 2022 +0530
HIVE-25872: Skip tracking of alterDatabase events for replication specific
properties (#2950). (Haymant Mangla, reviewed by Ayush Saxena and Rajesh
Balamohan)
---
.../hive/metastore/TestReplChangeManager.java | 2 +-
.../ql/parse/BaseReplicationAcrossInstances.java | 2 +-
.../parse/BaseReplicationScenariosAcidTables.java | 2 +-
.../apache/hadoop/hive/ql/parse/TestCopyUtils.java | 2 +-
.../ql/parse/TestMetaStoreEventListenerInRepl.java | 11 +---
.../ql/parse/TestReplicationOfHiveStreaming.java | 2 +-
.../parse/TestReplicationOnHDFSEncryptedZones.java | 2 +-
.../parse/TestReplicationOptimisedBootstrap.java | 7 ++-
.../hive/ql/parse/TestReplicationScenarios.java | 2 +-
.../parse/TestReplicationScenariosAcidTables.java | 61 +++++++++++++++++++---
.../TestReplicationScenariosAcrossInstances.java | 8 +--
.../TestReplicationScenariosExclusiveReplica.java | 2 +-
.../TestReplicationScenariosExternalTables.java | 22 ++++----
...icationScenariosExternalTablesMetaDataOnly.java | 2 +-
...licationScenariosIncrementalLoadAcidTables.java | 2 +-
.../parse/TestScheduledReplicationScenarios.java | 2 +-
.../ql/parse/TestStatsReplicationScenarios.java | 2 +-
.../hadoop/hive/ql/parse/WarehouseInstance.java | 4 +-
.../plugin/TestHiveAuthorizerCheckInvocation.java | 2 +-
.../txn/compactor/TestCleanerWithReplication.java | 2 +-
.../org/apache/hive/jdbc/TestJdbcWithMiniHS2.java | 2 +-
.../ReplRemoveFirstIncLoadPendFlagOperation.java | 6 +--
.../hadoop/hive/ql/exec/repl/ReplDumpTask.java | 2 +-
.../ql/exec/repl/bootstrap/load/LoadDatabase.java | 12 ++---
.../repl/bootstrap/load/table/LoadPartitions.java | 6 +--
.../hadoop/hive/ql/exec/repl/util/ReplUtils.java | 21 +++-----
.../org/apache/hadoop/hive/ql/parse/EximUtil.java | 15 +++---
.../hadoop/hive/ql/parse/repl/dump/Utils.java | 6 +--
.../ql/parse/repl/dump/io/PartitionSerializer.java | 4 +-
.../ql/parse/repl/dump/io/TableSerializer.java | 6 +--
.../repl/load/message/AlterDatabaseHandler.java | 18 +------
.../apache/hadoop/hive/common/repl/ReplConst.java | 10 ++++
.../hadoop/hive/metastore/ReplChangeManager.java | 7 +--
.../hive/metastore/utils/MetaStoreUtils.java | 15 ++++++
.../apache/hadoop/hive/metastore/HMSHandler.java | 30 +++++------
.../hive/metastore/events/AlterDatabaseEvent.java | 48 ++++++++++++++++-
.../hadoop/hive/metastore/model/MDatabase.java | 6 +--
37 files changed, 216 insertions(+), 139 deletions(-)
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
index 6e5fa2e6429..ebac38d1094 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.ReplChangeManager.RecycleType;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationAcrossInstances.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationAcrossInstances.java
index 6ce32c8d659..e5807ee0d3d 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationAcrossInstances.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationAcrossInstances.java
@@ -36,7 +36,7 @@ import java.nio.file.Files;
import java.util.HashMap;
import java.util.Map;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
public class BaseReplicationAcrossInstances {
@Rule
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationScenariosAcidTables.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationScenariosAcidTables.java
index b347552f85b..e1f3238486b 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationScenariosAcidTables.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationScenariosAcidTables.java
@@ -59,7 +59,7 @@ import java.util.List;
import java.util.Collections;
import java.util.Map;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
/**
* TestReplicationScenariosAcidTablesBase - base class for replication for
ACID tables tests
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestCopyUtils.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestCopyUtils.java
index 28b4abe0ed9..17791f4604e 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestCopyUtils.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestCopyUtils.java
@@ -43,7 +43,7 @@ import java.util.Map;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.shims.HadoopShims.MiniMrShim;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
public class TestCopyUtils {
@Rule
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestMetaStoreEventListenerInRepl.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestMetaStoreEventListenerInRepl.java
index bb3036598ab..2704e5a58a7 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestMetaStoreEventListenerInRepl.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestMetaStoreEventListenerInRepl.java
@@ -44,7 +44,7 @@ import java.util.HashSet;
import java.util.Map;
import java.util.Set;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
/**
* TestMetaStoreEventListenerInRepl - Test metastore events created by
replication.
@@ -122,9 +122,6 @@ public class TestMetaStoreEventListenerInRepl {
// Add expected events with associated tables, if any.
Map<String, Set<String>> eventsMap = new HashMap<>();
eventsMap.put(CreateDatabaseEvent.class.getName(), null);
- // Replication causes many implicit alter database operations, so
metastore will see some
- // alter table events as well.
- eventsMap.put(AlterDatabaseEvent.class.getName(), null);
eventsMap.put(CreateTableEvent.class.getName(), new
HashSet<>(Arrays.asList("t1", "t2", "t4")));
eventsMap.put(AlterTableEvent.class.getName(), new
HashSet<>(Arrays.asList("t1", "t2", "t4")));
return eventsMap;
@@ -142,9 +139,6 @@ public class TestMetaStoreEventListenerInRepl {
// Add expected events with associated tables, if any.
Map<String, Set<String>> eventsMap = new HashMap<>();
- // Replication causes many implicit alter database operations, so
metastore will see some
- // alter table events as well.
- eventsMap.put(AlterDatabaseEvent.class.getName(), null);
eventsMap.put(CreateTableEvent.class.getName(), new
HashSet<>(Arrays.asList("t6")));
eventsMap.put(AlterTableEvent.class.getName(), new
HashSet<>(Arrays.asList("t1", "t2", "t6")));
eventsMap.put(DropTableEvent.class.getName(), new
HashSet<>(Arrays.asList("t2")));
@@ -160,9 +154,6 @@ public class TestMetaStoreEventListenerInRepl {
.run("drop table t1");
// Add expected events with associated tables, if any.
Map<String, Set<String>> eventsMap = new HashMap<>();
- // Replication causes many implicit alter database operations, so
metastore will see some
- // alter table events as well.
- eventsMap.put(AlterDatabaseEvent.class.getName(), null);
eventsMap.put(CreateTableEvent.class.getName(), new
HashSet<>(Arrays.asList("t7")));
eventsMap.put(AlterTableEvent.class.getName(), new
HashSet<>(Arrays.asList("t4", "t7")));
eventsMap.put(DropTableEvent.class.getName(), new
HashSet<>(Arrays.asList("t1")));
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOfHiveStreaming.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOfHiveStreaming.java
index d40449a2cdb..ef83ec6c792 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOfHiveStreaming.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOfHiveStreaming.java
@@ -41,7 +41,7 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
/**
* TestReplicationOfHiveStreaming - test replication for streaming ingest on
ACID tables.
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java
index d8ac0ce1882..94324c13177 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java
@@ -43,7 +43,7 @@ import java.util.HashMap;
import java.util.List;
import static
org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_ENABLED;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
public class TestReplicationOnHDFSEncryptedZones {
private static String jksFile = System.getProperty("java.io.tmpdir") +
"/test.jks";
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOptimisedBootstrap.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOptimisedBootstrap.java
index 537d8604346..5ccd74f3708 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOptimisedBootstrap.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOptimisedBootstrap.java
@@ -550,9 +550,8 @@ public class TestReplicationOptimisedBootstrap extends
BaseReplicationAcrossInst
.getNextNotification(Long.parseLong(getEventIdFromFile(new
Path(tuple.dumpLocation), conf)[1]), -1,
new DatabaseAndTableFilter(replicatedDbName, null));
- // There should be 4 events, one for alter db, second to remove first
incremental pending and then two custom
- // alter operations.
- assertEquals(4, nl.getEvents().size());
+ // There should be 2 events, two custom alter operations.
+ assertEquals(2, nl.getEvents().size());
}
@Test
@@ -660,7 +659,7 @@ public class TestReplicationOptimisedBootstrap extends
BaseReplicationAcrossInst
.getNextNotification(Long.parseLong(getEventIdFromFile(new
Path(tuple.dumpLocation), conf)[1]), 10,
new DatabaseAndTableFilter(replicatedDbName, null));
- assertEquals(1, nl.getEvents().size());
+ assertEquals(0, nl.getEventsSize());
}
@Test
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index c43cd457f58..024c242b3a7 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -134,7 +134,7 @@ import java.util.Base64;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
import static
org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.EVENT_DB_LISTENER_CLEAN_STARTUP_WAIT_INTERVAL;
import static
org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.REPL_EVENT_DB_LISTENER_TTL;
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
index 2612cbf7e6c..7fc55cf9805 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
@@ -77,7 +77,7 @@ import java.util.List;
import java.util.Map;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.DUMP_ACKNOWLEDGEMENT;
import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.LOAD_ACKNOWLEDGEMENT;
import static org.junit.Assert.assertEquals;
@@ -156,6 +156,50 @@ public class TestReplicationScenariosAcidTables extends
BaseReplicationScenarios
primary.run("drop database if exists " + primaryDbName + "_extra cascade");
}
+ @Test
+ public void testReplAlterDbEventsNotCapturedInNotificationLog() throws
Throwable {
+ String srcDbName = "srcDb";
+ String replicaDb = "tgtDb";
+ try {
+ //Perform empty bootstrap dump and load
+ primary.run("CREATE DATABASE " + srcDbName);
+ long lastEventId = primary.getCurrentNotificationEventId().getEventId();
+ //Assert that repl.source.for is not captured in NotificationLog
+ WarehouseInstance.Tuple dumpData = primary.dump(srcDbName);
+ long latestEventId =
primary.getCurrentNotificationEventId().getEventId();
+ assertEquals(lastEventId, latestEventId);
+
+ replica.run("REPL LOAD " + srcDbName + " INTO " + replicaDb);
+ latestEventId = replica.getCurrentNotificationEventId().getEventId();
+ //Assert that repl.target.id, hive.repl.ckpt.key and
hive.repl.first.inc.pending is not captured in notificationLog.
+ assertEquals(latestEventId, lastEventId + 1); //This load will generate
only 1 event i.e. CREATE_DATABASE
+
+ WarehouseInstance.Tuple incDump = primary.run("use " + srcDbName)
+ .run("create table t1 (id int) clustered by(id) into 3 buckets
stored as orc " +
+ "tblproperties (\"transactional\"=\"true\")")
+ .run("insert into t1 values(1)")
+ .dump(srcDbName);
+
+ //Assert that repl.last.id is not captured in notification log.
+ long noOfEventsInDumpDir =
primary.getNoOfEventsDumped(incDump.dumpLocation, conf);
+ lastEventId = primary.getCurrentNotificationEventId().getEventId();
+ replica.run("REPL LOAD " + srcDbName + " INTO " + replicaDb);
+
+ latestEventId = replica.getCurrentNotificationEventId().getEventId();
+
+ //Validate that there is no addition event generated in notificationLog
table apart from replayed ones.
+ assertEquals(latestEventId, lastEventId + noOfEventsInDumpDir);
+
+ long targetDbReplId = Long.parseLong(replica.getDatabase(replicaDb)
+ .getParameters().get(ReplConst.REPL_TARGET_TABLE_PROPERTY));
+ //Validate that repl.last.id db property has been updated successfully.
+ assertEquals(targetDbReplId, lastEventId);
+ } finally {
+ primary.run("DROP DATABASE IF EXISTS " + srcDbName + " CASCADE");
+ replica.run("DROP DATABASE IF EXISTS " + replicaDb + " CASCADE");
+ }
+ }
+
@Test
public void testTargetDbReplIncompatibleWithNoPropSet() throws Throwable {
testTargetDbReplIncompatible(false);
@@ -930,8 +974,7 @@ public class TestReplicationScenariosAcidTables extends
BaseReplicationScenarios
lastEventId = replica.getCurrentNotificationEventId().getEventId();
replica.run("REPL LOAD " + primaryDbName + " INTO " + replicatedDbName);
currentEventId = replica.getCurrentNotificationEventId().getEventId();
- //This iteration of repl load will have only one event i.e ALTER_DATABASE
to update repl.last.id for the target db.
- assert currentEventId == lastEventId + 1;
+ assert currentEventId == lastEventId;
primary.run("ALTER DATABASE " + primaryDbName +
" SET DBPROPERTIES('" + ReplConst.TARGET_OF_REPLICATION + "'='')");
@@ -1729,7 +1772,7 @@ public class TestReplicationScenariosAcidTables extends
BaseReplicationScenarios
primary.dump(primaryDbName);
long lastReplId = Long.parseLong(bootStrapDump.lastReplicationId);
- primary.testEventCounts(primaryDbName, lastReplId, null, null, 12);
+ primary.testEventCounts(primaryDbName, lastReplId, null, null, 10);
// Test load
replica.load(replicatedDbName, primaryDbName)
@@ -2278,14 +2321,18 @@ public class TestReplicationScenariosAcidTables extends
BaseReplicationScenarios
fs.delete(ackLastEventID, false);
fs.delete(dumpMetaData, false);
//delete all the event folder except first one.
- long firstIncEventID = Long.parseLong(bootstrapDump.lastReplicationId) + 1;
+ long firstIncEventID = -1;
long lastIncEventID = Long.parseLong(incrementalDump1.lastReplicationId);
assertTrue(lastIncEventID > (firstIncEventID + 1));
- for (long eventId=firstIncEventID + 1; eventId<=lastIncEventID; eventId++)
{
+ for (long eventId=Long.parseLong(bootstrapDump.lastReplicationId) + 1;
eventId<=lastIncEventID; eventId++) {
Path eventRoot = new Path(hiveDumpDir, String.valueOf(eventId));
if (fs.exists(eventRoot)) {
- fs.delete(eventRoot, true);
+ if (firstIncEventID == -1){
+ firstIncEventID = eventId;
+ } else {
+ fs.delete(eventRoot, true);
+ }
}
}
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
index d9c63c6073d..31acaa244ae 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
@@ -74,7 +74,7 @@ import java.util.stream.Collectors;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.QUOTA_DONT_SET;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.QUOTA_RESET;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.LOAD_ACKNOWLEDGEMENT;
import static
org.apache.hadoop.hive.ql.exec.repl.ReplAck.NON_RECOVERABLE_MARKER;
import static org.hamcrest.CoreMatchers.equalTo;
@@ -967,13 +967,13 @@ public class TestReplicationScenariosAcrossInstances
extends BaseReplicationAcro
}
private void verifyIfCkptSet(Map<String, String> props, String dumpDir) {
- assertTrue(props.containsKey(ReplUtils.REPL_CHECKPOINT_KEY));
+ assertTrue(props.containsKey(ReplConst.REPL_TARGET_DB_PROPERTY));
String hiveDumpDir = dumpDir + File.separator +
ReplUtils.REPL_HIVE_BASE_DIR;
- assertTrue(props.get(ReplUtils.REPL_CHECKPOINT_KEY).equals(hiveDumpDir));
+
assertTrue(props.get(ReplConst.REPL_TARGET_DB_PROPERTY).equals(hiveDumpDir));
}
private void verifyIfCkptPropMissing(Map<String, String> props) {
- assertFalse(props.containsKey(ReplUtils.REPL_CHECKPOINT_KEY));
+ assertFalse(props.containsKey(ReplConst.REPL_TARGET_DB_PROPERTY));
}
private void verifyIfSrcOfReplPropMissing(Map<String, String> props) {
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExclusiveReplica.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExclusiveReplica.java
index 449ec7ac7bb..c9f4753ba99 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExclusiveReplica.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExclusiveReplica.java
@@ -46,7 +46,7 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
/**
* Test replication scenarios with staging on replica.
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java
index 44fdd00478a..8324b934a20 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java
@@ -83,7 +83,7 @@ import javax.annotation.Nullable;
import static
org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_EXTERNAL_WAREHOUSE_SINGLE_COPY_TASK;
import static
org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_EXTERNAL_WAREHOUSE_SINGLE_COPY_TASK_PATHS;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
import static
org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils.INC_BOOTSTRAP_ROOT_DIR_NAME;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -728,20 +728,24 @@ public class TestReplicationScenariosExternalTables
extends BaseReplicationAcros
fs.delete(ackFile, false);
fs.delete(ackLastEventID, false);
//delete all the event folders except first event
- long startEvent = Long.valueOf(tuple.lastReplicationId) + 1;
+ long startEvent = -1;
+ long endEvent = Long.valueOf(incrementalDump1.lastReplicationId);
+ for (long eventDir = Long.valueOf(tuple.lastReplicationId) + 1; eventDir
<= endEvent; eventDir++) {
+ Path eventRoot = new Path(hiveDumpDir, String.valueOf(eventDir));
+ if (fs.exists(eventRoot)) {
+ if (startEvent == -1){
+ startEvent = eventDir;
+ } else {
+ fs.delete(eventRoot, true);
+ }
+ }
+ }
Path startEventRoot = new Path(hiveDumpDir, String.valueOf(startEvent));
Map<Path, Long> firstEventModTimeMap = new HashMap<>();
for (FileStatus fileStatus: fs.listStatus(startEventRoot)) {
firstEventModTimeMap.put(fileStatus.getPath(),
fileStatus.getModificationTime());
}
- long endEvent = Long.valueOf(incrementalDump1.lastReplicationId);
assertTrue(endEvent - startEvent > 1);
- for (long eventDir = startEvent + 1; eventDir <= endEvent; eventDir++) {
- Path eventRoot = new Path(hiveDumpDir, String.valueOf(eventDir));
- if (fs.exists(eventRoot)) {
- fs.delete(eventRoot, true);
- }
- }
Utils.writeOutput(String.valueOf(startEvent), ackLastEventID,
primary.hiveConf);
WarehouseInstance.Tuple incrementalDump2 = primary.dump(primaryDbName,
withClause);
assertEquals(incrementalDump1.dumpLocation, incrementalDump2.dumpLocation);
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTablesMetaDataOnly.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTablesMetaDataOnly.java
index b68d15ac5da..846ef7fc918 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTablesMetaDataOnly.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTablesMetaDataOnly.java
@@ -48,7 +48,7 @@ import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
import static
org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils.INC_BOOTSTRAP_ROOT_DIR_NAME;
import static
org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils.REPL_HIVE_BASE_DIR;
import static org.junit.Assert.assertFalse;
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosIncrementalLoadAcidTables.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosIncrementalLoadAcidTables.java
index c31c9ef0ac3..56db93c6d7a 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosIncrementalLoadAcidTables.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosIncrementalLoadAcidTables.java
@@ -27,7 +27,7 @@ import
org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncod
import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreClientWithLocalCache;
import org.apache.hadoop.hive.ql.parse.repl.CopyUtils;
import org.apache.hadoop.hive.shims.Utils;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
import org.junit.rules.TestName;
import org.slf4j.Logger;
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java
index e0b389eeaba..1887587ddbb 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java
@@ -57,7 +57,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.ArrayList;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java
index 5d15e1a355c..e23c542d670 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java
@@ -58,7 +58,7 @@ import java.util.List;
import java.util.Map;
import javax.annotation.Nullable;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
/**
* Tests for statistics replication.
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
index eee5702e210..948bd50b558 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
@@ -432,8 +432,8 @@ public class WarehouseInstance implements Closeable {
}
private void verifyIfCkptSet(Map<String, String> props, String dumpDir) {
- assertTrue(props.containsKey(ReplUtils.REPL_CHECKPOINT_KEY));
- assertTrue(props.get(ReplUtils.REPL_CHECKPOINT_KEY).equals(dumpDir));
+ assertTrue(props.containsKey(ReplConst.REPL_TARGET_DB_PROPERTY));
+ assertTrue(props.get(ReplConst.REPL_TARGET_DB_PROPERTY).equals(dumpDir));
}
public void verifyIfCkptSet(String dbName, String dumpDir) throws Exception {
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java
index 3b7980d8415..e306a9970bc 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hive.ql.security.authorization.plugin;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
diff --git
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithReplication.java
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithReplication.java
index 6353b37fe3e..653c08ca0c6 100644
---
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithReplication.java
+++
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithReplication.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
import org.apache.hadoop.hive.metastore.api.Table;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
diff --git
a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
index 184cd6dd9d7..ca13b79f8bd 100644
---
a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
+++
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
@@ -87,7 +87,7 @@ import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
public class TestJdbcWithMiniHS2 {
private static MiniHS2 miniHS2 = null;
diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/flags/ReplRemoveFirstIncLoadPendFlagOperation.java
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/flags/ReplRemoveFirstIncLoadPendFlagOperation.java
index 67eb48c957a..95582e2209c 100644
---
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/flags/ReplRemoveFirstIncLoadPendFlagOperation.java
+++
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/flags/ReplRemoveFirstIncLoadPendFlagOperation.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.hive.ql.ddl.misc.flags;
+import org.apache.hadoop.hive.common.repl.ReplConst;
import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
-import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
import java.util.Map;
@@ -43,9 +43,9 @@ public class ReplRemoveFirstIncLoadPendFlagOperation extends
DDLOperation<ReplRe
for (String dbName : Utils.matchesDb(context.getDb(), dbNameOrPattern)) {
Database database = context.getDb().getMSC().getDatabase(dbName);
Map<String, String> parameters = database.getParameters();
- String incPendPara = parameters != null ?
parameters.get(ReplUtils.REPL_FIRST_INC_PENDING_FLAG) : null;
+ String incPendPara = parameters != null ?
parameters.get(ReplConst.REPL_FIRST_INC_PENDING_FLAG) : null;
if (incPendPara != null) {
- parameters.remove(ReplUtils.REPL_FIRST_INC_PENDING_FLAG);
+ parameters.remove(ReplConst.REPL_FIRST_INC_PENDING_FLAG);
context.getDb().getMSC().alterDatabase(dbName, database);
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
index 339937f1e94..bc141943131 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
@@ -126,7 +126,7 @@ import static
org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_DUMP_METADATA_O
import static
org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_EXTERNAL_WAREHOUSE_SINGLE_COPY_TASK;
import static
org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_EXTERNAL_WAREHOUSE_SINGLE_COPY_TASK_PATHS;
import static
org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_SNAPSHOT_DIFF_FOR_EXTERNAL_TABLE_COPY;
-import static
org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import static
org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION;
import static
org.apache.hadoop.hive.metastore.ReplChangeManager.getReplPolicyIdString;
import static
org.apache.hadoop.hive.ql.exec.repl.OptimisedBootstrapUtils.EVENT_ACK_FILE;
import static
org.apache.hadoop.hive.ql.exec.repl.OptimisedBootstrapUtils.TABLE_DIFF_COMPLETE_DIRECTORY;
diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java
b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java
index 06264f550f3..8775a88e304 100644
---
a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java
+++
b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java
@@ -100,7 +100,7 @@ public class LoadDatabase {
String getDbLocation(Database dbInMetadata) {
if
(context.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_RETAIN_CUSTOM_LOCATIONS_FOR_DB_ON_TARGET)
- &&
Boolean.parseBoolean(dbInMetadata.getParameters().get(ReplUtils.REPL_IS_CUSTOM_DB_LOC)))
{
+ &&
Boolean.parseBoolean(dbInMetadata.getParameters().get(ReplConst.REPL_IS_CUSTOM_DB_LOC)))
{
String locOnTarget = new
Path(dbInMetadata.getLocationUri()).toUri().getPath().toString();
LOG.info("Using the custom location {} on the target", locOnTarget);
return locOnTarget;
@@ -110,7 +110,7 @@ public class LoadDatabase {
String getDbManagedLocation(Database dbInMetadata) {
if
(context.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_RETAIN_CUSTOM_LOCATIONS_FOR_DB_ON_TARGET)
- &&
Boolean.parseBoolean(dbInMetadata.getParameters().get(ReplUtils.REPL_IS_CUSTOM_DB_MANAGEDLOC)))
{
+ &&
Boolean.parseBoolean(dbInMetadata.getParameters().get(ReplConst.REPL_IS_CUSTOM_DB_MANAGEDLOC)))
{
String locOnTarget = new
Path(dbInMetadata.getManagedLocationUri()).toUri().getPath().toString();
LOG.info("Using the custom managed location {} on the target",
locOnTarget);
return locOnTarget;
@@ -184,19 +184,19 @@ public class LoadDatabase {
parameters.remove(ReplicationSpec.KEY.CURR_STATE_ID_TARGET.toString());
- parameters.remove(ReplUtils.REPL_IS_CUSTOM_DB_LOC);
+ parameters.remove(ReplConst.REPL_IS_CUSTOM_DB_LOC);
- parameters.remove(ReplUtils.REPL_IS_CUSTOM_DB_MANAGEDLOC);
+ parameters.remove(ReplConst.REPL_IS_CUSTOM_DB_MANAGEDLOC);
// Add the checkpoint key to the Database binding it to current dump
directory.
// So, if retry using same dump, we shall skip Database object update.
- parameters.put(ReplUtils.REPL_CHECKPOINT_KEY, dumpDirectory);
+ parameters.put(ReplConst.REPL_TARGET_DB_PROPERTY, dumpDirectory);
// This flag will be set to false after first incremental load is done.
This flag is used by repl copy task to
// check if duplicate file check is required or not. This flag is used by
compaction to check if compaction can be
// done for this database or not. If compaction is done before first
incremental then duplicate check will fail as
// compaction may change the directory structure.
- parameters.put(ReplUtils.REPL_FIRST_INC_PENDING_FLAG, "true");
+ parameters.put(ReplConst.REPL_FIRST_INC_PENDING_FLAG, "true");
//This flag will be set to identify its a target of replication. Repl dump
won't be allowed on a database
//which is a target of replication.
parameters.put(ReplConst.TARGET_OF_REPLICATION, "true");
diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
index b043426c493..59373f55177 100644
---
a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
+++
b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hive.ql.exec.repl.bootstrap.load.table;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.repl.ReplConst;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.Warehouse;
@@ -63,7 +64,6 @@ import java.util.Map;
import static
org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY;
import static
org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_DUMP_SKIP_IMMUTABLE_DATA_COPY;
import static
org.apache.hadoop.hive.ql.exec.repl.bootstrap.load.ReplicationState.PartitionState;
-import static
org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils.REPL_CHECKPOINT_KEY;
import static
org.apache.hadoop.hive.ql.parse.ImportSemanticAnalyzer.isPartitioned;
import static
org.apache.hadoop.hive.ql.parse.ImportSemanticAnalyzer.partSpecToString;
@@ -221,7 +221,7 @@ public class LoadPartitions {
if (partParams == null) {
partParams = new HashMap<>();
}
- partParams.put(REPL_CHECKPOINT_KEY, context.dumpDirectory);
+ partParams.put(ReplConst.REPL_TARGET_DB_PROPERTY,
context.dumpDirectory);
Path replicaWarehousePartitionLocation =
locationOnReplicaWarehouse(table, src);
partitions.add(new AlterTableAddPartitionDesc.PartitionDesc(
src.getPartSpec(), replicaWarehousePartitionLocation.toString(),
partParams, src.getInputFormat(),
@@ -387,7 +387,7 @@ public class LoadPartitions {
AlterTableAddPartitionDesc.PartitionDesc src =
addPartitionDesc.getPartitions().get(0);
//Add check point task as part of add partition
Map<String, String> partParams = new HashMap<>();
- partParams.put(REPL_CHECKPOINT_KEY, context.dumpDirectory);
+ partParams.put(ReplConst.REPL_TARGET_DB_PROPERTY, context.dumpDirectory);
Path replicaWarehousePartitionLocation =
locationOnReplicaWarehouse(table, src);
src.setLocation(replicaWarehousePartitionLocation.toString());
src.addPartParams(partParams);
diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
index 038c6293c82..d059e6c2d5b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
@@ -86,16 +86,6 @@ import static
org.apache.hadoop.hive.ql.exec.repl.ReplAck.NON_RECOVERABLE_MARKER
public class ReplUtils {
public static final String LAST_REPL_ID_KEY = "hive.repl.last.repl.id";
- public static final String REPL_CHECKPOINT_KEY =
ReplConst.REPL_TARGET_DB_PROPERTY;
- public static final String REPL_FIRST_INC_PENDING_FLAG =
"hive.repl.first.inc.pending";
-
- // write id allocated in the current execution context which will be passed
through config to be used by different
- // tasks.
- public static final String REPL_CURRENT_TBL_WRITE_ID =
"hive.repl.current.table.write.id";
-
- public static final String REPL_IS_CUSTOM_DB_LOC =
"hive.repl.is.custom.db.loc";
-
- public static final String REPL_IS_CUSTOM_DB_MANAGEDLOC =
"hive.repl.is.custom.db.managedloc";
public static final String FUNCTIONS_ROOT_DIR_NAME = "_functions";
public static final String CONSTRAINTS_ROOT_DIR_NAME = "_constraints";
@@ -230,7 +220,7 @@ public class ReplUtils {
String dumpRoot,
ReplicationMetricCollector metricCollector,
HiveConf conf) throws
SemanticException {
HashMap<String, String> mapProp = new HashMap<>();
- mapProp.put(REPL_CHECKPOINT_KEY, dumpRoot);
+ mapProp.put(ReplConst.REPL_TARGET_DB_PROPERTY, dumpRoot);
final TableName tName = TableName.fromString(tableDesc.getTableName(),
null, tableDesc.getDatabaseName());
AlterTableSetPropertiesDesc alterTblDesc = new
AlterTableSetPropertiesDesc(tName, partSpec, null, false,
@@ -242,12 +232,13 @@ public class ReplUtils {
public static boolean replCkptStatus(String dbName, Map<String, String>
props, String dumpRoot)
throws InvalidOperationException {
// If ckpt property not set or empty means, bootstrap is not run on this
object.
- if ((props != null) && props.containsKey(REPL_CHECKPOINT_KEY) &&
!props.get(REPL_CHECKPOINT_KEY).isEmpty()) {
- if (props.get(REPL_CHECKPOINT_KEY).equals(dumpRoot)) {
+ if ((props != null) && props.containsKey(ReplConst.REPL_TARGET_DB_PROPERTY)
+ && !props.get(ReplConst.REPL_TARGET_DB_PROPERTY).isEmpty()) {
+ if (props.get(ReplConst.REPL_TARGET_DB_PROPERTY).equals(dumpRoot)) {
return true;
}
throw new
InvalidOperationException(ErrorMsg.REPL_BOOTSTRAP_LOAD_PATH_NOT_VALID.format(dumpRoot,
- props.get(REPL_CHECKPOINT_KEY)));
+ props.get(ReplConst.REPL_TARGET_DB_PROPERTY)));
}
return false;
}
@@ -364,7 +355,7 @@ public class ReplUtils {
public static boolean isFirstIncPending(Map<String, String> parameters) {
// If flag is not set, then we assume first incremental load is done as
the database/table may be created by user
// and not through replication.
- return parameters != null &&
ReplConst.TRUE.equalsIgnoreCase(parameters.get(ReplUtils.REPL_FIRST_INC_PENDING_FLAG));
+ return parameters != null &&
ReplConst.TRUE.equalsIgnoreCase(parameters.get(ReplConst.REPL_FIRST_INC_PENDING_FLAG));
}
public static EnvironmentContext
setReplDataLocationChangedFlag(EnvironmentContext envContext) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
index 34fa859b3c8..4add301fb0a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hive.common.repl.ReplConst;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.ReplChangeManager;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
@@ -35,7 +34,6 @@ import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
import org.apache.hadoop.hive.ql.exec.repl.util.StringConvertibleObject;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
@@ -44,7 +42,6 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.repl.DumpType;
-import org.apache.hadoop.hive.ql.parse.repl.dump.Utils;
import org.apache.hadoop.hive.ql.parse.repl.dump.io.DBSerializer;
import org.apache.hadoop.hive.ql.parse.repl.dump.io.JsonWriter;
import org.apache.hadoop.hive.ql.parse.repl.dump.io.ReplicationSpecSerializer;
@@ -370,10 +367,10 @@ public class EximUtil {
if (parameters != null) {
Map<String, String> tmpParameters = new HashMap<>(parameters);
tmpParameters.entrySet()
- .removeIf(e ->
e.getKey().startsWith(Utils.BOOTSTRAP_DUMP_STATE_KEY_PREFIX)
- || e.getKey().equals(ReplUtils.REPL_CHECKPOINT_KEY)
- ||
e.getKey().equals(ReplChangeManager.SOURCE_OF_REPLICATION)
- ||
e.getKey().equals(ReplUtils.REPL_FIRST_INC_PENDING_FLAG)
+ .removeIf(e ->
e.getKey().startsWith(ReplConst.BOOTSTRAP_DUMP_STATE_KEY_PREFIX)
+ ||
e.getKey().equals(ReplConst.REPL_TARGET_DB_PROPERTY)
+ ||
e.getKey().equals(ReplConst.SOURCE_OF_REPLICATION)
+ ||
e.getKey().equals(ReplConst.REPL_FIRST_INC_PENDING_FLAG)
||
e.getKey().equals(ReplConst.REPL_FAILOVER_ENDPOINT));
dbObj.setParameters(tmpParameters);
}
@@ -391,13 +388,13 @@ public class EximUtil {
MetastoreConf.getVar(conf, MetastoreConf.ConfVars.WAREHOUSE));
Path dbDerivedLoc = new Path(whLocatoion,
database.getName().toLowerCase() + DATABASE_PATH_SUFFIX);
String defaultDbLoc = Utilities.getQualifiedPath((HiveConf) conf,
dbDerivedLoc);
- database.putToParameters(ReplUtils.REPL_IS_CUSTOM_DB_LOC,
+ database.putToParameters(ReplConst.REPL_IS_CUSTOM_DB_LOC,
Boolean.toString(!defaultDbLoc.equals(database.getLocationUri())));
String whManagedLocatoion = MetastoreConf.getVar(conf,
MetastoreConf.ConfVars.WAREHOUSE);
Path dbDerivedManagedLoc = new Path(whManagedLocatoion,
database.getName().toLowerCase()
+ DATABASE_PATH_SUFFIX);
String defaultDbManagedLoc = Utilities.getQualifiedPath((HiveConf) conf,
dbDerivedManagedLoc);
- database.getParameters().put(ReplUtils.REPL_IS_CUSTOM_DB_MANAGEDLOC,
Boolean.toString(
+ database.getParameters().put(ReplConst.REPL_IS_CUSTOM_DB_MANAGEDLOC,
Boolean.toString(
!(database.getManagedLocationUri() == null
||defaultDbManagedLoc.equals(database.getManagedLocationUri()))));
} catch (HiveException ex) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
index a2a1390b013..f8276d35e5c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
@@ -21,6 +21,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.ValidTxnList;
+import org.apache.hadoop.hive.common.repl.ReplConst;
import org.apache.hadoop.hive.common.repl.ReplScope;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.ReplChangeManager;
@@ -64,7 +65,6 @@ import java.util.concurrent.Callable;
public class Utils {
private static Logger LOG = LoggerFactory.getLogger(Utils.class);
- public static final String BOOTSTRAP_DUMP_STATE_KEY_PREFIX =
"bootstrap.dump.state.";
private static final int DEF_BUF_SIZE = 8 * 1024;
public enum ReplDumpState {
@@ -274,7 +274,7 @@ public class Utils {
}
Map<String, String> newParams = new HashMap<>();
- String uniqueKey = BOOTSTRAP_DUMP_STATE_KEY_PREFIX +
UUID.randomUUID().toString();
+ String uniqueKey = ReplConst.BOOTSTRAP_DUMP_STATE_KEY_PREFIX +
UUID.randomUUID().toString();
newParams.put(uniqueKey, ReplDumpState.ACTIVE.name());
Map<String, String> params = database.getParameters();
@@ -319,7 +319,7 @@ public class Utils {
}
for (String key : params.keySet()) {
- if (key.startsWith(BOOTSTRAP_DUMP_STATE_KEY_PREFIX)
+ if (key.startsWith(ReplConst.BOOTSTRAP_DUMP_STATE_KEY_PREFIX)
&& params.get(key).equals(ReplDumpState.ACTIVE.name())) {
return true;
}
diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/PartitionSerializer.java
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/PartitionSerializer.java
index d19861276f0..281bb37d32c 100644
---
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/PartitionSerializer.java
+++
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/PartitionSerializer.java
@@ -17,9 +17,9 @@
*/
package org.apache.hadoop.hive.ql.parse.repl.dump.io;
+import org.apache.hadoop.hive.common.repl.ReplConst;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.ql.ErrorMsg;
-import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.thrift.TException;
@@ -48,7 +48,7 @@ public class PartitionSerializer implements
JsonWriter.Serializer {
Map<String, String> parameters = partition.getParameters();
if (parameters != null) {
parameters.entrySet()
- .removeIf(e ->
e.getKey().equals(ReplUtils.REPL_CHECKPOINT_KEY));
+ .removeIf(e ->
e.getKey().equals(ReplConst.REPL_TARGET_DB_PROPERTY));
}
if (additionalPropertiesProvider.isInReplicationScope()) {
diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/TableSerializer.java
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/TableSerializer.java
index 8c521946edf..3db721facf8 100644
---
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/TableSerializer.java
+++
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/TableSerializer.java
@@ -17,10 +17,10 @@
*/
package org.apache.hadoop.hive.ql.parse.repl.dump.io;
+import org.apache.hadoop.hive.common.repl.ReplConst;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.ql.ErrorMsg;
-import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -76,8 +76,8 @@ public class TableSerializer implements JsonWriter.Serializer
{
Map<String, String> parameters = table.getParameters();
if (parameters != null) {
parameters.entrySet()
- .removeIf(e -> (e.getKey().equals(ReplUtils.REPL_CHECKPOINT_KEY)
||
-
e.getKey().equals(ReplUtils.REPL_FIRST_INC_PENDING_FLAG)));
+ .removeIf(e ->
(e.getKey().equals(ReplConst.REPL_TARGET_DB_PROPERTY) ||
+
e.getKey().equals(ReplConst.REPL_FIRST_INC_PENDING_FLAG)));
}
if (additionalPropertiesProvider.isInReplicationScope()) {
diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java
index 2194f39f5a9..e32b3275dd7 100644
---
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java
+++
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AlterDatabaseHandler.java
@@ -17,8 +17,6 @@
*/
package org.apache.hadoop.hive.ql.parse.repl.load.message;
-import org.apache.hadoop.hive.common.repl.ReplConst;
-import org.apache.hadoop.hive.metastore.ReplChangeManager;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.messaging.AlterDatabaseMessage;
import org.apache.hadoop.hive.ql.ddl.DDLWork;
@@ -28,10 +26,7 @@ import
org.apache.hadoop.hive.ql.ddl.database.alter.poperties.AlterDatabaseSetPr
import org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
-import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
-import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.parse.repl.dump.Utils;
import java.util.Collections;
import java.util.HashMap;
@@ -61,18 +56,7 @@ public class AlterDatabaseHandler extends
AbstractMessageHandler {
Map<String, String> dbProps = newDb.getParameters();
for (Map.Entry<String, String> entry : dbProps.entrySet()) {
- String key = entry.getKey();
- // Ignore the keys which are local to source warehouse
- if (key.startsWith(Utils.BOOTSTRAP_DUMP_STATE_KEY_PREFIX)
- ||
key.equals(ReplicationSpec.KEY.CURR_STATE_ID_SOURCE.toString())
- ||
key.equals(ReplicationSpec.KEY.CURR_STATE_ID_TARGET.toString())
- || key.equals(ReplUtils.REPL_CHECKPOINT_KEY)
- || key.equals(ReplChangeManager.SOURCE_OF_REPLICATION)
- || key.equals(ReplUtils.REPL_FIRST_INC_PENDING_FLAG)
- || key.equals(ReplConst.REPL_FAILOVER_ENDPOINT)) {
- continue;
- }
- newDbProps.put(key, entry.getValue());
+ newDbProps.put(entry.getKey(), entry.getValue());
}
alterDbDesc = new AlterDatabaseSetPropertiesDesc(actualDbName,
newDbProps, context.eventOnlyReplicationSpec());
} else {
diff --git
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/repl/ReplConst.java
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/repl/ReplConst.java
index f5131ca060b..627c965e256 100644
---
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/repl/ReplConst.java
+++
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/repl/ReplConst.java
@@ -58,4 +58,14 @@ public class ReplConst {
* Tracks the event id with respect to the target cluster.
*/
public static final String REPL_TARGET_DATABASE_PROPERTY =
"repl.target.last.id";
+
+ public static final String SOURCE_OF_REPLICATION = "repl.source.for";
+
+ public static final String REPL_FIRST_INC_PENDING_FLAG =
"hive.repl.first.inc.pending";
+
+ public static final String REPL_IS_CUSTOM_DB_LOC =
"hive.repl.is.custom.db.loc";
+
+ public static final String REPL_IS_CUSTOM_DB_MANAGEDLOC =
"hive.repl.is.custom.db.managedloc";
+
+ public static final String BOOTSTRAP_DUMP_STATE_KEY_PREFIX =
"bootstrap.dump.state.";
}
diff --git
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
index 04c47516214..3436fbc5060 100644
---
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
+++
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
@@ -45,6 +45,7 @@ import static
org.apache.hadoop.fs.permission.AclEntryType.OTHER;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hive.common.repl.ReplConst;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Table;
@@ -74,7 +75,7 @@ public class ReplChangeManager {
private static final String ORIG_LOC_TAG = "user.original-loc";
static final String REMAIN_IN_TRASH_TAG = "user.remain-in-trash";
private static final String URI_FRAGMENT_SEPARATOR = "#";
- public static final String SOURCE_OF_REPLICATION = "repl.source.for";
+ public static final String SOURCE_OF_REPLICATION =
ReplConst.SOURCE_OF_REPLICATION;
private static final String TXN_WRITE_EVENT_FILE_SEPARATOR = "]";
static final String CM_THREAD_NAME_PREFIX = "cmclearer-";
private static final String NO_ENCRYPTION = "noEncryption";
@@ -552,8 +553,8 @@ public class ReplChangeManager {
public static String getReplPolicyIdString(Database db) {
if (db != null) {
Map<String, String> m = db.getParameters();
- if ((m != null) && (m.containsKey(SOURCE_OF_REPLICATION))) {
- String replPolicyId = m.get(SOURCE_OF_REPLICATION);
+ if ((m != null) && (m.containsKey(ReplConst.SOURCE_OF_REPLICATION))) {
+ String replPolicyId = m.get(ReplConst.SOURCE_OF_REPLICATION);
LOG.debug("repl policy for database {} is {}", db.getName(),
replPolicyId);
return replPolicyId;
}
diff --git
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
index a6272071cac..06aaa5dd2e6 100644
---
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
+++
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hive.metastore.utils;
import java.io.File;
+import java.lang.reflect.Modifier;
import java.net.URL;
import java.net.URLClassLoader;
import java.security.AccessController;
@@ -302,6 +303,20 @@ public class MetaStoreUtils {
return false;
}
+ public static List<String> getReplicationDbProps() {
+ return Arrays.stream(ReplConst.class.getDeclaredFields())
+ .filter(field -> Modifier.isStatic(field.getModifiers()))
+ .map(field -> {
+ try {
+ String prop = (String) field.get(String.class);
+ return prop.replace("\"", "");
+ } catch (IllegalAccessException e) {
+ LOG.error("Failed to collect replication specific properties.
Reason: ", e);
+ throw new RuntimeException(e);
+ }
+ }).collect(Collectors.toList());
+ }
+
/**
* Determines whether an table needs to be purged or not.
*
diff --git
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
index c1199df6f95..5b072093082 100644
---
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
+++
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
@@ -1168,15 +1168,8 @@ public class HMSHandler extends FacebookBase implements
IHMSHandler {
}
static boolean isDbReplicationTarget(Database db) {
- if (db.getParameters() == null) {
- return false;
- }
-
- if (!db.getParameters().containsKey(ReplConst.REPL_TARGET_DB_PROPERTY)) {
- return false;
- }
-
- return
!db.getParameters().get(ReplConst.REPL_TARGET_DB_PROPERTY).trim().isEmpty();
+ String dbCkptStatus = (db.getParameters() == null) ? null :
db.getParameters().get(ReplConst.REPL_TARGET_DB_PROPERTY);
+ return dbCkptStatus != null && !dbCkptStatus.trim().isEmpty();
}
// Assumes that the catalog has already been set.
@@ -1501,10 +1494,11 @@ public class HMSHandler extends FacebookBase implements
IHMSHandler {
ms.alterDatabase(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], newDB);
if (!transactionalListeners.isEmpty()) {
- transactionalListenersResponses =
- MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
- EventType.ALTER_DATABASE,
- new AlterDatabaseEvent(oldDB, newDB, true, this,
isReplicated));
+ AlterDatabaseEvent event = new AlterDatabaseEvent(oldDB, newDB, true,
this, isReplicated);
+ if (!event.shouldSkipCapturing()) {
+ transactionalListenersResponses =
+
MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
EventType.ALTER_DATABASE, event);
+ }
}
success = ms.commitTransaction();
@@ -1517,11 +1511,11 @@ public class HMSHandler extends FacebookBase implements
IHMSHandler {
}
if ((null != oldDB) && (!listeners.isEmpty())) {
- MetaStoreListenerNotifier.notifyEvent(listeners,
- EventType.ALTER_DATABASE,
- new AlterDatabaseEvent(oldDB, newDB, success, this, isReplicated),
- null,
- transactionalListenersResponses, ms);
+ AlterDatabaseEvent event = new AlterDatabaseEvent(oldDB, newDB,
success, this, isReplicated);
+ if (!event.shouldSkipCapturing()) {
+ MetaStoreListenerNotifier.notifyEvent(listeners,
+ EventType.ALTER_DATABASE, event, null,
transactionalListenersResponses, ms);
+ }
}
endFunction("alter_database", success, ex);
}
diff --git
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AlterDatabaseEvent.java
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AlterDatabaseEvent.java
index fd14952abba..df42a53d0f3 100644
---
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AlterDatabaseEvent.java
+++
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/AlterDatabaseEvent.java
@@ -20,8 +20,14 @@ package org.apache.hadoop.hive.metastore.events;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.common.repl.ReplConst;
import org.apache.hadoop.hive.metastore.IHMSHandler;
import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
/**
* AlterDatabaseEvent.
@@ -34,15 +40,53 @@ public class AlterDatabaseEvent extends ListenerEvent {
private final Database oldDb;
private final Database newDb;
private final boolean isReplicated;
+ private final List<String> replDbProps;
public AlterDatabaseEvent(Database oldDb, Database newDb, boolean status,
IHMSHandler handler,
boolean isReplicated) {
super(status, handler);
- this.oldDb = oldDb;
- this.newDb = newDb;
+ replDbProps = MetaStoreUtils.getReplicationDbProps();
+ this.oldDb = new Database(oldDb);
+ this.newDb = new Database(newDb);
+ // Replication-specific properties should not be captured in this event,
+ // so these properties are filtered out of both database snapshots.
+ filterOutReplProps(this.oldDb.getParameters());
+ filterOutReplProps(this.newDb.getParameters());
this.isReplicated = isReplicated;
}
+ private void filterOutReplProps(Map<String, String> dbProps) {
+ if (dbProps == null) {
+ return;
+ }
+ List<String> propsToRemove = new ArrayList<>();
+ for (Map.Entry<String, String> prop : dbProps.entrySet()) {
+ String propName = prop.getKey().replace("\"", "");
+ if (propName.startsWith(ReplConst.BOOTSTRAP_DUMP_STATE_KEY_PREFIX) ||
replDbProps.contains(propName)) {
+ propsToRemove.add(prop.getKey());
+ }
+ }
+ for (String key:propsToRemove) {
+ dbProps.remove(key);
+ }
+ }
+
+ /**
+ * @return whether this AlterDatabaseEvent should be skipped (not logged).
+ * Events that alter only the replication-specific properties of a
+ * database should not be captured.
+ */
+ public boolean shouldSkipCapturing() {
+ if ((oldDb.getOwnerType() == newDb.getOwnerType())
+ && oldDb.getOwnerName().equalsIgnoreCase(newDb.getOwnerName())) {
+ // Owner information is unchanged, so the only remaining possible change
+ // is to the DB properties; compare them (post repl-property filtering).
+ Map<String, String> newDbProps = newDb.getParameters();
+ Map<String, String> oldDbProps = oldDb.getParameters();
+ return (newDbProps == null || newDbProps.isEmpty()) ? (oldDbProps ==
null || oldDbProps.isEmpty())
+ : newDbProps.equals(oldDbProps);
+ }
+ return false;
+ }
+
/**
* @return the old db
*/
diff --git
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MDatabase.java
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MDatabase.java
index 142dfa8dd66..1191e53b483 100644
---
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MDatabase.java
+++
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MDatabase.java
@@ -21,7 +21,7 @@
*/
package org.apache.hadoop.hive.metastore.model;
-import org.apache.hadoop.hive.metastore.ReplChangeManager;
+import org.apache.hadoop.hive.common.repl.ReplConst;
import java.util.HashMap;
import java.util.HashSet;
@@ -174,9 +174,9 @@ public class MDatabase {
Set<String> keys = new HashSet<>(parameters.keySet());
for(String key : keys) {
// Normalize the case for source of replication parameter
- if (ReplChangeManager.SOURCE_OF_REPLICATION.equalsIgnoreCase(key)) {
+ if (ReplConst.SOURCE_OF_REPLICATION.equalsIgnoreCase(key)) {
// TODO : Some extra validation can also be added as this is a user
provided parameter.
- this.parameters.put(ReplChangeManager.SOURCE_OF_REPLICATION,
parameters.get(key));
+ this.parameters.put(ReplConst.SOURCE_OF_REPLICATION,
parameters.get(key));
} else {
this.parameters.put(key, parameters.get(key));
}