This is an automated email from the ASF dual-hosted git repository.
tchoi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
     new 0d8ca3ca0bd HIVE-26921: Add failover_type and failover_endpoint as new member to metadata column in replication_metrics table (#3999) (Amit Saonerkar, reviewed by Teddy Choi)
0d8ca3ca0bd is described below
commit 0d8ca3ca0bd5d3e8ed53cf1bc4e6dfd7efa0fd3f
Author: atsaonerk <[email protected]>
AuthorDate: Fri Feb 3 13:47:52 2023 +0530
    HIVE-26921: Add failover_type and failover_endpoint as new member to metadata column in replication_metrics table (#3999) (Amit Saonerkar, reviewed by Teddy Choi)
---
.../parse/TestReplicationOptimisedBootstrap.java | 2 +-
.../hive/ql/exec/repl/OptimisedBootstrapUtils.java | 2 +-
.../hadoop/hive/ql/exec/repl/ReplDumpTask.java | 67 +++++++++++++++-------
.../hadoop/hive/ql/exec/repl/ReplLoadTask.java | 5 --
.../incremental/IncrementalLoadTasksBuilder.java | 17 +++++-
.../hadoop/hive/ql/exec/repl/util/ReplUtils.java | 15 +++--
.../hive/ql/parse/ReplicationSemanticAnalyzer.java | 38 +++++++++---
.../OptimizedBootstrapDumpMetricCollector.java | 6 +-
.../PreOptimizedBootstrapDumpMetricCollector.java | 5 +-
.../OptimizedBootstrapLoadMetricCollector.java | 5 +-
.../PreOptimizedBootstrapLoadMetricCollector.java | 5 +-
.../repl/metric/ReplicationMetricCollector.java | 24 +++++++-
.../hive/ql/parse/repl/metric/event/Metadata.java | 12 ++++
.../metric/TestReplicationMetricCollector.java | 23 +++++---
.../repl/metric/TestReplicationMetricSink.java | 8 ++-
.../llap/replication_metrics_ingest.q.out | 4 +-
.../apache/hadoop/hive/common/repl/ReplConst.java | 10 ++++
17 files changed, 186 insertions(+), 62 deletions(-)
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOptimisedBootstrap.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOptimisedBootstrap.java
index a55b7c8a5b4..1068fe4ecba 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOptimisedBootstrap.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOptimisedBootstrap.java
@@ -547,7 +547,7 @@ public class TestReplicationOptimisedBootstrap extends BaseReplicationScenariosA
     // this load should throw exception
     List<String> finalWithClause = withClause;
-    assertThrows("Should fail with db doesn't exist exception", HiveException.class, () -> {
+    assertThrows("Should fail with db doesn't exist exception", SemanticException.class, () -> {
       primary.load(primaryDbName, replicatedDbName, finalWithClause);
     });
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/OptimisedBootstrapUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/OptimisedBootstrapUtils.java
index 9ff0d244bbd..13ecf255718 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/OptimisedBootstrapUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/OptimisedBootstrapUtils.java
@@ -88,7 +88,7 @@ public class OptimisedBootstrapUtils {
    * @return true, if the database has repl.target.for property set.
    * @throws HiveException
    */
-  public static boolean isFailover(String dbName, Hive hive) throws HiveException {
+  public static boolean isDbTargetOfFailover(String dbName, Hive hive) throws HiveException {
     Database database = hive.getDatabase(dbName);
     return database != null ? MetaStoreUtils.isTargetOfReplication(database) : false;
   }
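
For context, the renamed helper only inspects the database's parameter map; a minimal standalone sketch of the same check, assuming the repl.target.for property key (the committed code instead delegates to MetaStoreUtils.isTargetOfReplication):

    import java.util.Map;
    import org.apache.hadoop.hive.metastore.api.Database;

    public final class FailoverTargetCheckSketch {
      // Assumed property key: the database-level marker for a replication target.
      private static final String TARGET_OF_REPLICATION = "repl.target.for";

      // True when the database is marked as the target of a replication policy.
      public static boolean isTargetOfReplication(Database db) {
        Map<String, String> params = (db == null) ? null : db.getParameters();
        String value = (params == null) ? null : params.get(TARGET_OF_REPLICATION);
        return value != null && !value.trim().isEmpty();
      }
    }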
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
index a2b1a900ae9..edbe52a2038 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
@@ -145,7 +145,7 @@ import static org.apache.hadoop.hive.ql.exec.repl.OptimisedBootstrapUtils.getEve
 import static org.apache.hadoop.hive.ql.exec.repl.OptimisedBootstrapUtils.getReplEventIdFromDatabase;
 import static org.apache.hadoop.hive.ql.exec.repl.OptimisedBootstrapUtils.getTablesFromTableDiffFile;
 import static org.apache.hadoop.hive.ql.exec.repl.OptimisedBootstrapUtils.getTargetEventId;
-import static org.apache.hadoop.hive.ql.exec.repl.OptimisedBootstrapUtils.isFailover;
+import static org.apache.hadoop.hive.ql.exec.repl.OptimisedBootstrapUtils.isDbTargetOfFailover;
 import static org.apache.hadoop.hive.ql.exec.repl.OptimisedBootstrapUtils.isFirstIncrementalPending;
 import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.LOAD_ACKNOWLEDGEMENT;
 import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.NON_RECOVERABLE_MARKER;
@@ -208,28 +208,42 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
     }
     Path previousValidHiveDumpPath = getPreviousValidDumpMetadataPath(dumpRoot);
     boolean isFailoverMarkerPresent = false;
-    boolean isFailover = isFailover(work.dbNameOrPattern, getHive());
-    LOG.debug("Database is {} going through failover", isFailover ? "" : "not");
-    if (previousValidHiveDumpPath == null && !isFailover) {
+    boolean isFailoverTarget = isDbTargetOfFailover(work.dbNameOrPattern, getHive());
+    LOG.debug("Database {} is {} going through failover", work.dbNameOrPattern, isFailoverTarget ? "" : "not");
+    if (previousValidHiveDumpPath == null && !isFailoverTarget) {
       work.setBootstrap(true);
     } else {
-      work.setOldReplScope(isFailover ? null : new DumpMetaData(previousValidHiveDumpPath, conf).getReplScope());
-      isFailoverMarkerPresent = !isFailover && isDumpFailoverReady(previousValidHiveDumpPath);
+      work.setOldReplScope(isFailoverTarget ? null : new DumpMetaData(previousValidHiveDumpPath, conf).getReplScope());
+      isFailoverMarkerPresent = !isFailoverTarget && isDumpFailoverReady(previousValidHiveDumpPath);
     }
     //Proceed with dump operation in following cases:
     //1. No previous dump is present.
     //2. Previous dump is already loaded and it is not in failover ready status.
-    if (shouldDump(previousValidHiveDumpPath, isFailoverMarkerPresent, isFailover)) {
+    if (shouldDump(previousValidHiveDumpPath, isFailoverMarkerPresent, isFailoverTarget)) {
       Path currentDumpPath = getCurrentDumpPath(dumpRoot, work.isBootstrap());
       Path hiveDumpRoot = new Path(currentDumpPath, ReplUtils.REPL_HIVE_BASE_DIR);
-      if (!work.isBootstrap() && !isFailover) {
+      if (!work.isBootstrap() && !isFailoverTarget) {
         preProcessFailoverIfRequired(previousValidHiveDumpPath, isFailoverMarkerPresent);
       }
+      // check if we need to create the event marker
+      if (previousValidHiveDumpPath == null) {
+        createEventMarker = isFailoverTarget;
+      } else {
+        if (isFailoverTarget) {
+          boolean isEventAckFilePresent = checkFileExists(previousValidHiveDumpPath.getParent(), conf, EVENT_ACK_FILE);
+          if (!isEventAckFilePresent) {
+            // If this is the optimised bootstrap failover cycle and the _event_ack file is not present, create it.
+            createEventMarker = true;
+          }
+        }
+      }
       // Set distCp custom name corresponding to the replication policy.
       String mapRedCustomName = ReplUtils.getDistCpCustomName(conf, work.dbNameOrPattern);
       conf.set(JobContext.JOB_NAME, mapRedCustomName);
       work.setCurrentDumpPath(currentDumpPath);
-      work.setMetricCollector(initMetricCollection(work.isBootstrap(), hiveDumpRoot, isFailover));
+      // Initialize the repl dump metric collector for all replication stages (bootstrap, incremental, pre-optimised and optimised bootstrap).
+      ReplicationMetricCollector dumpMetricCollector = initReplicationDumpMetricCollector(hiveDumpRoot, work.isBootstrap(), createEventMarker /*isPreOptimisedBootstrap*/, isFailoverTarget);
+      work.setMetricCollector(dumpMetricCollector);
       if (shouldDumpAtlasMetadata()) {
         addAtlasDumpTask(work.isBootstrap(), previousValidHiveDumpPath);
         LOG.info("Added task to dump atlas metadata.");
@@ -243,7 +257,7 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
     Path cmRoot = new Path(conf.getVar(HiveConf.ConfVars.REPLCMDIR));
     Long lastReplId;
     LOG.info("Data copy at load enabled : {}", conf.getBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET));
-    if (isFailover) {
+    if (isFailoverTarget) {
       if (createEventMarker) {
         LOG.info("Optimised Bootstrap Dump triggered for {}.", work.dbNameOrPattern);
         // Before starting optimised bootstrap, check if the first incremental is done to ensure database is in
@@ -272,8 +286,6 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
         assert isTableDiffDirectoryPresent;
         work.setSecondDumpAfterFailover(true);
-        long executorId = conf.getLong(Constants.SCHEDULED_QUERY_EXECUTIONID, 0L);
-        work.setMetricCollector(new OptimizedBootstrapDumpMetricCollector(work.dbNameOrPattern, dumpRoot.toString(), conf, executorId));
         long fromEventId = Long.parseLong(getEventIdFromFile(previousValidHiveDumpPath.getParent(), conf)[1]);
         LOG.info("Starting optimised bootstrap from event id {} for database {}", fromEventId,
             work.dbNameOrPattern);
@@ -617,7 +629,6 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
      * skip doing any further dump.
      */
     if (previousDumpPath == null) {
-      createEventMarker = isFailover;
       return true;
     } else if (isFailoverMarkerPresent && shouldFailover()) {
       return false;
@@ -632,7 +643,6 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
       // we need to trigger the failover dump
       LOG.debug("EVENT_ACK file not found in {}. Proceeding with OptimisedBootstrap Failover",
           previousDumpPath.getParent());
-      createEventMarker = true;
       return true;
     }
     // Event_ACK file is present check if it contains correct value or not.
@@ -879,8 +889,16 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
     if (size > 0) {
       metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) tablesForBootstrap.size());
     }
-    if (conf.getBoolVar(HiveConf.ConfVars.HIVE_REPL_FAILOVER_START)) {
-      work.getMetricCollector().reportFailoverStart(getName(), metricMap, work.getFailoverMetadata());
+    if (shouldFailover()) {
+      Map<String, String> params = db.getParameters();
+      String dbFailoverEndPoint = "";
+      if (params != null) {
+        dbFailoverEndPoint = params.get(ReplConst.REPL_FAILOVER_ENDPOINT);
+        LOG.debug("Replication Metrics: setting failover endpoint to {}", dbFailoverEndPoint);
+      } else {
+        LOG.warn("Replication Metrics: cannot obtain failover endpoint info, setting failover endpoint to null");
+      }
+      work.getMetricCollector().reportFailoverStart(getName(), metricMap, work.getFailoverMetadata(), dbFailoverEndPoint, ReplConst.FailoverType.PLANNED.toString());
     } else {
       work.getMetricCollector().reportStageStart(getName(), metricMap);
     }
@@ -1071,17 +1089,24 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
     }
   }

-  private ReplicationMetricCollector initMetricCollection(boolean isBootstrap, Path dumpRoot, boolean isFailover) {
+  private ReplicationMetricCollector initReplicationDumpMetricCollector(Path dumpRoot, boolean isBootstrap, boolean isPreOptimisedBootstrap, boolean isFailover) throws HiveException {
     ReplicationMetricCollector collector;
     long executorId = conf.getLong(Constants.SCHEDULED_QUERY_EXECUTIONID, 0L);
     if (isBootstrap) {
       collector = new BootstrapDumpMetricCollector(work.dbNameOrPattern, dumpRoot.toString(), conf, executorId);
-    } else {
-      if (isFailover) {
-        collector = new PreOptimizedBootstrapDumpMetricCollector(work.dbNameOrPattern, dumpRoot.toString(), conf, executorId);
+    } else if (isFailover) {
+      // The db property ReplConst.FAILOVER_ENDPOINT is only set during planned failover.
+      String failoverType = MetaStoreUtils.isDbBeingFailedOver(getHive().getDatabase(work.dbNameOrPattern)) ?
+          ReplConst.FailoverType.PLANNED.toString() : ReplConst.FailoverType.UNPLANNED.toString();
+      if (isPreOptimisedBootstrap) {
+        collector = new PreOptimizedBootstrapDumpMetricCollector(work.dbNameOrPattern, dumpRoot.toString(), conf, executorId,
+            MetaStoreUtils.FailoverEndpoint.SOURCE.toString(), failoverType);
       } else {
-        collector = new IncrementalDumpMetricCollector(work.dbNameOrPattern, dumpRoot.toString(), conf, executorId);
+        collector = new OptimizedBootstrapDumpMetricCollector(work.dbNameOrPattern, dumpRoot.toString(), conf, executorId,
+            MetaStoreUtils.FailoverEndpoint.SOURCE.toString(), failoverType);
       }
+    } else {
+      collector = new IncrementalDumpMetricCollector(work.dbNameOrPattern, dumpRoot.toString(), conf, executorId);
     }
     return collector;
   }
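
The dump-side collector selection above can be read as a single decision chain; a condensed sketch with the task state passed in as plain parameters (hypothetical helper, constructor signatures as introduced by this patch):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
    import org.apache.hadoop.hive.ql.parse.repl.dump.metric.BootstrapDumpMetricCollector;
    import org.apache.hadoop.hive.ql.parse.repl.dump.metric.IncrementalDumpMetricCollector;
    import org.apache.hadoop.hive.ql.parse.repl.dump.metric.OptimizedBootstrapDumpMetricCollector;
    import org.apache.hadoop.hive.ql.parse.repl.dump.metric.PreOptimizedBootstrapDumpMetricCollector;
    import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;

    final class DumpCollectorSelectionSketch {
      // Condensed restatement of initReplicationDumpMetricCollector's branching: the
      // endpoint is always SOURCE on the dump side, and failoverType is PLANNED only
      // while the db still carries the repl.failover.endpoint property.
      static ReplicationMetricCollector choose(String dbName, String dumpRoot, HiveConf conf,
          long executorId, boolean isBootstrap, boolean isPreOptimisedBootstrap,
          boolean isFailover, String failoverType) {
        if (isBootstrap) {
          return new BootstrapDumpMetricCollector(dbName, dumpRoot, conf, executorId);
        }
        if (isFailover) {
          String endpoint = MetaStoreUtils.FailoverEndpoint.SOURCE.toString();
          return isPreOptimisedBootstrap
              ? new PreOptimizedBootstrapDumpMetricCollector(dbName, dumpRoot, conf, executorId, endpoint, failoverType)
              : new OptimizedBootstrapDumpMetricCollector(dbName, dumpRoot, conf, executorId, endpoint, failoverType);
        }
        return new IncrementalDumpMetricCollector(dbName, dumpRoot, conf, executorId);
      }
    }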
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java
index 83fda3a22b9..f577b169d44 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java
@@ -761,11 +761,6 @@ public class ReplLoadTask extends Task<ReplLoadWork> implements Serializable {
     }
     Database targetDb = getHive().getDatabase(work.dbNameToLoadIn);
     Map<String, String> props = new HashMap<>();
-
-    if (targetDb == null) {
-      throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, work.dbNameToLoadIn);
-    }
-
     // check if db is set READ_ONLY, if not then set it. Basically this ensures backward
     // compatibility.
     if (!isDbReadOnly(targetDb) && isReadOnlyHookRegistered()) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java
index 31a53054028..c04f8c9465a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.exec.repl.incremental;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.repl.ReplConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.ql.Context;
@@ -99,8 +100,22 @@ public class IncrementalLoadTasksBuilder {
     metricMap.put(ReplUtils.MetricName.EVENTS.name(), (long) iterator.getNumEvents());
     this.shouldFailover = shouldFailover;
     if (shouldFailover) {
+      Database db = null;
+      try {
+        db = Hive.get().getDatabase(dbName);
+      } catch (HiveException e) {
+        throw new RuntimeException(e);
+      }
+      String dbFailoverEndPoint = "";
+      if (db != null) {
+        Map<String, String> params = db.getParameters();
+        if (params != null) {
+          dbFailoverEndPoint = params.get(ReplConst.REPL_FAILOVER_ENDPOINT);
+        }
+      }
       this.metricCollector.reportFailoverStart("REPL_LOAD", metricMap,
-          new FailoverMetaData(new Path(dumpDirectory, ReplUtils.REPL_HIVE_BASE_DIR), conf));
+          new FailoverMetaData(new Path(dumpDirectory, ReplUtils.REPL_HIVE_BASE_DIR), conf),
+          dbFailoverEndPoint, ReplConst.FailoverType.PLANNED.toString());
     } else {
       //Registering table metric as we do boostrap of selective tables
       // in second load cycle of optimized bootstrap
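
The endpoint lookup above tolerates a missing database or parameter map; a standalone sketch of that defensive read (hypothetical helper, which additionally normalizes an absent property to the empty string):

    import java.util.Map;
    import org.apache.hadoop.hive.common.repl.ReplConst;
    import org.apache.hadoop.hive.metastore.api.Database;

    final class FailoverEndpointLookupSketch {
      // Returns the database's repl.failover.endpoint value, or "" when the database,
      // its parameters, or the property itself are unavailable.
      static String failoverEndpointOf(Database db) {
        Map<String, String> params = (db == null) ? null : db.getParameters();
        String endpoint = (params == null) ? null : params.get(ReplConst.REPL_FAILOVER_ENDPOINT);
        return (endpoint == null) ? "" : endpoint;
      }
    }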
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
index 607804449b9..b884030723d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java
@@ -534,13 +534,20 @@ public class ReplUtils {
     }
   }

+  /**
+   * Reports the status of a replication stage which is skipped or has some error.
+   * @param stageName name of the replication stage
+   * @param status stage status, e.g. skipped or FAILED
+   * @param errorLogPath path of the error log file
+   * @param conf configuration handle
+   * @param dbName name of the database
+   * @param replicationType type of replication: incremental, bootstrap, etc.
+   * @throws SemanticException
+   */
   public static void reportStatusInReplicationMetrics(String stageName, Status status, String errorLogPath,
                                                       HiveConf conf, String dbName, Metadata.ReplicationType replicationType)
       throws SemanticException {
-    ReplicationMetricCollector metricCollector;
-    metricCollector =
-        new ReplicationMetricCollector(dbName, replicationType, null, 0, conf) {};
-
+    ReplicationMetricCollector metricCollector = new ReplicationMetricCollector(dbName, replicationType, null, 0, conf) {};
     metricCollector.reportStageStart(stageName, new HashMap<>());
     metricCollector.reportStageEnd(stageName, status, errorLogPath);
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
index aad3a8aac4c..65c5c344555 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
@@ -360,11 +360,12 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
         LOG.debug("{} contains an bootstrap dump", loadPath);
       }

+      ReplicationMetricCollector metricCollector = initReplicationLoadMetricCollector(loadPath.toString(), replScope.getDbName(), dmd);
       ReplLoadWork replLoadWork = new ReplLoadWork(conf, loadPath.toString(), sourceDbNameOrPattern,
           replScope.getDbName(), dmd.getReplScope(),
           queryState.getLineageState(), evDump, dmd.getEventTo(), dmd.getDumpExecutionId(),
-          initMetricCollection(loadPath.toString(), replScope.getDbName(), dmd), dmd.isReplScopeModified());
+          metricCollector, dmd.isReplScopeModified());
       rootTasks.add(TaskFactory.get(replLoadWork, conf));
       if (dmd.isPreOptimizedBootstrapDump()) {
         dmd.setOptimizedBootstrapToDumpMetadataFile();
@@ -379,14 +380,35 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
     }
   }

-  private ReplicationMetricCollector initMetricCollection(String dumpDirectory,
-                                                          String dbNameToLoadIn, DumpMetaData dmd) throws SemanticException {
-
+  private ReplicationMetricCollector initReplicationLoadMetricCollector(String dumpDirectory, String dbNameToLoadIn,
+                                                                        DumpMetaData dmd) throws SemanticException {
     ReplicationMetricCollector collector;
-    if (dmd.isPreOptimizedBootstrapDump()) {
-      collector = new PreOptimizedBootstrapLoadMetricCollector(dbNameToLoadIn, dumpDirectory, dmd.getDumpExecutionId(), conf);
-    } else if (dmd.isOptimizedBootstrapDump()) {
-      collector = new OptimizedBootstrapLoadMetricCollector(dbNameToLoadIn, dumpDirectory, dmd.getDumpExecutionId(), conf);
+    if (dmd.isPreOptimizedBootstrapDump() || dmd.isOptimizedBootstrapDump()) {
+      Database dbToLoad = null;
+      try {
+        dbToLoad = db.getDatabase(dbNameToLoadIn);
+      } catch (HiveException e) {
+        throw new SemanticException(e.getMessage(), e);
+      }
+      if (dbToLoad == null) {
+        throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS, dbNameToLoadIn);
+      }
+      // The db property ReplConst.FAILOVER_ENDPOINT is only set during planned failover.
+      String failoverType = "";
+      try {
+        // check whether ReplConst.FAILOVER_ENDPOINT is set
+        failoverType = MetaStoreUtils.isDbBeingFailedOver(db.getDatabase(dbNameToLoadIn)) ? ReplConst.FailoverType.PLANNED.toString() : ReplConst.FailoverType.UNPLANNED.toString();
+      } catch (HiveException e) {
+        throw new RuntimeException(e);
+      }
+      if (dmd.isPreOptimizedBootstrapDump()) {
+        collector = new PreOptimizedBootstrapLoadMetricCollector(dbNameToLoadIn, dumpDirectory, dmd.getDumpExecutionId(), conf,
+            MetaStoreUtils.FailoverEndpoint.TARGET.toString(), failoverType);
+      } else {
+        collector = new OptimizedBootstrapLoadMetricCollector(dbNameToLoadIn, dumpDirectory, dmd.getDumpExecutionId(), conf,
+            MetaStoreUtils.FailoverEndpoint.TARGET.toString(), failoverType);
+      }
     } else if (dmd.isBootstrapDump()) {
       collector = new BootstrapLoadMetricCollector(dbNameToLoadIn, dumpDirectory, dmd.getDumpExecutionId(), conf);
     } else {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/OptimizedBootstrapDumpMetricCollector.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/OptimizedBootstrapDumpMetricCollector.java
index ac88cbb541b..b85ab2e8d23 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/OptimizedBootstrapDumpMetricCollector.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/OptimizedBootstrapDumpMetricCollector.java
@@ -23,7 +23,9 @@ import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata;

 public class OptimizedBootstrapDumpMetricCollector extends ReplicationMetricCollector {
-  public OptimizedBootstrapDumpMetricCollector(String dbName, String stagingDir, HiveConf conf, Long executorId) {
-    super(dbName, Metadata.ReplicationType.OPTIMIZED_BOOTSTRAP, stagingDir, executorId, conf);
+  public OptimizedBootstrapDumpMetricCollector(String dbName, String stagingDir, HiveConf conf, Long executorId,
+                                               String failoverEndPoint, String failoverType) {
+    super(dbName, Metadata.ReplicationType.OPTIMIZED_BOOTSTRAP, stagingDir, executorId, conf, failoverEndPoint,
+        failoverType);
   }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/PreOptimizedBootstrapDumpMetricCollector.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/PreOptimizedBootstrapDumpMetricCollector.java
index 0684ba9d9de..47bccd6f78a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/PreOptimizedBootstrapDumpMetricCollector.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/metric/PreOptimizedBootstrapDumpMetricCollector.java
@@ -23,7 +23,8 @@ import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata;

 public class PreOptimizedBootstrapDumpMetricCollector extends ReplicationMetricCollector {
-  public PreOptimizedBootstrapDumpMetricCollector(String dbName, String stagingDir, HiveConf conf, Long executorId) {
-    super(dbName, Metadata.ReplicationType.PRE_OPTIMIZED_BOOTSTRAP, stagingDir, executorId, conf);
+  public PreOptimizedBootstrapDumpMetricCollector(String dbName, String stagingDir, HiveConf conf, Long executorId,
+                                                  String failoverEndpoint, String failOverType) {
+    super(dbName, Metadata.ReplicationType.PRE_OPTIMIZED_BOOTSTRAP, stagingDir, executorId, conf, failoverEndpoint, failOverType);
   }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/OptimizedBootstrapLoadMetricCollector.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/OptimizedBootstrapLoadMetricCollector.java
index 8a582772804..38fdde6314b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/OptimizedBootstrapLoadMetricCollector.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/OptimizedBootstrapLoadMetricCollector.java
@@ -26,7 +26,8 @@ import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata;
  * Bootstrap Load Metric Collector
  */
 public class OptimizedBootstrapLoadMetricCollector extends ReplicationMetricCollector {
-  public OptimizedBootstrapLoadMetricCollector(String dbName, String stagingDir, long dumpExecutionId, HiveConf conf) {
-    super(dbName, Metadata.ReplicationType.OPTIMIZED_BOOTSTRAP, stagingDir, dumpExecutionId, conf);
+  public OptimizedBootstrapLoadMetricCollector(String dbName, String stagingDir, long dumpExecutionId, HiveConf conf,
+                                               String failoverEndpoint, String failoverType) {
+    super(dbName, Metadata.ReplicationType.OPTIMIZED_BOOTSTRAP, stagingDir, dumpExecutionId, conf, failoverEndpoint, failoverType);
   }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/PreOptimizedBootstrapLoadMetricCollector.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/PreOptimizedBootstrapLoadMetricCollector.java
index 3308550c0a3..67bb4bf2729 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/PreOptimizedBootstrapLoadMetricCollector.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/metric/PreOptimizedBootstrapLoadMetricCollector.java
@@ -23,7 +23,8 @@ import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata;

 public class PreOptimizedBootstrapLoadMetricCollector extends ReplicationMetricCollector {
-  public PreOptimizedBootstrapLoadMetricCollector(String dbName, String stagingDir, long dumpExecutionId, HiveConf conf) {
-    super(dbName, Metadata.ReplicationType.PRE_OPTIMIZED_BOOTSTRAP, stagingDir, dumpExecutionId, conf);
+  public PreOptimizedBootstrapLoadMetricCollector(String dbName, String stagingDir, long dumpExecutionId, HiveConf conf,
+                                                  String failoverEndpoint, String failoverType) {
+    super(dbName, Metadata.ReplicationType.PRE_OPTIMIZED_BOOTSTRAP, stagingDir, dumpExecutionId, conf, failoverEndpoint, failoverType);
   }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/ReplicationMetricCollector.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/ReplicationMetricCollector.java
index 5e5639bbefe..14e01a400f6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/ReplicationMetricCollector.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/ReplicationMetricCollector.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hive.ql.parse.repl.metric;

-import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -74,6 +73,24 @@ public abstract class ReplicationMetricCollector {
     }
   }

+  public ReplicationMetricCollector(String dbName, Metadata.ReplicationType replicationType,
+                                    String stagingDir, long dumpExecutionId, HiveConf conf,
+                                    String failoverEndpoint, String failoverType) {
+    this.conf = conf;
+    checkEnabledForTests(conf);
+    String policy = conf.get(Constants.SCHEDULED_QUERY_SCHEDULENAME);
+    long executionId = conf.getLong(Constants.SCHEDULED_QUERY_EXECUTIONID, 0L);
+    if (!StringUtils.isEmpty(policy) && executionId > 0) {
+      isEnabled = true;
+      metricCollector = MetricCollector.getInstance().init(conf);
+      MetricSink.getInstance().init(conf);
+      Metadata metadata = new Metadata(dbName, replicationType, getStagingDir(stagingDir));
+      metadata.setFailoverEndPoint(failoverEndpoint);
+      metadata.setFailoverType(failoverType);
+      replicationMetric = new ReplicationMetric(executionId, policy, dumpExecutionId, metadata);
+    }
+  }
+
   public void reportStageStart(String stageName, Map<String, Long> metricMap) throws SemanticException {
     if (isEnabled) {
       LOG.debug("Stage Started {}, {}, {}", stageName, metricMap.size(), metricMap );
@@ -90,7 +107,8 @@ public abstract class ReplicationMetricCollector {
   }

   public void reportFailoverStart(String stageName, Map<String, Long> metricMap,
-                                  FailoverMetaData failoverMd) throws SemanticException {
+                                  FailoverMetaData failoverMd, String failoverEndpoint,
+                                  String failoverType) throws SemanticException {
     if (isEnabled) {
       LOG.info("Failover Stage Started {}, {}, {}", stageName, metricMap.size(), metricMap);
       Progress progress = replicationMetric.getProgress();
@@ -104,6 +122,8 @@ public abstract class ReplicationMetricCollector {
       Metadata metadata = replicationMetric.getMetadata();
       metadata.setFailoverMetadataLoc(failoverMd.getFilePath());
       metadata.setFailoverEventId(failoverMd.getFailoverEventId());
+      metadata.setFailoverEndPoint(failoverEndpoint);
+      metadata.setFailoverType(failoverType);
       replicationMetric.setMetadata(metadata);
       metricCollector.addMetric(replicationMetric);
     }
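
The new constructor arms itself only when a scheduled-query policy name and execution id are present in the conf, mirroring the existing constructor; a usage sketch with assumed values:

    import org.apache.hadoop.hive.common.repl.ReplConst;
    import org.apache.hadoop.hive.conf.Constants;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
    import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector;
    import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata;

    public class CollectorUsageSketch {
      public static ReplicationMetricCollector sketch() {
        // The collector stays disabled unless both scheduled-query properties are set.
        HiveConf conf = new HiveConf();
        conf.set(Constants.SCHEDULED_QUERY_SCHEDULENAME, "repl_policy"); // assumed policy name
        conf.setLong(Constants.SCHEDULED_QUERY_EXECUTIONID, 1L);         // assumed execution id
        return new ReplicationMetricCollector("db", Metadata.ReplicationType.OPTIMIZED_BOOTSTRAP,
            "stagingDir", 0L, conf, MetaStoreUtils.FailoverEndpoint.SOURCE.toString(),
            ReplConst.FailoverType.UNPLANNED.toString()) {}; // anonymous concrete subclass
      }
    }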
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Metadata.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Metadata.java
index 51bcd9434b4..aed7f95d22a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Metadata.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Metadata.java
@@ -30,12 +30,15 @@ public class Metadata {
     PRE_OPTIMIZED_BOOTSTRAP,
     OPTIMIZED_BOOTSTRAP
   }
+
   private String dbName;
   private ReplicationType replicationType;
   private String stagingDir;
   private long lastReplId;
   private String failoverMetadataLoc;
   private long failoverEventId;
+  private String failoverEndPoint;
+  private String failoverType;

   public Metadata() {
@@ -48,6 +51,8 @@ public class Metadata {
     this.lastReplId = metadata.lastReplId;
     this.failoverMetadataLoc = metadata.failoverMetadataLoc;
     this.failoverEventId = metadata.failoverEventId;
+    this.failoverEndPoint = metadata.failoverEndPoint;
+    this.failoverType = metadata.failoverType;
   }

   public Metadata(String dbName, ReplicationType replicationType, String stagingDir) {
@@ -92,4 +97,11 @@ public class Metadata {
     this.failoverEventId = failoverEventId;
   }

+  public String getFailoverEndPoint() { return failoverEndPoint; }
+
+  public void setFailoverEndPoint(String endpoint) { this.failoverEndPoint = endpoint; }
+
+  public String getFailoverType() { return failoverType; }
+
+  public void setFailoverType(String type) { this.failoverType = type; }
 }
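
Because the metadata column is persisted as JSON, the two new bean properties surface as extra keys that stay null until a failover dump or load populates them; a sketch assuming Jackson's default getter-based serialization:

    import com.fasterxml.jackson.databind.ObjectMapper;
    import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata;

    public class MetadataJsonSketch {
      public static void main(String[] args) throws Exception {
        Metadata metadata = new Metadata("src", Metadata.ReplicationType.BOOTSTRAP, "dummyDir");
        // Prints, e.g.: {"dbName":"src","replicationType":"BOOTSTRAP","stagingDir":"dummyDir",
        //   "lastReplId":0,"failoverMetadataLoc":null,"failoverEventId":0,
        //   "failoverEndPoint":null,"failoverType":null}
        System.out.println(new ObjectMapper().writeValueAsString(metadata));
      }
    }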
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricCollector.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricCollector.java
index 8ac2896ccf6..d237027f181 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricCollector.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricCollector.java
@@ -18,10 +18,12 @@
 package org.apache.hadoop.hive.ql.parse.repl.metric;

+import org.apache.hadoop.hive.common.repl.ReplConst;
 import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
 import org.apache.hadoop.hive.metastore.messaging.MessageSerializer;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.exec.repl.ReplStatsTracker;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.exec.repl.util.SnapshotUtils;
@@ -30,10 +32,10 @@ import org.apache.hadoop.hive.ql.parse.repl.DumpType;
 import org.apache.hadoop.hive.ql.parse.repl.dump.metric.BootstrapDumpMetricCollector;
 import org.apache.hadoop.hive.ql.parse.repl.dump.metric.IncrementalDumpMetricCollector;
 import org.apache.hadoop.hive.ql.parse.repl.dump.metric.OptimizedBootstrapDumpMetricCollector;
+import org.apache.hadoop.hive.ql.parse.repl.dump.metric.PreOptimizedBootstrapDumpMetricCollector;
 import org.apache.hadoop.hive.ql.parse.repl.load.FailoverMetaData;
 import org.apache.hadoop.hive.ql.parse.repl.load.metric.BootstrapLoadMetricCollector;
 import org.apache.hadoop.hive.ql.parse.repl.load.metric.IncrementalLoadMetricCollector;
-import org.apache.hadoop.hive.ql.parse.repl.load.metric.PreOptimizedBootstrapLoadMetricCollector;
 import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status;
 import org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric;
 import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata;
@@ -235,8 +237,8 @@ public class TestReplicationMetricCollector {

   @Test
   public void testSuccessPreOptimizedBootstrapDumpMetrics() throws Exception {
-    ReplicationMetricCollector preOptimizedBootstrapDumpMetricCollector = new PreOptimizedBootstrapLoadMetricCollector("db",
-        "dummyDir",-1, conf);
+    ReplicationMetricCollector preOptimizedBootstrapDumpMetricCollector = new PreOptimizedBootstrapDumpMetricCollector("db",
+        "dummyDir", conf, (long) -1, MetaStoreUtils.FailoverEndpoint.SOURCE.toString(), ReplConst.FailoverType.UNPLANNED.toString());
     Map<String, Long> metricMap = new HashMap<>();
     metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 0);
     metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 0);
@@ -253,6 +255,8 @@ public class TestReplicationMetricCollector {
     Metadata expectedMetadata = new Metadata("db", Metadata.ReplicationType.PRE_OPTIMIZED_BOOTSTRAP, "dummyDir");
     expectedMetadata.setLastReplId(-1);
+    expectedMetadata.setFailoverEndPoint(MetaStoreUtils.FailoverEndpoint.SOURCE.toString());
+    expectedMetadata.setFailoverType(ReplConst.FailoverType.UNPLANNED.toString());
     Progress expectedProgress = new Progress();
     expectedProgress.setStatus(Status.SUCCESS);
     Stage dumpStage = new Stage("dump", Status.SUCCESS, 0);
@@ -270,13 +274,10 @@ public class TestReplicationMetricCollector {
         Arrays.asList(ReplUtils.MetricName.TABLES.name(), ReplUtils.MetricName.FUNCTIONS.name()));
   }
-
-
-
   @Test
   public void testSuccessOptimizedBootstrapDumpMetrics() throws Exception {
     ReplicationMetricCollector optimizedBootstrapDumpMetricCollector = new OptimizedBootstrapDumpMetricCollector("db",
-        "dummyDir", conf, 0L);
+        "dummyDir", conf, 0L, MetaStoreUtils.FailoverEndpoint.SOURCE.toString(), ReplConst.FailoverType.UNPLANNED.toString());
     Map<String, Long> metricMap = new HashMap<>();
     metricMap.put(ReplUtils.MetricName.TABLES.name(), (long) 10);
     metricMap.put(ReplUtils.MetricName.FUNCTIONS.name(), (long) 1);
@@ -298,6 +299,8 @@ public class TestReplicationMetricCollector {
     Metadata expectedMetadata = new Metadata("db", Metadata.ReplicationType.OPTIMIZED_BOOTSTRAP, "dummyDir");
     expectedMetadata.setLastReplId(10);
+    expectedMetadata.setFailoverEndPoint(MetaStoreUtils.FailoverEndpoint.SOURCE.toString());
+    expectedMetadata.setFailoverType(ReplConst.FailoverType.UNPLANNED.toString());
     Progress expectedProgress = new Progress();
     expectedProgress.setStatus(Status.SUCCESS);
     Stage dumpStage = new Stage("dump", Status.SUCCESS, 0);
@@ -322,7 +325,7 @@ public class TestReplicationMetricCollector {
         "dummyDir", conf, 0L);
     Map<String, Long> metricMap = new HashMap<>();
     metricMap.put(ReplUtils.MetricName.EVENTS.name(), (long) 10);
-    incrDumpMetricCollector.reportFailoverStart("dump", metricMap, fmd);
+    incrDumpMetricCollector.reportFailoverStart("dump", metricMap, fmd, MetaStoreUtils.FailoverEndpoint.SOURCE.toString(), ReplConst.FailoverType.PLANNED.toString());
     incrDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.EVENTS.name(), 2);
     List<ReplicationMetric> actualMetrics = MetricCollector.getInstance().getMetrics();
     Assert.assertEquals(1, actualMetrics.size());
@@ -337,6 +340,8 @@ public class TestReplicationMetricCollector {
     expectedMetadata.setLastReplId(10);
     expectedMetadata.setFailoverEventId(10);
     expectedMetadata.setFailoverMetadataLoc("dummyDir");
+    expectedMetadata.setFailoverEndPoint(MetaStoreUtils.FailoverEndpoint.SOURCE.toString());
+    expectedMetadata.setFailoverType(ReplConst.FailoverType.PLANNED.toString());
     Progress expectedProgress = new Progress();
     expectedProgress.setStatus(Status.FAILOVER_READY);
     Stage dumpStage = new Stage("dump", Status.SUCCESS, 0);
@@ -448,6 +453,8 @@ public class TestReplicationMetricCollector {
     Assert.assertEquals(expected.getMetadata().getDbName(), actual.getMetadata().getDbName());
     Assert.assertEquals(expected.getMetadata().getStagingDir(), actual.getMetadata().getStagingDir());
     Assert.assertEquals(expected.getMetadata().getLastReplId(), actual.getMetadata().getLastReplId());
+    Assert.assertEquals(expected.getMetadata().getFailoverEndPoint(), actual.getMetadata().getFailoverEndPoint());
+    Assert.assertEquals(expected.getMetadata().getFailoverType(), actual.getMetadata().getFailoverType());
     Assert.assertEquals(expected.getProgress().getStatus(), actual.getProgress().getStatus());
     Assert.assertEquals(expected.getProgress().getStageByName(stageName).getStatus(), actual.getProgress().getStageByName(stageName).getStatus());
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricSink.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricSink.java
index 5059f9af472..2d1b691e36a 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricSink.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricSink.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.parse.repl.metric;

 import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hive.common.repl.ReplConst;
 import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.GetReplicationMetricsRequest;
@@ -27,6 +28,7 @@ import org.apache.hadoop.hive.metastore.api.ReplicationMetricList;
 import org.apache.hadoop.hive.metastore.api.ReplicationMetrics;
 import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer;
 import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.exec.repl.ReplStatsTracker;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.exec.repl.util.SnapshotUtils;
@@ -220,7 +222,7 @@ public class TestReplicationMetricSink {
         "testAcidTablesReplLoadBootstrapIncr_1592205875387", stagingDir, conf, 0L);
     metricMap = new HashMap<String, Long>(){{put(ReplUtils.MetricName.EVENTS.name(), (long) 10);}};
-    failoverDumpMetricCollector.reportFailoverStart("dump", metricMap, fmd);
+    failoverDumpMetricCollector.reportFailoverStart("dump", metricMap, fmd, MetaStoreUtils.FailoverEndpoint.SOURCE.toString(), ReplConst.FailoverType.PLANNED.toString());
     failoverDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.EVENTS.name(), 10);
     failoverDumpMetricCollector.reportStageEnd("dump", Status.SUCCESS, 10, new SnapshotUtils.ReplSnapshotCount(),
         new ReplStatsTracker(0));
@@ -231,6 +233,8 @@ public class TestReplicationMetricSink {
     expectedMetadata.setLastReplId(10);
     expectedMetadata.setFailoverEventId(100);
     expectedMetadata.setFailoverMetadataLoc(stagingDir + FailoverMetaData.FAILOVER_METADATA);
+    expectedMetadata.setFailoverEndPoint(MetaStoreUtils.FailoverEndpoint.SOURCE.toString());
+    expectedMetadata.setFailoverType(ReplConst.FailoverType.PLANNED.toString());
     expectedProgress = new Progress();
     expectedProgress.setStatus(Status.FAILOVER_READY);
     dumpStage = new Stage("dump", Status.SUCCESS, 0);
@@ -302,6 +306,8 @@ public class TestReplicationMetricSink {
     Assert.assertEquals(expected.getMetadata().getDbName(), actual.getMetadata().getDbName());
     Assert.assertEquals(expected.getMetadata().getStagingDir(), actual.getMetadata().getStagingDir());
     Assert.assertEquals(expected.getMetadata().getLastReplId(), actual.getMetadata().getLastReplId());
+    Assert.assertEquals(expected.getMetadata().getFailoverEndPoint(), actual.getMetadata().getFailoverEndPoint());
+    Assert.assertEquals(expected.getMetadata().getFailoverType(), actual.getMetadata().getFailoverType());
     Assert.assertEquals(expected.getProgress().getStatus(), actual.getProgress().getStatus());
     Assert.assertEquals(expected.getProgress().getStageByName(stageName).getStatus(), actual.getProgress().getStageByName(stageName).getStatus());
diff --git a/ql/src/test/results/clientpositive/llap/replication_metrics_ingest.q.out b/ql/src/test/results/clientpositive/llap/replication_metrics_ingest.q.out
index 8bf7c4962cd..4b6a3ab5b03 100644
--- a/ql/src/test/results/clientpositive/llap/replication_metrics_ingest.q.out
+++ b/ql/src/test/results/clientpositive/llap/replication_metrics_ingest.q.out
@@ -92,5 +92,5 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@replication_metrics
 POSTHOOK: Input: sys@replication_metrics_orig
 #### A masked pattern was here ####
-repl1	1	{"dbName":"src","replicationType":"BOOTSTRAP","stagingDir":"dummyDir","lastReplId":0,"failoverMetadataLoc":null,"failoverEventId":0}	H4sIAAAAAAAAAG2PwQ6CMBBE/2XPHOTKTSsmJojEwskQ02gDJKUl2+2J9N8tEohEb7sz83ayI1gS5CwkwCvGUs4hmqRGBuk+gha9DN4tLbLHsboUs/sHQCq7KbqLQOrXOveSsHtubp2qnJXnaz6BT4coNTHjNH3yZEioZfXRCpX7Q5b+EvGWiH0d6hENZqYpBLWQaKdUBCgHxbUYbGsW9MsID9lZ8LV/A7NIwGISAQAA	{"status":"SUCCESS","stages":[{"name":"REPL_DUMP","status":"SUCCESS","startTime":0,"endTime":0,"metrics":[{"name": [...]
-repl2	1	{"dbName":"destination","replicationType":"BOOTSTRAP","stagingDir":"dummyDir","lastReplId":0,"failoverMetadataLoc":null,"failoverEventId":0}	H4sIAAAAAAAAAG2PwQqDMBBE/yVnD/XqzUYLBbFS9VSkBF1UiImsm5Pk3xu1CtLedmb3zbAzm0iQmVjA8pLzOM+Zt1gtOOs1MyUGcLtnnCXv5BFG2/YPgFT0y+nFY6CaYx6AsK9PWbcy5cX9kS5gbRBBEddG0XpPmoTcpfUOqAivSfxL+GfCt5WrR9SY6DYT1LFAGSk9hjDKXIlx6vSOumgzcARB0KzVTkYgYZP2y7hfpy3EVvYDvpfiNy0BAAA=	{"status":"SUCCESS","stages":[{"name":"REPL_LOAD","status":"SUCCESS","startTime":0,"en [...]
+repl1	1	{"dbName":"src","replicationType":"BOOTSTRAP","stagingDir":"dummyDir","lastReplId":0,"failoverMetadataLoc":null,"failoverEventId":0,"failoverEndPoint":null,"failoverType":null}	H4sIAAAAAAAAAG2PwQ6CMBBE/2XPHOTKTSsmJojEwskQ02gDJKUl2+2J9N8tEohEb7sz83ayI1gS5CwkwCvGUs4hmqRGBuk+gha9DN4tLbLHsboUs/sHQCq7KbqLQOrXOveSsHtubp2qnJXnaz6BT4coNTHjNH3yZEioZfXRCpX7Q5b+EvGWiH0d6hENZqYpBLWQaKdUBCgHxbUYbGsW9MsID9lZ8LV/A7NIwGISAQAA	{"status":"SUCCESS","stages":[{"name":"REPL_DUMP","status":"SUCCESS"," [...]
+repl2	1	{"dbName":"destination","replicationType":"BOOTSTRAP","stagingDir":"dummyDir","lastReplId":0,"failoverMetadataLoc":null,"failoverEventId":0,"failoverEndPoint":null,"failoverType":null}	H4sIAAAAAAAAAG2PwQqDMBBE/yVnD/XqzUYLBbFS9VSkBF1UiImsm5Pk3xu1CtLedmb3zbAzm0iQmVjA8pLzOM+Zt1gtOOs1MyUGcLtnnCXv5BFG2/YPgFT0y+nFY6CaYx6AsK9PWbcy5cX9kS5gbRBBEddG0XpPmoTcpfUOqAivSfxL+GfCt5WrR9SY6DYT1LFAGSk9hjDKXIlx6vSOumgzcARB0KzVTkYgYZP2y7hfpy3EVvYDvpfiNy0BAAA=	{"status":"SUCCESS","stages":[{"name":"REP [...]
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/repl/ReplConst.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/repl/ReplConst.java
index 897be7fa75a..9641149f6cc 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/repl/ReplConst.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/repl/ReplConst.java
@@ -47,6 +47,8 @@ public class ReplConst {

   /**
    * Database level prop to identify the failover endPoint of the database.
+   * It is set during a planned failover and unset or removed after optimised
+   * bootstrap completes. During an unplanned failover this prop is not set.
    * */
  public static final String REPL_FAILOVER_ENDPOINT = "repl.failover.endpoint";
@@ -82,4 +84,12 @@ public class ReplConst {
   public static final String BOOTSTRAP_DUMP_STATE_KEY_PREFIX = "bootstrap.dump.state.";

   public static final String READ_ONLY_HOOK = "org.apache.hadoop.hive.ql.hooks.EnforceReadOnlyDatabaseHook";
+
+  /**
+   * Type of failover.
+   */
+  public enum FailoverType {
+    PLANNED,
+    UNPLANNED;
+  }
 }
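
The enum complements the endpoint property documented above: the property exists only between the start of a planned failover and the completion of optimised bootstrap, which is why its absence is read as UNPLANNED elsewhere in this patch. A lifecycle sketch (hypothetical driver code, not part of this commit):

    import java.util.Map;
    import org.apache.hadoop.hive.common.repl.ReplConst;

    final class FailoverLifecycleSketch {
      static void sketch(Map<String, String> dbParams) {
        dbParams.put(ReplConst.REPL_FAILOVER_ENDPOINT, "SOURCE"); // planned failover begins
        // ... failover-ready dump, load on the target, optimised bootstrap ...
        dbParams.remove(ReplConst.REPL_FAILOVER_ENDPOINT);        // cleaned up afterwards
      }
    }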