This is an automated email from the ASF dual-hosted git repository.
avijayan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
The following commit(s) were added to refs/heads/master by this push:
new ada3b3b HDDS-2848. Recon changes to make snapshots work with OM HA.
(#666)
ada3b3b is described below
commit ada3b3be094a02200f41dac25367cb2a4ebc29ed
Author: Siddharth <[email protected]>
AuthorDate: Sat Mar 14 17:05:37 2020 -0700
HDDS-2848. Recon changes to make snapshots work with OM HA. (#666)
* HDDS-2848. Recon changes to make snapshots work with OM HA.
* HDDS-2848. Checkstyle fixes.
* HDDS-2848. Checkstyle fix.
* Review comment addressed.
* Unused import.
* Verify snapshot is synced to Recon.
* Minor whitespace fix.
* trigger new CI check
---
.../apache/hadoop/ozone/MiniOzoneClusterImpl.java | 15 +--
.../hadoop/ozone/MiniOzoneHAClusterImpl.java | 19 ++-
.../ozone/recon/TestReconWithOzoneManagerHA.java | 142 +++++++++++++++++++++
.../hadoop/ozone/recon/ReconControllerModule.java | 5 +-
.../org/apache/hadoop/ozone/recon/ReconServer.java | 5 +
.../org/apache/hadoop/ozone/recon/ReconUtils.java | 3 +-
.../recon/recovery/ReconOmMetadataManagerImpl.java | 3 +-
.../spi/impl/OzoneManagerServiceProviderImpl.java | 50 ++++++--
8 files changed, 214 insertions(+), 28 deletions(-)
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 03ada7e..8fdc489 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -144,10 +144,11 @@ public class MiniOzoneClusterImpl implements
MiniOzoneCluster {
* @param hddsDatanodes
*/
MiniOzoneClusterImpl(OzoneConfiguration conf, StorageContainerManager scm,
- List<HddsDatanodeService> hddsDatanodes) {
+ List<HddsDatanodeService> hddsDatanodes, ReconServer reconServer) {
this.conf = conf;
this.scm = scm;
this.hddsDatanodes = hddsDatanodes;
+ this.reconServer = reconServer;
}
public OzoneConfiguration getConf() {
@@ -517,14 +518,8 @@ public class MiniOzoneClusterImpl implements
MiniOzoneCluster {
hddsDatanodes = createHddsDatanodes(scm);
- MiniOzoneClusterImpl cluster;
-
- if (includeRecon) {
- cluster = new MiniOzoneClusterImpl(conf, om, scm, hddsDatanodes,
- reconServer);
- } else {
- cluster = new MiniOzoneClusterImpl(conf, om, scm, hddsDatanodes);
- }
+ MiniOzoneClusterImpl cluster = new MiniOzoneClusterImpl(conf, om, scm,
+ hddsDatanodes, reconServer);
cluster.setCAClient(certClient);
if (startDataNodes) {
@@ -768,7 +763,7 @@ public class MiniOzoneClusterImpl implements
MiniOzoneCluster {
GenericTestUtils.setRootLogLevel(Level.INFO);
}
- private void configureRecon() throws IOException {
+ protected void configureRecon() throws IOException {
ConfigurationProvider.resetConfiguration();
TemporaryFolder tempFolder = new TemporaryFolder();
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
index d232b86..639c6f7 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMStorage;
import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.recon.ReconServer;
import
org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -70,7 +71,7 @@ public final class MiniOzoneHAClusterImpl extends
MiniOzoneClusterImpl {
*
* @throws IOException if there is an I/O error
*/
-
+ @SuppressWarnings("checkstyle:ParameterNumber")
private MiniOzoneHAClusterImpl(
OzoneConfiguration conf,
Map<String, OzoneManager> omMap,
@@ -78,8 +79,9 @@ public final class MiniOzoneHAClusterImpl extends
MiniOzoneClusterImpl {
List<OzoneManager> inactiveOMList,
StorageContainerManager scm,
List<HddsDatanodeService> hddsDatanodes,
- String omServiceId) {
- super(conf, scm, hddsDatanodes);
+ String omServiceId,
+ ReconServer reconServer) {
+ super(conf, scm, hddsDatanodes, reconServer);
this.ozoneManagerMap = omMap;
this.ozoneManagers = new ArrayList<>(omMap.values());
this.activeOMs = activeOMList;
@@ -213,20 +215,29 @@ public final class MiniOzoneHAClusterImpl extends
MiniOzoneClusterImpl {
initializeConfiguration();
StorageContainerManager scm;
Map<String, OzoneManager> omMap;
+ ReconServer reconServer = null;
try {
scm = createSCM();
scm.start();
omMap = createOMService();
+ if (includeRecon) {
+ configureRecon();
+ reconServer = new ReconServer();
+ }
} catch (AuthenticationException ex) {
throw new IOException("Unable to build MiniOzoneCluster. ", ex);
}
final List<HddsDatanodeService> hddsDatanodes = createHddsDatanodes(scm);
MiniOzoneHAClusterImpl cluster = new MiniOzoneHAClusterImpl(
- conf, omMap, activeOMs, inactiveOMs, scm, hddsDatanodes, omServiceId);
+ conf, omMap, activeOMs, inactiveOMs, scm, hddsDatanodes,
+ omServiceId, reconServer);
if (startDataNodes) {
cluster.startHddsDatanodes();
}
+ if (includeRecon) {
+ reconServer.execute(new String[] {});
+ }
return cluster;
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java
new file mode 100644
index 0000000..eb2185f
--- /dev/null
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.recon;
+
+import static
org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT;
+
+import java.util.HashMap;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
+import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider;
+import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+/**
+ * This class sets up a MiniOzoneHACluster to test with Recon.
+ */
+public class TestReconWithOzoneManagerHA {
+ @Rule
+ public Timeout timeout = new Timeout(300_000);
+
+ private MiniOzoneHAClusterImpl cluster;
+ private ObjectStore objectStore;
+ private final String omServiceId = "omService1";
+ private final String volName = "testrecon";
+
+ @Before
+ public void setup() throws Exception {
+ OzoneConfiguration conf = new OzoneConfiguration();
+ conf.set(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, Boolean.TRUE.toString());
+ // Sync to disk enabled
+ conf.set("hadoop.hdds.db.rocksdb.writeoption.sync",
+ Boolean.TRUE.toString());
+ cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf)
+ .setClusterId(UUID.randomUUID().toString())
+ .setScmId(UUID.randomUUID().toString())
+ .setOMServiceId(omServiceId)
+ .setNumDatanodes(1)
+ .setNumOfOzoneManagers(3)
+ .includeRecon(true)
+ .build();
+ cluster.waitForClusterToBeReady();
+ objectStore = OzoneClientFactory.getRpcClient(omServiceId, conf)
+ .getObjectStore();
+ objectStore.createVolume(volName);
+ objectStore.getVolume(volName).createBucket(volName);
+ }
+
+ @After
+ public void tearDown() {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+
+ @Test
+ public void testReconGetsSnapshotFromLeader() throws Exception {
+ AtomicReference<OzoneManager> ozoneManager = new AtomicReference<>();
+ // Wait for OM leader election to finish
+ GenericTestUtils.waitFor(() -> {
+ OzoneManager om = cluster.getOMLeader();
+ ozoneManager.set(om);
+ return om != null;
+ }, 100, 120000);
+ Assert.assertNotNull("Timed out waiting OM leader election to finish: "
+ + "no leader or more than one leader.", ozoneManager);
+ Assert.assertTrue("Should have gotten the leader!",
+ ozoneManager.get().isLeader());
+
+ OzoneManagerServiceProviderImpl impl = (OzoneManagerServiceProviderImpl)
+ cluster.getReconServer().getOzoneManagerServiceProvider();
+
+ String hostname =
+ ozoneManager.get().getHttpServer().getHttpAddress().getHostName();
+ String expectedUrl = "http://" +
+ (hostname.equals("0.0.0.0") ? "localhost" : hostname) + ":" +
+ ozoneManager.get().getHttpServer().getHttpAddress().getPort() +
+ OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT;
+ String snapshotUrl = impl.getOzoneManagerSnapshotUrl();
+ Assert.assertEquals("OM Snapshot should be requested from the leader.",
+ expectedUrl, snapshotUrl);
+ // Write some data
+ String keyPrefix = "ratis";
+ OzoneOutputStream key = objectStore.getVolume(volName)
+ .getBucket(volName)
+ .createKey(keyPrefix, 1024, ReplicationType.RATIS,
+ ReplicationFactor.ONE, new HashMap<>());
+ key.write(keyPrefix.getBytes());
+ key.flush();
+ key.close();
+ // Sync data to Recon
+ impl.syncDataFromOM();
+
+ ContainerDBServiceProvider containerDBServiceProvider =
+ cluster.getReconServer().getContainerDBServiceProvider();
+ TableIterator iterator =
+ containerDBServiceProvider.getContainerTableIterator();
+ String reconKeyPrefix = null;
+ while (iterator.hasNext()) {
+ Table.KeyValue<ContainerKeyPrefix, Integer> keyValue =
+ (Table.KeyValue<ContainerKeyPrefix, Integer>) iterator.next();
+ reconKeyPrefix = keyValue.getKey().getKeyPrefix();
+ }
+ Assert.assertEquals("Container data should be synced to recon.",
+ String.format("/%s/%s/%s", volName, volName, keyPrefix),
+ reconKeyPrefix);
+ }
+}
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java
index 6062bac..fc4375d 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.recon;
import static
org.apache.hadoop.hdds.scm.cli.ContainerOperationClient.newContainerRpcClient;
+import static
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_INTERNAL_SERVICE_ID;
import static
org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_AUTO_COMMIT;
import static
org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_CONNECTION_TIMEOUT;
import static
org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_DB_DRIVER;
@@ -140,7 +141,9 @@ public class ReconControllerModule extends AbstractModule {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
ozoneManagerClient = new
OzoneManagerProtocolClientSideTranslatorPB(
- ozoneConfiguration, clientId.toString(), null, ugi);
+ ozoneConfiguration, clientId.toString(),
+ ozoneConfiguration.get(OZONE_OM_INTERNAL_SERVICE_ID),
+ ugi);
} catch (IOException ioEx) {
LOG.error("Error in provisioning OzoneManagerProtocol ", ioEx);
}
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
index a13f6b7..9af7114 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
@@ -221,4 +221,9 @@ public class ReconServer extends GenericCli {
public StorageContainerServiceProvider getStorageContainerServiceProvider() {
return injector.getInstance(StorageContainerServiceProvider.class);
}
+
+ @VisibleForTesting
+ public ContainerDBServiceProvider getContainerDBServiceProvider() {
+ return containerDBServiceProvider;
+ }
}
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
index 1b07593..69992c7 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
@@ -224,8 +224,7 @@ public class ReconUtils {
* @return Inputstream to the response of the HTTP call.
* @throws IOException While reading the response.
*/
- public InputStream makeHttpCall(CloseableHttpClient httpClient,
- String url)
+ public InputStream makeHttpCall(CloseableHttpClient httpClient, String url)
throws IOException {
HttpGet httpGet = new HttpGet(url);
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
index f6e8e4a..26fda47 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
@@ -90,8 +90,7 @@ public class ReconOmMetadataManagerImpl extends
OmMetadataManagerImpl
LOG.info("Created OM DB handle from snapshot at {}.",
dbFile.getAbsolutePath());
} catch (IOException ioEx) {
- LOG.error("Unable to initialize Recon OM DB snapshot store.",
- ioEx);
+ LOG.error("Unable to initialize Recon OM DB snapshot store.", ioEx);
}
if (getStore() != null) {
initializeOmTables();
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
index 7df706f..3101733 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
@@ -33,12 +33,14 @@ import static
org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SNAPS
import static
org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_INTERVAL_DEFAULT;
import static
org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SOCKET_TIMEOUT;
import static
org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SOCKET_TIMEOUT_DEFAULT;
+import static org.apache.ratis.proto.RaftProtos.RaftPeerRole.LEADER;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Path;
import java.nio.file.Paths;
+import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
@@ -48,22 +50,24 @@ import javax.inject.Singleton;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.server.http.HttpConfig;
+import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
+import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper;
+import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
+import org.apache.hadoop.hdds.utils.db.RDBStore;
+import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest;
+import
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort.Type;
import org.apache.hadoop.ozone.recon.ReconUtils;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider;
import org.apache.hadoop.ozone.recon.tasks.OMDBUpdatesHandler;
import org.apache.hadoop.ozone.recon.tasks.OMUpdateEventBatch;
import org.apache.hadoop.ozone.recon.tasks.ReconTaskController;
-import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
-import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper;
-import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
-import org.apache.hadoop.hdds.utils.db.RDBStore;
-import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
@@ -238,18 +242,46 @@ public class OzoneManagerServiceProviderImpl
}
/**
+ * Find the OM leader's address to get the snapshot from.
+ */
+ @VisibleForTesting
+ public String getOzoneManagerSnapshotUrl() throws IOException {
+ if (!configuration.getBoolean(
+ OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, false)) {
+ return omDBSnapshotUrl;
+ }
+ String omLeaderUrl = omDBSnapshotUrl;
+ List<org.apache.hadoop.ozone.om.helpers.ServiceInfo> serviceList =
+ ozoneManagerClient.getServiceList();
+ HttpConfig.Policy policy = HttpConfig.getHttpPolicy(configuration);
+ if (!serviceList.isEmpty()) {
+ for (org.apache.hadoop.ozone.om.helpers.ServiceInfo info : serviceList) {
+ if (info.getNodeType().equals(HddsProtos.NodeType.OM) &&
+ info.getOmRoleInfo().hasServerRole() &&
+ info.getOmRoleInfo().getServerRole().equals(LEADER.name())) {
+ omLeaderUrl = (policy.isHttpsEnabled() ?
+ "https://" + info.getServiceAddress(Type.HTTPS) :
+ "http://" + info.getServiceAddress(Type.HTTP)) +
+ OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT;
+ }
+ }
+ }
+ return omLeaderUrl;
+ }
+
+ /**
* Method to obtain current OM DB Snapshot.
* @return DBCheckpoint instance.
*/
@VisibleForTesting
DBCheckpoint getOzoneManagerDBSnapshot() {
- String snapshotFileName = RECON_OM_SNAPSHOT_DB + "_" + System
- .currentTimeMillis();
+ String snapshotFileName = RECON_OM_SNAPSHOT_DB + "_" +
+ System.currentTimeMillis();
File targetFile = new File(omSnapshotDBParentDir, snapshotFileName +
".tar.gz");
try {
try (InputStream inputStream = reconUtils.makeHttpCall(httpClient,
- omDBSnapshotUrl)) {
+ getOzoneManagerSnapshotUrl())) {
FileUtils.copyInputStreamToFile(inputStream, targetFile);
}
@@ -310,7 +342,7 @@ public class OzoneManagerServiceProviderImpl
DBUpdatesWrapper dbUpdates = ozoneManagerClient.getDBUpdates(
dbUpdatesRequest);
if (null != dbUpdates) {
- RDBStore rocksDBStore = (RDBStore)omMetadataManager.getStore();
+ RDBStore rocksDBStore = (RDBStore) omMetadataManager.getStore();
RocksDB rocksDB = rocksDBStore.getDb();
LOG.debug("Number of updates received from OM : {}",
dbUpdates.getData().size());
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]