guihecheng commented on code in PR #3392:
URL: https://github.com/apache/ozone/pull/3392#discussion_r868757123


##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java:
##########
@@ -114,8 +123,12 @@ public void createWorkingDir(String workingDirName,
       MutableVolumeSet dbVolumeSet) throws IOException {
     super.createWorkingDir(workingDirName, dbVolumeSet);
 
-    if (SchemaV3.isFinalizedAndEnabled(getConf())) {
-      createDbStore(dbVolumeSet);
+    // Create DB store for a newly added volume
+    if (!isTest) {
+      if (VersionedDatanodeFeatures.isFinalized(
+          HDDSLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT)) {

Review Comment:
   Do we have any special reason to check for EC support here?



##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java:
##########
@@ -144,9 +157,7 @@ public void shutdown() {
     if (volumeIOStats != null) {
       volumeIOStats.unregister();
     }
-    if (SchemaV3.isFinalizedAndEnabled(getConf())) {

Review Comment:
   I saw that we have a dbLoaded check inside closeDbStore, so we don't need 
the if wrap here. Maybe we should do the same for failVolume()?



##########
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java:
##########
@@ -0,0 +1,777 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.upgrade;
+
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.pipeline.MockPipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
+import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage;
+import org.apache.hadoop.ozone.container.common.SCMTestUtils;
+import org.apache.hadoop.ozone.container.common.ScmTestMock;
+import 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
+import 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
+import 
org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine;
+import 
org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask;
+import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
+import org.apache.hadoop.ozone.container.common.volume.DbVolume;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.File;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.UUID;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+/**
+ * Tests upgrading a single datanode from container Schema V2 to Schema V3.
+ */
+@RunWith(Parameterized.class)
+public class TestDatanodeUpgradeToSchemaV3 {
+  @Rule
+  public TemporaryFolder tempFolder;
+
+  private DatanodeStateMachine dsm;
+  private final OzoneConfiguration conf;
+  private static final String CLUSTER_ID = "clusterID";
+  private final boolean schemaV3Enabled;
+
+  private RPC.Server scmRpcServer;
+  private InetSocketAddress address;
+  private ScmTestMock scmServerImpl;
+
+  private Random random;
+
+  // hdds.datanode.container.schema.v3.enabled
+  @Parameterized.Parameters
+  public static Collection<Object[]> getSchemaFiles() {
+    Collection<Object[]> parameters = new ArrayList<>();
+    parameters.add(new Boolean[]{false});
+    parameters.add(new Boolean[]{true});
+    return parameters;
+  }
+
+  public TestDatanodeUpgradeToSchemaV3(Boolean enable) {
+    this.schemaV3Enabled = enable;
+    conf = new OzoneConfiguration();
+    conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED,
+        this.schemaV3Enabled);
+  }
+
+  @Before
+  public void setup() throws Exception {
+    tempFolder = new TemporaryFolder();
+    tempFolder.create();
+    random = new Random();
+
+    address = SCMTestUtils.getReuseableAddress();
+    conf.setSocketAddr(ScmConfigKeys.OZONE_SCM_NAMES, address);
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
+        tempFolder.getRoot().getAbsolutePath());
+  }
+
+  @After
+  public void teardown() throws Exception {
+    if (scmRpcServer != null) {
+      scmRpcServer.stop();
+    }
+
+    if (dsm != null) {
+      dsm.close();
+    }
+  }
+
+  /**
+   * Test RocksDB is created on data volume, not matter Schema V3 is
+   * enabled or not.
+   * If Schema V3 is enabled, RocksDB will be loaded.
+   */
+  @Test
+  public void testDBOnHddsVolume() throws Exception {
+    // start DN and SCM
+    startScmServer();
+    addHddsVolume();
+
+    startPreFinalizedDatanode();
+    HddsVolume dataVolume = (HddsVolume) dsm.getContainer().getVolumeSet()
+        .getVolumesList().get(0);
+    File dbFile = new File(dataVolume.getStorageDir().getAbsolutePath() + "/" +
+        dataVolume.getClusterID() + "/" + dataVolume.getStorageID());
+    // RocksDB is created at first startup, but not loaded
+    Assert.assertTrue(dbFile.exists());
+    Assert.assertNull(dataVolume.getDbVolume());
+    Assert.assertFalse(dataVolume.isDbLoaded());
+
+    dsm.finalizeUpgrade();
+    // RocksDB loaded when SchemaV3 is enabled
+    if (VersionedDatanodeFeatures.SchemaV3.isFinalizedAndEnabled(conf)) {
+      Assert.assertNotNull(dataVolume.getDbParentDir().getAbsolutePath()
+          .startsWith(dataVolume.getStorageDir().toString()));
+    } else {
+      // RocksDB is not loaded when SchemaV3 is disabled.
+      Assert.assertFalse(dataVolume.isDbLoaded());
+    }
+  }
+
+  /**
+   * Test RocksDB is created on DB volume when configured, not matter
+   * Schema V3 is enabled or not.
+   * If Schema V3 is enabled, RocksDB will be loaded.
+   */
+  @Test
+  public void testDBOnDbVolume() throws Exception {
+    // start DN and SCM
+    startScmServer();
+    addHddsVolume();
+    addDbVolume();
+
+    startPreFinalizedDatanode();
+    HddsVolume dataVolume = (HddsVolume) dsm.getContainer().getVolumeSet()
+        .getVolumesList().get(0);
+    Assert.assertNotNull(dataVolume.getDbParentDir());
+
+    DbVolume dbVolume = (DbVolume) dsm.getContainer().getDbVolumeSet()
+        .getVolumesList().get(0);
+    File dbFile = new File(dbVolume.getStorageDir().getAbsolutePath() + "/" +
+        dbVolume.getClusterID() + "/" + dataVolume.getStorageID());
+    // RocksDB is created at first startup, but not loaded
+    Assert.assertTrue(dbFile.exists());
+    Assert.assertEquals(dbVolume, dataVolume.getDbVolume());
+    Assert.assertTrue(
+        dbVolume.getHddsVolumeIDs().contains(dataVolume.getStorageID()));
+    Assert.assertFalse(dataVolume.isDbLoaded());
+
+    dsm.finalizeUpgrade();
+    // RocksDB loaded when SchemaV3 is enabled
+    if (VersionedDatanodeFeatures.SchemaV3.isFinalizedAndEnabled(conf)) {
+      Assert.assertTrue(dataVolume.getDbParentDir().getAbsolutePath()
+          .startsWith(dbVolume.getStorageDir().toString()));
+    } else {
+      // RocksDB is not loaded when SchemaV3 is disabled.
+      Assert.assertFalse(dataVolume.isDbLoaded());
+    }
+  }
+
+  /**
+   * Test RocksDB in created in Finalize action for an existing hddsVolume.
+   * This mimics the real cluster upgrade situation.
+   */
+  @Test
+  public void testDBCreatedInFinalize() throws Exception {
+    // start DN and SCM
+    startScmServer();
+    // add one HddsVolume
+    addHddsVolume();
+    // Let HddsVolume be formatted to mimic the real cluster upgrade
+    // Set layout version.
+    DatanodeLayoutStorage layoutStorage = new DatanodeLayoutStorage(conf,
+        UUID.randomUUID().toString(),
+        HDDSLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT.layoutVersion());
+    layoutStorage.initialize();
+    dsm = new DatanodeStateMachine(
+        ContainerTestUtils.createDatanodeDetails(), conf, null, null, null);
+    HddsVolume dataVolume = (
+        HddsVolume) dsm.getContainer().getVolumeSet().getVolumesList().get(0);
+    dataVolume.setTest(true);

Review Comment:
   Well, I don't think we should call setTest here if we intend to mimic the 
real upgrade situation.



##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java:
##########
@@ -178,13 +189,29 @@ public File getDbParentDir() {
     return this.dbParentDir;
   }
 
+  public boolean isDbLoaded() {
+    return dbLoaded.get();
+  }
+
+  @VisibleForTesting
+  public void setTest(boolean test) {

Review Comment:
   This flag is set mostly for failure tests (except for one place where I 
think it should be set).
   Is it possible to mock the failure with Mockito or PowerMock in those tests?



##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DatanodeSchemaV3FinalizeAction.java:
##########
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.upgrade;
+
+import org.apache.hadoop.hdds.upgrade.HDDSUpgradeAction;
+import 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
+import 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
+import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
+import org.apache.hadoop.ozone.upgrade.UpgradeActionHdds;
+import org.apache.ratis.util.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static 
org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.DATANODE_SCHEMA_V3;
+import static 
org.apache.hadoop.ozone.upgrade.LayoutFeature.UpgradeActionType.ON_FINALIZE;
+import static 
org.apache.hadoop.ozone.upgrade.UpgradeActionHdds.Component.DATANODE;
+
+/**
+ * Upgrade Action for DataNode for SCHEMA V3.
+ */
+@UpgradeActionHdds(feature = DATANODE_SCHEMA_V3, component = DATANODE,
+    type = ON_FINALIZE)
+public class DatanodeSchemaV3FinalizeAction
+    implements HDDSUpgradeAction<DatanodeStateMachine> {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(DatanodeSchemaV3FinalizeAction.class);
+
+  @Override
+  public void execute(DatanodeStateMachine dsm) throws Exception {
+    LOG.info("Upgrading Datanode volume layout for Schema V3 support.");
+
+    // Load RocksDB for each HddsVolume, build the relationship between
+    // HddsVolume and DbVolume if DbVolume is configured.
+    MutableVolumeSet dataVolumeSet = dsm.getContainer().getVolumeSet();
+    MutableVolumeSet dbVolumeSet = dsm.getContainer().getDbVolumeSet();
+    Preconditions.assertNotNull(dataVolumeSet, "Data Volume should be null");
+    Preconditions.assertNotNull(dataVolumeSet, "Data Volume should be null");
+    dataVolumeSet.writeLock();
+    try {
+      for (StorageVolume hddsVolume : dataVolumeSet.getVolumesList()) {
+        HddsVolume dataVolume = (HddsVolume) hddsVolume;
+        if (dataVolume.getDbParentDir() != null) {
+          // The RocksDB for this hddsVolume is already created(newly added
+          // volume case).
+          continue;
+        }
+        dataVolume.createDbStore(dbVolumeSet);
+      }
+    } finally {
+      dataVolumeSet.writeUnlock();
+    }
+    DatanodeConfiguration dcf =
+        dsm.getConf().getObject(DatanodeConfiguration.class);
+    if (!dcf.getContainerSchemaV3Enabled()) {
+      LOG.info("SchemaV3 is disabled. Don't load RocksDB in upgrade.");

Review Comment:
   Maybe "Don't" -> "Won't", since "Don't" reads like an instruction telling an 
Ozone administrator not to do something, whereas here it just means the hook 
function won't load the db.



##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java:
##########
@@ -178,13 +189,29 @@ public File getDbParentDir() {
     return this.dbParentDir;
   }
 
+  public boolean isDbLoaded() {
+    return dbLoaded.get();
+  }
+
+  @VisibleForTesting
+  public void setTest(boolean test) {
+    this.isTest = test;
+  }
+
   public void loadDbStore() throws IOException {
     // DN startup for the first time, not registered yet,
     // so the DbVolume is not formatted.
     if (!getStorageState().equals(VolumeState.NORMAL)) {
       return;
     }
 
+    // DB is already loaded
+    if (dbLoaded.get()) {
+      LOG.info("Db is already loaded from {} for volume {}", getDbParentDir(),

Review Comment:
   Do we have a special but normal case in which a db store would be loaded 
more than once?
   If not, maybe we should `warn` here.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to