swamirishi commented on code in PR #8589:
URL: https://github.com/apache/ozone/pull/8589#discussion_r2137762763


##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/ContainerCreateInfo.java:
##########
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.metadata;
+
+import java.util.function.Supplier;
+import net.jcip.annotations.Immutable;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.hdds.utils.db.CodecBuffer;
+import org.apache.hadoop.hdds.utils.db.CodecException;
+import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
+import org.apache.hadoop.hdds.utils.db.Proto3Codec;
+import org.apache.hadoop.hdds.utils.db.StringCodec;
+import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
+import org.apache.ratis.util.MemoizedSupplier;
+
+/**
+ * ContainerCreateInfo holds the container state and other information recorded at creation time.
+ * This class is immutable.
+ */
+@Immutable
+public final class ContainerCreateInfo {
+  private static final Codec<ContainerCreateInfo> CODEC = new DelegatedCodec<>(
+      Proto3Codec.get(ContainerProtos.ContainerCreateInfo.getDefaultInstance()),
+      ContainerCreateInfo::getFromProtobuf, ContainerCreateInfo::getProtobuf,
+      ContainerCreateInfo.class);
+  private static final Codec<ContainerCreateInfo> CODEC_OLD_VERSION = new ContainerCreateInfoCodec();
+
+  private final ContainerProtos.ContainerDataProto.State state;
+  private final Supplier<ContainerProtos.ContainerCreateInfo> proto;
+
+  public static Codec<ContainerCreateInfo> getCodec() {
+    if (VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.CONTAINERID_TABLE_SCHEMA_CHANGE)) {
+      // If the container ID table schema is finalized, we can use the proto3 codec directly.
+      return CODEC;
+    }
+    return CODEC_OLD_VERSION;
+  }
+
+  private ContainerCreateInfo(ContainerProtos.ContainerDataProto.State state) {
+    this.state = state;
+    this.proto = MemoizedSupplier.valueOf(
+        () -> 
ContainerProtos.ContainerCreateInfo.newBuilder().setState(state).build());
+  }
+
+  /**
+   * Factory method for creation of ContainerCreateInfo.
+   * @param state  State
+   * @return ContainerCreateInfo.
+   */
+  public static ContainerCreateInfo valueOf(final ContainerProtos.ContainerDataProto.State state) {
+    return new ContainerCreateInfo(state);
+  }
+
+  public ContainerProtos.ContainerCreateInfo getProtobuf() {
+    return proto.get();
+  }
+
+  public static ContainerCreateInfo getFromProtobuf(ContainerProtos.ContainerCreateInfo proto) {
+    return ContainerCreateInfo.valueOf(proto.getState());
+  }
+
+  public ContainerProtos.ContainerDataProto.State getState() {
+    return state;
+  }
+
+  /**
+   * ContainerCreateInfoCodec handles compatibility for the containerIds table, where the old String-based
+   * format is replaced by the proto3 format, ContainerCreateInfo. The codec can read both formats, choosing
+   * based on the HDDSLayoutFeature. On write it always produces the proto3 format, but writes are allowed
+   * only after the feature is finalized.
+   */
+  public static class ContainerCreateInfoCodec implements Codec<ContainerCreateInfo> {

Review Comment:
   We might not need this class.
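   If the pre-finalization values are just the state name serialized as a String (which the `StringCodec` import hints at, though that is not confirmed here), a plain `DelegatedCodec` might replace the custom class. A hedged sketch; the lambda shapes are illustrative:
   
   ```java
   // Hypothetical replacement, assuming old values are plain state names stored as Strings.
   private static final Codec<ContainerCreateInfo> CODEC_OLD_VERSION = new DelegatedCodec<>(
       StringCodec.get(),
       s -> ContainerCreateInfo.valueOf(ContainerProtos.ContainerDataProto.State.valueOf(s)),
       info -> info.getState().name(),
       ContainerCreateInfo.class);
   ```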



##########
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToContainerIdsTable.java:
##########
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.upgrade;
+
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.OPEN;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import java.net.InetSocketAddress;
+import java.nio.file.Path;
+import java.util.Collections;
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.pipeline.MockPipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ozone.container.common.SCMTestUtils;
+import org.apache.hadoop.ozone.container.common.ScmTestMock;
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
+import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
+import org.apache.hadoop.ozone.container.metadata.ContainerCreateInfo;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+
+/**
+ * Tests upgrading a single datanode from HBASE_SUPPORT to CONTAINERID_TABLE_SCHEMA_CHANGE.
+ */
+public class TestDatanodeUpgradeToContainerIdsTable {
+  @TempDir
+  private Path tempFolder;
+
+  private DatanodeStateMachine dsm;
+  private ContainerDispatcher dispatcher;
+  private OzoneConfiguration conf;
+  private static final String CLUSTER_ID = "clusterID";
+
+  private RPC.Server scmRpcServer;
+  private InetSocketAddress address;
+
+  private void initTests() throws Exception {
+    conf = new OzoneConfiguration();
+    setup();
+  }
+
+  private void setup() throws Exception {
+    address = SCMTestUtils.getReuseableAddress();
+    conf.setSocketAddr(ScmConfigKeys.OZONE_SCM_NAMES, address);
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
+        tempFolder.toString());
+  }
+
+  @AfterEach
+  public void teardown() throws Exception {
+    if (scmRpcServer != null) {
+      scmRpcServer.stop();
+    }
+
+    if (dsm != null) {
+      dsm.close();
+    }
+  }
+
+  @Test
+  public void testContainerTableAccessBeforeAndAfterUpgrade() throws Exception {

Review Comment:
   Can we also add a failure case for the finalize block, maybe by mocking the iterator to throw an exception?
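   For instance, a minimal sketch of such a test, assuming Mockito (with deep stubs) is on the test classpath and the finalize action can be invoked directly; the mock wiring and test name are illustrative:
   
   ```java
   // Sketch only: assumes static imports from org.mockito.Mockito and JUnit 5 assertions.
   @Test
   public void testFinalizeActionFailsWhenIteratorThrows() throws Exception {
     WitnessedContainerMetadataStore mockStore = mock(WitnessedContainerMetadataStore.class);
     Table<ContainerID, ContainerCreateInfo> mockTable = mock(Table.class);
     when(mockStore.getContainerIdsTable()).thenReturn(mockTable);
     // Inject a failure while scanning the table during finalization.
     when(mockTable.iterator()).thenThrow(new IOException("injected iterator failure"));
   
     DatanodeStateMachine mockDsm = mock(DatanodeStateMachine.class, RETURNS_DEEP_STUBS);
     when(mockDsm.getContainer().getWitnessedContainerMetadataStore()).thenReturn(mockStore);
   
     ContainerTableSchemaFinalizeAction action = new ContainerTableSchemaFinalizeAction();
     assertThrows(IOException.class, () -> action.execute(mockDsm));
   }
   ```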



##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/ContainerTableSchemaFinalizeAction.java:
##########
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.upgrade;
+
+import static org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.CONTAINERID_TABLE_SCHEMA_CHANGE;
+import static org.apache.hadoop.ozone.upgrade.LayoutFeature.UpgradeActionType.ON_FINALIZE;
+import static org.apache.hadoop.ozone.upgrade.UpgradeActionHdds.Component.DATANODE;
+
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.upgrade.HDDSUpgradeAction;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
+import org.apache.hadoop.ozone.container.metadata.ContainerCreateInfo;
+import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStore;
+import org.apache.hadoop.ozone.upgrade.UpgradeActionHdds;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Upgrade action for the DataNode to update the schema of the containerIds table.
+ */
+@UpgradeActionHdds(feature = CONTAINERID_TABLE_SCHEMA_CHANGE, component = DATANODE, type = ON_FINALIZE)
+public class ContainerTableSchemaFinalizeAction
+    implements HDDSUpgradeAction<DatanodeStateMachine> {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ContainerTableSchemaFinalizeAction.class);
+
+  @Override
+  public void execute(DatanodeStateMachine arg) throws Exception {
+    WitnessedContainerMetadataStore metadataStore = arg.getContainer().getWitnessedContainerMetadataStore();
+    Table<ContainerID, ContainerCreateInfo> containerIdsTable = metadataStore.getContainerIdsTable();
+
+    // During finalization, entries are read in the old format and written back to the same table
+    // in the new format via the customized codec.
+    try (TableIterator<ContainerID, ? extends Table.KeyValue<ContainerID, ContainerCreateInfo>> iterator =
+             containerIdsTable.iterator()) {
+      while (iterator.hasNext()) {
+        Table.KeyValue<ContainerID, ContainerCreateInfo> next = iterator.next();
+        containerIdsTable.put(next.getKey(), next.getValue());

Review Comment:
   We are writing back to the table without a transaction. This code might run multiple times, and if the finalize block fails partway through (e.g. a JVM crash), we might not be able to deserialize the entries correctly. Let us add a RocksDB batch.
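   A minimal sketch of what that could look like, assuming the store exposes its underlying `DBStore` via a `getStore()` accessor (the accessor name is an assumption; adjust to the real API):
   
   ```java
   // Sketch only: stage all rewrites in one BatchOperation and commit atomically.
   try (TableIterator<ContainerID, ? extends Table.KeyValue<ContainerID, ContainerCreateInfo>> iterator =
            containerIdsTable.iterator();
        BatchOperation batch = metadataStore.getStore().initBatchOperation()) {
     while (iterator.hasNext()) {
       Table.KeyValue<ContainerID, ContainerCreateInfo> next = iterator.next();
       // Stage each rewritten entry in the batch instead of a direct put.
       containerIdsTable.putWithBatch(batch, next.getKey(), next.getValue());
     }
     // Commit in one shot so a crash mid-finalization leaves the table consistent.
     metadataStore.getStore().commitBatchOperation(batch);
   }
   ```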



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

