swamirishi commented on code in PR #8589:
URL: https://github.com/apache/ozone/pull/8589#discussion_r2144252133


##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/ContainerCreateInfo.java:
##########
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.metadata;
+
+import java.util.function.Supplier;
+import net.jcip.annotations.Immutable;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.hdds.utils.db.CodecBuffer;
+import org.apache.hadoop.hdds.utils.db.CodecException;
+import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
+import org.apache.hadoop.hdds.utils.db.Proto3Codec;
+import org.apache.hadoop.hdds.utils.db.StringCodec;
+import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
+import org.apache.ratis.util.MemoizedSupplier;
+
+/**
+ * ContainerCreateInfo is a class that holds information about the state and 
other information on creation
+ * This class is immutable.
+ */
+@Immutable
+public final class ContainerCreateInfo {
+  private static final Codec<ContainerCreateInfo> CODEC = new DelegatedCodec<>(
+      
Proto3Codec.get(ContainerProtos.ContainerCreateInfo.getDefaultInstance()),
+      ContainerCreateInfo::getFromProtobuf, ContainerCreateInfo::getProtobuf,
+      ContainerCreateInfo.class);
+  private static final Codec<ContainerCreateInfo> CODEC_OLD_VERSION = new 
ContainerCreateInfoCodec();
+
+  private final ContainerProtos.ContainerDataProto.State state;
+  private final Supplier<ContainerProtos.ContainerCreateInfo> proto;
+
+  public static Codec<ContainerCreateInfo> getCodec() {
+    if 
(VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.CONTAINERID_TABLE_SCHEMA_CHANGE))
 {
+      // If the container ID table schema is finalized, we can use the proto3 
codec directly.
+      return CODEC;
+    }
+    return CODEC_OLD_VERSION;
+  }
+
+  public static Codec<ContainerCreateInfo> getNewCodec() {
+    return CODEC;
+  }
+
+  private ContainerCreateInfo(ContainerProtos.ContainerDataProto.State state) {
+    this.state = state;
+    this.proto = MemoizedSupplier.valueOf(
+        () -> 
ContainerProtos.ContainerCreateInfo.newBuilder().setState(state).build());
+  }
+
+  /**
+   * Factory method for creation of ContainerCreateInfo.
+   * @param state  State
+   * @return ContainerCreateInfo.
+   */
+  public static ContainerCreateInfo valueOf(final 
ContainerProtos.ContainerDataProto.State state) {

Review Comment:
   Can we use a builder here instead? We will eventually have more fields 
in this class.



##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/ContainerCreateInfo.java:
##########
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.metadata;
+
+import java.util.function.Supplier;
+import net.jcip.annotations.Immutable;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.hdds.utils.db.CodecBuffer;
+import org.apache.hadoop.hdds.utils.db.CodecException;
+import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
+import org.apache.hadoop.hdds.utils.db.Proto3Codec;
+import org.apache.hadoop.hdds.utils.db.StringCodec;
+import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
+import org.apache.ratis.util.MemoizedSupplier;
+
+/**
+ * ContainerCreateInfo is a class that holds information about the state and 
other information on creation
+ * This class is immutable.
+ */
+@Immutable
+public final class ContainerCreateInfo {
+  private static final Codec<ContainerCreateInfo> CODEC = new DelegatedCodec<>(
+      
Proto3Codec.get(ContainerProtos.ContainerCreateInfo.getDefaultInstance()),
+      ContainerCreateInfo::getFromProtobuf, ContainerCreateInfo::getProtobuf,
+      ContainerCreateInfo.class);
+  private static final Codec<ContainerCreateInfo> CODEC_OLD_VERSION = new 
ContainerCreateInfoCodec();
+
+  private final ContainerProtos.ContainerDataProto.State state;
+  private final Supplier<ContainerProtos.ContainerCreateInfo> proto;
+
+  public static Codec<ContainerCreateInfo> getCodec() {
+    if 
(VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.CONTAINERID_TABLE_SCHEMA_CHANGE))
 {
+      // If the container ID table schema is finalized, we can use the proto3 
codec directly.
+      return CODEC;
+    }
+    return CODEC_OLD_VERSION;
+  }
+
+  public static Codec<ContainerCreateInfo> getNewCodec() {
+    return CODEC;
+  }
+
+  private ContainerCreateInfo(ContainerProtos.ContainerDataProto.State state) {
+    this.state = state;
+    this.proto = MemoizedSupplier.valueOf(
+        () -> 
ContainerProtos.ContainerCreateInfo.newBuilder().setState(state).build());
+  }
+
+  /**
+   * Factory method for creation of ContainerCreateInfo.
+   * @param state  State
+   * @return ContainerCreateInfo.
+   */
+  public static ContainerCreateInfo valueOf(final 
ContainerProtos.ContainerDataProto.State state) {
+    return new ContainerCreateInfo(state);
+  }
+
+  public ContainerProtos.ContainerCreateInfo getProtobuf() {
+    return proto.get();
+  }
+
+  public static ContainerCreateInfo 
getFromProtobuf(ContainerProtos.ContainerCreateInfo proto) {
+    return ContainerCreateInfo.valueOf(proto.getState());
+  }
+
+  public ContainerProtos.ContainerDataProto.State getState() {
+    return state;
+  }
+
+  /**
+   * ContainerCreateInfoCodec handles compatibility for containerIds Table, 
where old format from String is changed
+   * to proto3 format, ContainerCreateInfo. So this codec can read both 
formats based on the HDDSLayoutFeature.
+   * For write case, it will create ContainerCreateInfo in proto3 format, but 
write is allowed only after the
+   * finalization  of feature.
+   */
+  public static class ContainerCreateInfoCodec implements 
Codec<ContainerCreateInfo> {
+    @Override
+    public Class<ContainerCreateInfo> getTypeClass() {
+      return ContainerCreateInfo.class;
+    }
+
+    @Override
+    public boolean supportCodecBuffer() {
+      return CODEC.supportCodecBuffer();

Review Comment:
   We also need an if/else here:
   ```suggestion
         if 
(VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.CONTAINERID_TABLE_SCHEMA_CHANGE))
 {
         return CODEC.supportCodecBuffer();
         }
         return StringCodec.get().supportCodecBuffer();
   ```



##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/ContainerTableSchemaFinalizeAction.java:
##########
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.upgrade;
+
+import static 
org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.CONTAINERID_TABLE_SCHEMA_CHANGE;
+import static 
org.apache.hadoop.ozone.upgrade.LayoutFeature.UpgradeActionType.ON_FINALIZE;
+import static 
org.apache.hadoop.ozone.upgrade.UpgradeActionHdds.Component.DATANODE;
+
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.upgrade.HDDSUpgradeAction;
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.hdds.utils.db.TypedTable;
+import 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
+import org.apache.hadoop.ozone.container.metadata.ContainerCreateInfo;
+import 
org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStore;
+import org.apache.hadoop.ozone.upgrade.UpgradeActionHdds;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Upgrade Action for DataNode for update the table schema data of 
containerIds Table.
+ */
+@UpgradeActionHdds(feature = CONTAINERID_TABLE_SCHEMA_CHANGE, component = 
DATANODE, type = ON_FINALIZE)
+public class ContainerTableSchemaFinalizeAction
+    implements HDDSUpgradeAction<DatanodeStateMachine> {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ContainerTableSchemaFinalizeAction.class);
+
+  @Override
+  public void execute(DatanodeStateMachine arg) throws Exception {
+    WitnessedContainerMetadataStore metadataStore = 
arg.getContainer().getWitnessedContainerMetadataStore();
+    // containerIdsTable is in old format where String value type is mapped to 
ContainerCreateInfo before finalize
+    Table<ContainerID, ContainerCreateInfo> containerIdsTable = 
metadataStore.getContainerIdsTable();
+
+    // to write to new format, we need to create a new table with explicit 
codec before finalize
+    TypedTable<ContainerID, ContainerCreateInfo> writeNewFormatTable = 
metadataStore.getStore().getTable(
+            containerIdsTable.getName(), ContainerID.getCodec(), 
ContainerCreateInfo.getNewCodec());
+    try (BatchOperation batch = metadataStore.getStore().initBatchOperation()) 
{
+      try (TableIterator<ContainerID, ? extends Table.KeyValue<ContainerID, 
ContainerCreateInfo>> iterator =

Review Comment:
   No need for two try-with-resources blocks here — a single one is enough. The 
iterator could still be open while we commit the batch operation.



##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/ContainerTableSchemaFinalizeAction.java:
##########
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.upgrade;
+
+import static 
org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.CONTAINERID_TABLE_SCHEMA_CHANGE;
+import static 
org.apache.hadoop.ozone.upgrade.LayoutFeature.UpgradeActionType.ON_FINALIZE;
+import static 
org.apache.hadoop.ozone.upgrade.UpgradeActionHdds.Component.DATANODE;
+
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.upgrade.HDDSUpgradeAction;
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.hdds.utils.db.TypedTable;
+import 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
+import org.apache.hadoop.ozone.container.metadata.ContainerCreateInfo;
+import 
org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStore;
+import org.apache.hadoop.ozone.upgrade.UpgradeActionHdds;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Upgrade Action for DataNode for update the table schema data of 
containerIds Table.
+ */
+@UpgradeActionHdds(feature = CONTAINERID_TABLE_SCHEMA_CHANGE, component = 
DATANODE, type = ON_FINALIZE)
+public class ContainerTableSchemaFinalizeAction
+    implements HDDSUpgradeAction<DatanodeStateMachine> {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ContainerTableSchemaFinalizeAction.class);
+
+  @Override
+  public void execute(DatanodeStateMachine arg) throws Exception {
+    WitnessedContainerMetadataStore metadataStore = 
arg.getContainer().getWitnessedContainerMetadataStore();
+    // containerIdsTable is in old format where String value type is mapped to 
ContainerCreateInfo before finalize
+    Table<ContainerID, ContainerCreateInfo> containerIdsTable = 
metadataStore.getContainerIdsTable();
+
+    // to write to new format, we need to create a new table with explicit 
codec before finalize
+    TypedTable<ContainerID, ContainerCreateInfo> writeNewFormatTable = 
metadataStore.getStore().getTable(
+            containerIdsTable.getName(), ContainerID.getCodec(), 
ContainerCreateInfo.getNewCodec());
+    try (BatchOperation batch = metadataStore.getStore().initBatchOperation()) 
{
+      try (TableIterator<ContainerID, ? extends Table.KeyValue<ContainerID, 
ContainerCreateInfo>> iterator =
+                   containerIdsTable.iterator()) {
+        while (iterator.hasNext()) {
+          Table.KeyValue<ContainerID, ContainerCreateInfo> next = 
iterator.next();
+          writeNewFormatTable.putWithBatch(batch, next.getKey(), 
next.getValue());
+        }
+      }
+      metadataStore.getStore().commitBatchOperation(batch);

Review Comment:
   In hindsight this will also not work, because updating the version file
   
https://github.com/apache/ozone/blob/3d4b5fdf553500fcf21eab5dfd34dea297bb2671/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java#L260
   and the commitBatchOperation have to happen atomically. The only viable 
approach would be to define a new column family for the finalized flow and read 
from it once finalized. So this action has to copy the data into the proto 
format (if the new column family already exists, it should be truncated and 
fully repopulated, to handle a partially completed copy), and the new finalized 
version should read from the new column family — that would make the switch 
atomic.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to