ashishkumar50 commented on code in PR #5662:
URL: https://github.com/apache/ozone/pull/5662#discussion_r1446870325


##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java:
##########
@@ -354,14 +367,6 @@ public void shutdown() {
 
   private BlockData getBlockByID(DBHandle db, BlockID blockID,
       KeyValueContainerData containerData) throws IOException {
-    String blockKey = containerData.getBlockKey(blockID.getLocalID());
-
-    BlockData blockData = db.getStore().getBlockDataTable().get(blockKey);
-    if (blockData == null) {
-      throw new StorageContainerException(NO_SUCH_BLOCK_ERR_MSG +
-              " BlockID : " + blockID, NO_SUCH_BLOCK);
-    }
-
-    return blockData;
+    return db.getStore().getBlockByID(blockID, containerData);

Review Comment:
   `blockKey` can be passed instead of `containerData`, since the store only needs the key to look up the block.
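   For example, a sketch only; the single-key `getBlockByID(BlockID, String)` overload on the store is hypothetical:
```java
private BlockData getBlockByID(DBHandle db, BlockID blockID,
    KeyValueContainerData containerData) throws IOException {
  // Resolve the RocksDB block key here, so the store layer only
  // needs the key and never touches the container metadata.
  String blockKey = containerData.getBlockKey(blockID.getLocalID());
  return db.getStore().getBlockByID(blockID, blockKey);
}
```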



##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java:
##########
@@ -224,4 +235,185 @@ public void compactionIfNeeded() throws Exception {
       }
     }
   }
+
+  @Override
+  public BlockData getBlockByID(BlockID blockID,
+      KeyValueContainerData containerData) throws IOException {
+    String blockKey = containerData.getBlockKey(blockID.getLocalID());
+
+    // check last chunk table
+    BlockData lastChunk = getLastChunkInfoTable().get(blockKey);
+    // check block data table
+    BlockData blockData = getBlockDataTable().get(blockKey);
+
+    if (blockData == null) {
+      if (lastChunk == null) {
+        throw new StorageContainerException(
+            NO_SUCH_BLOCK_ERR_MSG + " BlockID : " + blockID, NO_SUCH_BLOCK);
+      } else {
+        LOG.debug("blockData=(null), lastChunk={}", lastChunk.getChunks());
+        return lastChunk;
+      }
+    } else {
+      if (lastChunk != null) {
+        reconcilePartialChunks(lastChunk, blockData);
+      } else {
+        LOG.debug("blockData={}, lastChunk=(null)", blockData.getChunks());
+      }
+    }
+
+    return blockData;
+  }
+
+  private static void reconcilePartialChunks(

Review Comment:
   I think there is no need for a static method here; a plain instance method would do.



##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java:
##########
@@ -224,4 +235,185 @@ public void compactionIfNeeded() throws Exception {
       }
     }
   }
+
+  @Override
+  public BlockData getBlockByID(BlockID blockID,
+      KeyValueContainerData containerData) throws IOException {
+    String blockKey = containerData.getBlockKey(blockID.getLocalID());
+
+    // check last chunk table
+    BlockData lastChunk = getLastChunkInfoTable().get(blockKey);
+    // check block data table
+    BlockData blockData = getBlockDataTable().get(blockKey);
+
+    if (blockData == null) {
+      if (lastChunk == null) {
+        throw new StorageContainerException(
+            NO_SUCH_BLOCK_ERR_MSG + " BlockID : " + blockID, NO_SUCH_BLOCK);
+      } else {
+        LOG.debug("blockData=(null), lastChunk={}", lastChunk.getChunks());
+        return lastChunk;
+      }
+    } else {
+      if (lastChunk != null) {
+        reconcilePartialChunks(lastChunk, blockData);
+      } else {
+        LOG.debug("blockData={}, lastChunk=(null)", blockData.getChunks());
+      }
+    }
+
+    return blockData;
+  }
+
+  private static void reconcilePartialChunks(
+      BlockData lastChunk, BlockData blockData) {
+    LOG.debug("blockData={}, lastChunk={}",
+        blockData.getChunks(), lastChunk.getChunks());
+    Preconditions.checkState(lastChunk.getChunks().size() == 1);
+    ContainerProtos.ChunkInfo lastChunkInBlockData =
+        blockData.getChunks().get(blockData.getChunks().size() - 1);
+    Preconditions.checkState(
+        lastChunkInBlockData.getOffset() + lastChunkInBlockData.getLen()
+            == lastChunk.getChunks().get(0).getOffset(),
+        "chunk offset does not match");
+
+    // append last partial chunk to the block data
+    List<ContainerProtos.ChunkInfo> chunkInfos =
+        new ArrayList<>(blockData.getChunks());
+    chunkInfos.add(lastChunk.getChunks().get(0));
+    blockData.setChunks(chunkInfos);
+
+    blockData.setBlockCommitSequenceId(
+        lastChunk.getBlockCommitSequenceId());
+  }
+
+  private static boolean isPartialChunkList(BlockData data) {
+    return data.getMetadata().containsKey(INCREMENTAL_CHUNK_LIST);
+  }
+
+  private static boolean isFullChunk(ContainerProtos.ChunkInfo chunkInfo) {
+    for (ContainerProtos.KeyValue kv: chunkInfo.getMetadataList()) {
+      if (kv.getKey().equals(FULL_CHUNK)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // if eob or if the last chunk is full,
+  private static boolean shouldAppendLastChunk(boolean endOfBlock,
+      BlockData data) {
+    if (endOfBlock || data.getChunks().isEmpty()) {
+      return true;
+    }
+    return isFullChunk(data.getChunks().get(data.getChunks().size() - 1));
+  }
+
+  public void putBlockByID(BatchOperation batch, boolean incremental,
+      long localID, BlockData data, KeyValueContainerData containerData,
+      boolean endOfBlock) throws IOException {
+    if (!incremental && !isPartialChunkList(data)) {
+      // Case (1) old client: override chunk list.
+      getBlockDataTable().putWithBatch(
+          batch, containerData.getBlockKey(localID), data);
+    } else if (shouldAppendLastChunk(endOfBlock, data)) {
+      moveLastChunkToBlockData(batch, localID, data, containerData);
+    } else {
+      // incremental chunk list,
+      // not end of block, has partial chunks
+      putBlockWithPartialChunks(batch, localID, data, containerData);
+    }
+  }
+
+  private void moveLastChunkToBlockData(BatchOperation batch, long localID,
+      BlockData data, KeyValueContainerData containerData) throws IOException {
+    // if eob or if the last chunk is full,
+    // the 'data' is full so append it to the block table's chunk info
+    // and then remove from lastChunkInfo
+    BlockData blockData = getBlockDataTable().get(
+        containerData.getBlockKey(localID));
+    if (blockData == null) {
+      // Case 2.1: if the block did not have full chunks before,
+      // the block's chunks are what was received from the client this time.
+      blockData = data;
+    } else {
+      // case 2.2 the block already has some full chunks
+      List<ContainerProtos.ChunkInfo> chunkInfoList = blockData.getChunks();
+      blockData.setChunks(new ArrayList<>(chunkInfoList));
+      for (ContainerProtos.ChunkInfo chunk : data.getChunks()) {
+        blockData.addChunk(chunk);
+      }
+      blockData.setBlockCommitSequenceId(data.getBlockCommitSequenceId());
+
+      /*int next = 0;
+      for (ContainerProtos.ChunkInfo info : chunkInfoList) {
+        assert info.getOffset() == next;

Review Comment:
   Please remove the commented-out code.



##########
hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockDatanodeStorage.java:
##########
@@ -77,31 +159,56 @@ public void writeChunk(
     if (exception != null) {
       throw exception;
     }
-    data.put(createKey(blockID, chunkInfo),
-        ByteString.copyFrom(bytes.toByteArray()));
-    chunks.put(createKey(blockID, chunkInfo), chunkInfo);
+    String blockKey = createKey(blockID);
+    ByteString block;
+    if (data.containsKey(blockKey)) {
+      block = data.get(blockKey);
+      assert block.size() == chunkInfo.getOffset();
+      data.put(blockKey, block.concat(bytes));
+    } else {
+      assert chunkInfo.getOffset() == 0;
+      data.put(blockKey, bytes);
+    }
+
     fullBlockData
         .put(new BlockID(blockID.getContainerID(), blockID.getLocalID()),
-            fullBlockData.getOrDefault(blockID, "")
+            fullBlockData.getOrDefault(toBlockID(blockID), "")
                 .concat(bytes.toStringUtf8()));
   }
 
   public ChunkInfo readChunkInfo(
       DatanodeBlockID blockID,
       ChunkInfo chunkInfo) {
-    return chunks.get(createKey(blockID, chunkInfo));
+    BlockData blockData = getBlock(blockID);
+    for (ChunkInfo info : blockData.getChunksList()) {
+      if (info.getLen() == chunkInfo.getLen() &&
+          info.getOffset() == chunkInfo.getOffset()) {
+        return info;
+      }
+    }
+    throw new AssertionError("chunk " + chunkInfo + " not found");
   }
 
   public ByteString readChunkData(
       DatanodeBlockID blockID,
       ChunkInfo chunkInfo) {
-    return data.get(createKey(blockID, chunkInfo));
-
+    //return data.get(createKey(blockID, chunkInfo));
+    LOG.debug("readChunkData: blockID=" + createKey(blockID) +
+        " chunkInfo offset=" + chunkInfo.getOffset() +
+        " chunkInfo len=" + chunkInfo.getLen());
+    ByteString str = data.get(createKey(blockID)).substring(
+        (int)chunkInfo.getOffset(),
+        (int)chunkInfo.getOffset() + (int)chunkInfo.getLen());
+    return str;
   }
 
-  private String createKey(DatanodeBlockID blockId, ChunkInfo chunkInfo) {
+  /*private String createKey(DatanodeBlockID blockId, ChunkInfo chunkInfo) {

Review Comment:
   This method is now unused and can be removed.



##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java:
##########
@@ -224,4 +235,185 @@ public void compactionIfNeeded() throws Exception {
       }
     }
   }
+
+  @Override
+  public BlockData getBlockByID(BlockID blockID,
+      KeyValueContainerData containerData) throws IOException {
+    String blockKey = containerData.getBlockKey(blockID.getLocalID());
+
+    // check last chunk table
+    BlockData lastChunk = getLastChunkInfoTable().get(blockKey);

Review Comment:
   If the incremental chunk list feature is disabled, we can avoid the extra getLastChunkInfoTable() lookup.
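   A minimal sketch, assuming a feature flag such as `isIncrementalChunkListEnabled()` is available (the flag name is an assumption):
```java
// Skip the extra last-chunk lookup when incremental chunk lists are
// disabled; only the block data table can then contain the block.
BlockData lastChunk = null;
if (isIncrementalChunkListEnabled()) {
  lastChunk = getLastChunkInfoTable().get(blockKey);
}
```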



##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java:
##########
@@ -224,4 +235,185 @@ public void compactionIfNeeded() throws Exception {
       }
     }
   }
+
+  @Override
+  public BlockData getBlockByID(BlockID blockID,
+      KeyValueContainerData containerData) throws IOException {
+    String blockKey = containerData.getBlockKey(blockID.getLocalID());
+
+    // check last chunk table
+    BlockData lastChunk = getLastChunkInfoTable().get(blockKey);
+    // check block data table
+    BlockData blockData = getBlockDataTable().get(blockKey);
+
+    if (blockData == null) {
+      if (lastChunk == null) {
+        throw new StorageContainerException(
+            NO_SUCH_BLOCK_ERR_MSG + " BlockID : " + blockID, NO_SUCH_BLOCK);
+      } else {
+        LOG.debug("blockData=(null), lastChunk={}", lastChunk.getChunks());
+        return lastChunk;
+      }
+    } else {
+      if (lastChunk != null) {
+        reconcilePartialChunks(lastChunk, blockData);
+      } else {
+        LOG.debug("blockData={}, lastChunk=(null)", blockData.getChunks());
+      }
+    }
+
+    return blockData;
+  }
+
+  private static void reconcilePartialChunks(
+      BlockData lastChunk, BlockData blockData) {
+    LOG.debug("blockData={}, lastChunk={}",
+        blockData.getChunks(), lastChunk.getChunks());
+    Preconditions.checkState(lastChunk.getChunks().size() == 1);
+    ContainerProtos.ChunkInfo lastChunkInBlockData =
+        blockData.getChunks().get(blockData.getChunks().size() - 1);
+    Preconditions.checkState(
+        lastChunkInBlockData.getOffset() + lastChunkInBlockData.getLen()
+            == lastChunk.getChunks().get(0).getOffset(),
+        "chunk offset does not match");
+
+    // append last partial chunk to the block data
+    List<ContainerProtos.ChunkInfo> chunkInfos =
+        new ArrayList<>(blockData.getChunks());
+    chunkInfos.add(lastChunk.getChunks().get(0));
+    blockData.setChunks(chunkInfos);
+
+    blockData.setBlockCommitSequenceId(
+        lastChunk.getBlockCommitSequenceId());
+  }
+
+  private static boolean isPartialChunkList(BlockData data) {
+    return data.getMetadata().containsKey(INCREMENTAL_CHUNK_LIST);
+  }
+
+  private static boolean isFullChunk(ContainerProtos.ChunkInfo chunkInfo) {
+    for (ContainerProtos.KeyValue kv: chunkInfo.getMetadataList()) {
+      if (kv.getKey().equals(FULL_CHUNK)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // if eob or if the last chunk is full,
+  private static boolean shouldAppendLastChunk(boolean endOfBlock,
+      BlockData data) {
+    if (endOfBlock || data.getChunks().isEmpty()) {
+      return true;
+    }
+    return isFullChunk(data.getChunks().get(data.getChunks().size() - 1));
+  }
+
+  public void putBlockByID(BatchOperation batch, boolean incremental,
+      long localID, BlockData data, KeyValueContainerData containerData,
+      boolean endOfBlock) throws IOException {
+    if (!incremental && !isPartialChunkList(data)) {
+      // Case (1) old client: override chunk list.
+      getBlockDataTable().putWithBatch(
+          batch, containerData.getBlockKey(localID), data);
+    } else if (shouldAppendLastChunk(endOfBlock, data)) {
+      moveLastChunkToBlockData(batch, localID, data, containerData);
+    } else {
+      // incremental chunk list,
+      // not end of block, has partial chunks
+      putBlockWithPartialChunks(batch, localID, data, containerData);
+    }
+  }
+
+  private void moveLastChunkToBlockData(BatchOperation batch, long localID,
+      BlockData data, KeyValueContainerData containerData) throws IOException {
+    // if eob or if the last chunk is full,
+    // the 'data' is full so append it to the block table's chunk info
+    // and then remove from lastChunkInfo
+    BlockData blockData = getBlockDataTable().get(
+        containerData.getBlockKey(localID));
+    if (blockData == null) {
+      // Case 2.1: if the block did not have full chunks before,
+      // the block's chunks are what was received from the client this time.
+      blockData = data;
+    } else {
+      // case 2.2 the block already has some full chunks
+      List<ContainerProtos.ChunkInfo> chunkInfoList = blockData.getChunks();
+      blockData.setChunks(new ArrayList<>(chunkInfoList));
+      for (ContainerProtos.ChunkInfo chunk : data.getChunks()) {
+        blockData.addChunk(chunk);
+      }
+      blockData.setBlockCommitSequenceId(data.getBlockCommitSequenceId());
+
+      /*int next = 0;
+      for (ContainerProtos.ChunkInfo info : chunkInfoList) {
+        assert info.getOffset() == next;
+        next += info.getLen();
+      }*/
+    }
+    // delete the entry from last chunk info table
+    getLastChunkInfoTable().deleteWithBatch(
+        batch, containerData.getBlockKey(localID));
+    // update block data table
+    getBlockDataTable().putWithBatch(batch,
+        containerData.getBlockKey(localID), blockData);
+  }
+
+  private void putBlockWithPartialChunks(BatchOperation batch, long localID,
+      BlockData data, KeyValueContainerData containerData) throws IOException {
+    if (data.getChunks().size() == 1) {
+      // Case (3.1) replace/update the last chunk info table
+      getLastChunkInfoTable().putWithBatch(
+          batch, containerData.getBlockKey(localID), data);
+    } else {
+      int lastChunkIndex = data.getChunks().size() - 1;
+      // received more than one chunk this time
+      List<ContainerProtos.ChunkInfo> lastChunkInfo =
+          Collections.singletonList(
+              data.getChunks().get(lastChunkIndex));
+      BlockData blockData = getBlockDataTable().get(
+          containerData.getBlockKey(localID));
+      if (blockData == null) {
+        // Case 3.2: if the block does not exist in the block data table
+        List<ContainerProtos.ChunkInfo> chunkInfos =
+            new ArrayList<>(data.getChunks());
+        chunkInfos.remove(lastChunkIndex);
+        data.setChunks(chunkInfos);
+        blockData = data;
+        LOG.debug("block {} does not have full chunks yet. Adding the " +
+            "chunks to it {}", localID, blockData);
+      } else {
+        // Case 3.3: if the block exists in the block data table,
+        // append chunks till except the last one (supposedly partial)
+        List<ContainerProtos.ChunkInfo> chunkInfos =
+            new ArrayList<>(blockData.getChunks());
+
+        LOG.debug("blockData.getChunks()={}", chunkInfos);
+        LOG.debug("data.getChunks()={}", data.getChunks());
+
+        for (int i = 0; i < lastChunkIndex; i++) {
+          chunkInfos.add(data.getChunks().get(i));
+        }
+        blockData.setChunks(chunkInfos);
+        blockData.setBlockCommitSequenceId(data.getBlockCommitSequenceId());
+
+        /*int next = 0;
+        for (ContainerProtos.ChunkInfo info : chunkInfos) {
+          if (info.getOffset() != next) {
+
+            LOG.error("blockData.getChunks()={}", chunkInfos);
+            LOG.error("data.getChunks()={}", data.getChunks());
+          }
+          assert info.getOffset() == next;
+          next += info.getLen();
+        }*/
+      }
+      getBlockDataTable().putWithBatch(batch,
+          containerData.getBlockKey(localID), blockData);
+      // update the last partial chunk
+      data.setChunks(lastChunkInfo);
+      getLastChunkInfoTable().putWithBatch(
+          batch, containerData.getBlockKey(localID), data);

Review Comment:
   Can we just define and store the last chunk on its own, instead of reusing the full blockData object as a carrier for the last chunk?
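   A possible sketch, using only names already present in this diff; it builds a minimal `BlockData` holding just the trailing partial chunk, so the incoming `data` object is never mutated:
```java
// Persist only the trailing partial chunk under the block key;
// 'data' itself stays untouched for the caller.
BlockData lastChunkData = new BlockData(data.getBlockID());
lastChunkData.setChunks(lastChunkInfo);
lastChunkData.setBlockCommitSequenceId(data.getBlockCommitSequenceId());
getLastChunkInfoTable().putWithBatch(
    batch, containerData.getBlockKey(localID), lastChunkData);
```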



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

