runzhiwang commented on a change in pull request #1828:
URL: https://github.com/apache/ozone/pull/1828#discussion_r561619668
##########
File path:
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
##########
@@ -270,15 +300,8 @@ private boolean
isTransactionFailed(DeleteBlockTransactionResult result) {
@Override
public void addTransaction(long containerID, List<Long> blocks)
throws IOException {
- lock.lock();
- try {
- Long nextTXID = scmMetadataStore.getNextDeleteBlockTXID();
- DeletedBlocksTransaction tx =
- constructNewTransaction(nextTXID, containerID, blocks);
- scmMetadataStore.getDeletedBlocksTXTable().put(nextTXID, tx);
- } finally {
- lock.unlock();
- }
+ Map<Long, List<Long>> map = Collections.singletonMap(containerID, blocks);
+ addTransactions(map);
Review comment:
This can be done in my DeleteBlock PR.
##########
File path:
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
##########
@@ -270,15 +300,8 @@ private boolean
isTransactionFailed(DeleteBlockTransactionResult result) {
@Override
public void addTransaction(long containerID, List<Long> blocks)
throws IOException {
- lock.lock();
- try {
- Long nextTXID = scmMetadataStore.getNextDeleteBlockTXID();
- DeletedBlocksTransaction tx =
- constructNewTransaction(nextTXID, containerID, blocks);
- scmMetadataStore.getDeletedBlocksTXTable().put(nextTXID, tx);
- } finally {
- lock.unlock();
- }
+ Map<Long, List<Long>> map = Collections.singletonMap(containerID, blocks);
+ addTransactions(map);
Review comment:
@GlenGeng I have already done this in my DeleteBlock PR.
##########
File path:
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
##########
@@ -312,19 +335,19 @@ public int getNumOfValidTransactions() throws IOException
{
public void addTransactions(Map<Long, List<Long>> containerBlocksMap)
throws IOException {
lock.lock();
- try {
- try(BatchOperation batch =
- scmMetadataStore.getStore().initBatchOperation()) {
- for (Map.Entry< Long, List< Long > > entry :
- containerBlocksMap.entrySet()) {
- long nextTXID = scmMetadataStore.getNextDeleteBlockTXID();
- DeletedBlocksTransaction tx = constructNewTransaction(nextTXID,
- entry.getKey(), entry.getValue());
- scmMetadataStore.getDeletedBlocksTXTable().putWithBatch(batch,
- nextTXID, tx);
- }
- scmMetadataStore.getStore().commitBatchOperation(batch);
+ try (BatchOperation batch = scmMetadataStore.getStore()
+ .initBatchOperation()) {
+ for (Map.Entry<Long, List<Long>> entry : containerBlocksMap.entrySet()) {
+ long nextTXID = getNextDeleteBlockTXID();
+ scmMetadataStore.getDeletedBlocksTXTable().putWithBatch(batch,
nextTXID,
+ constructNewTransaction(nextTXID, entry.getKey(),
+ entry.getValue()));
}
+ scmMetadataStore.getDeletedBlocksTXTable()
+ .putWithBatch(batch, largestTxnIdHolderKey,
+ DeletedBlocksTransaction.newBuilder().setTxID(getCurrentTXID())
+ .setContainerID(1).setCount(1).build());
Review comment:
@lokeshj1703 I think we need some comments here. Besides, could we extract the
following into a new method? Also, since the containerID and count are dummy
values, we could define a constant such as DUMMY_CONTAINER_ID = 1.
```
scmMetadataStore.getDeletedBlocksTXTable()
.putWithBatch(batch, largestTxnIdHolderKey,
DeletedBlocksTransaction.newBuilder().setTxID(getCurrentTXID())
.setContainerID(1).setCount(1).build());
```
##########
File path:
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
##########
@@ -312,19 +335,19 @@ public int getNumOfValidTransactions() throws IOException
{
public void addTransactions(Map<Long, List<Long>> containerBlocksMap)
throws IOException {
lock.lock();
- try {
- try(BatchOperation batch =
- scmMetadataStore.getStore().initBatchOperation()) {
- for (Map.Entry< Long, List< Long > > entry :
- containerBlocksMap.entrySet()) {
- long nextTXID = scmMetadataStore.getNextDeleteBlockTXID();
- DeletedBlocksTransaction tx = constructNewTransaction(nextTXID,
- entry.getKey(), entry.getValue());
- scmMetadataStore.getDeletedBlocksTXTable().putWithBatch(batch,
- nextTXID, tx);
- }
- scmMetadataStore.getStore().commitBatchOperation(batch);
+ try (BatchOperation batch = scmMetadataStore.getStore()
+ .initBatchOperation()) {
+ for (Map.Entry<Long, List<Long>> entry : containerBlocksMap.entrySet()) {
+ long nextTXID = getNextDeleteBlockTXID();
+ scmMetadataStore.getDeletedBlocksTXTable().putWithBatch(batch,
nextTXID,
+ constructNewTransaction(nextTXID, entry.getKey(),
+ entry.getValue()));
}
+ scmMetadataStore.getDeletedBlocksTXTable()
+ .putWithBatch(batch, largestTxnIdHolderKey,
+ DeletedBlocksTransaction.newBuilder().setTxID(getCurrentTXID())
+ .setContainerID(1).setCount(1).build());
Review comment:
@lokeshj1703 Maybe we need some comments here. Besides, could we extract the
following into a new method? Also, since the containerID and count are dummy
values, we could define a constant such as DUMMY_CONTAINER_ID = 1.
```
scmMetadataStore.getDeletedBlocksTXTable()
.putWithBatch(batch, largestTxnIdHolderKey,
DeletedBlocksTransaction.newBuilder().setTxID(getCurrentTXID())
.setContainerID(1).setCount(1).build());
```
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]