This is an automated email from the ASF dual-hosted git repository.
sammichen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 7923e25be4 HDDS-7441. Rename function names of retrieving metadata
keys (#3918)
7923e25be4 is described below
commit 7923e25be44ca89e581aa493443f9f9310483883
Author: Symious <[email protected]>
AuthorDate: Fri Jan 13 14:35:09 2023 +0800
HDDS-7441. Rename function names of retrieving metadata keys (#3918)
---
.../commandhandler/DeleteBlocksCommandHandler.java | 13 ++++++-----
.../container/keyvalue/KeyValueContainerCheck.java | 2 +-
.../container/keyvalue/KeyValueContainerData.java | 27 +++++++++++-----------
.../KeyValueContainerMetadataInspector.java | 14 +++++------
.../keyvalue/helpers/KeyValueContainerUtil.java | 10 ++++----
.../container/keyvalue/impl/BlockManagerImpl.java | 14 +++++------
.../background/BlockDeletingService.java | 7 +++---
.../container/common/TestBlockDeletingService.java | 19 +++++++--------
.../TestSchemaOneBackwardsCompatibility.java | 27 +++++++++++-----------
.../TestSchemaTwoBackwardsCompatibility.java | 4 ++--
.../container/keyvalue/TestKeyValueContainer.java | 4 ++--
.../TestKeyValueContainerIntegrityChecks.java | 4 ++--
.../TestKeyValueContainerMetadataInspector.java | 8 +++----
.../container/ozoneimpl/TestContainerReader.java | 13 ++++++-----
.../container/ozoneimpl/TestOzoneContainer.java | 7 +++---
.../ozone/TestStorageContainerManagerHelper.java | 2 +-
.../client/rpc/TestFailureHandlingByClient.java | 4 ++--
.../client/rpc/TestOzoneRpcClientAbstract.java | 3 ++-
.../client/rpc/TestValidateBCSIDOnRestart.java | 2 +-
.../commandhandler/TestBlockDeletion.java | 7 +++---
20 files changed, 100 insertions(+), 91 deletions(-)
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index c5912a2ffd..3e63af05f2 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -333,7 +333,8 @@ public class DeleteBlocksCommandHandler implements
CommandHandler {
DeletionMarker schemaV3Marker = (table, batch, tid, txn) -> {
Table<String, DeletedBlocksTransaction> delTxTable =
(Table<String, DeletedBlocksTransaction>) table;
- delTxTable.putWithBatch(batch, containerData.deleteTxnKey(tid), txn);
+ delTxTable.putWithBatch(batch, containerData.getDeleteTxnKey(tid),
+ txn);
};
markBlocksForDeletionTransaction(containerData, delTX, newDeletionBlocks,
@@ -403,10 +404,10 @@ public class DeleteBlocksCommandHandler implements
CommandHandler {
try (BatchOperation batch = containerDB.getStore().getBatchHandler()
.initBatchOperation()) {
for (Long blkLong : delTX.getLocalIDList()) {
- String blk = containerData.blockKey(blkLong);
+ String blk = containerData.getBlockKey(blkLong);
BlockData blkInfo = blockDataTable.get(blk);
if (blkInfo != null) {
- String deletingKey = containerData.deletingBlockKey(blkLong);
+ String deletingKey = containerData.getDeletingBlockKey(blkLong);
if (blockDataTable.get(deletingKey) != null
|| deletedBlocksTable.get(blk) != null) {
if (LOG.isDebugEnabled()) {
@@ -463,15 +464,15 @@ public class DeleteBlocksCommandHandler implements
CommandHandler {
if (delTX.getTxID() > containerData.getDeleteTransactionId()) {
// Update in DB pending delete key count and delete transaction ID.
metadataTable
- .putWithBatch(batchOperation, containerData.latestDeleteTxnKey(),
- delTX.getTxID());
+ .putWithBatch(batchOperation,
+ containerData.getLatestDeleteTxnKey(), delTX.getTxID());
}
long pendingDeleteBlocks =
containerData.getNumPendingDeletionBlocks() + newDeletionBlocks;
metadataTable
.putWithBatch(batchOperation,
- containerData.pendingDeleteBlockCountKey(),
+ containerData.getPendingDeleteBlockCountKey(),
pendingDeleteBlocks);
// update pending deletion blocks count and delete transaction ID in
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
index 115fc7f373..27b138da95 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
@@ -277,7 +277,7 @@ public class KeyValueContainerCheck {
private BlockData getBlockDataFromDB(DBHandle db, BlockData block)
throws IOException {
String blockKey =
- onDiskContainerData.blockKey(block.getBlockID().getLocalID());
+ onDiskContainerData.getBlockKey(block.getBlockID().getLocalID());
return db.getStore().getBlockDataTable().get(blockKey);
}
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index 19a13a6db5..58862925c5 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -306,11 +306,12 @@ public class KeyValueContainerData extends ContainerData {
Table<String, Long> metadataTable = db.getStore().getMetadataTable();
// Set Bytes used and block count key.
- metadataTable.putWithBatch(batchOperation, bytesUsedKey(),
+ metadataTable.putWithBatch(batchOperation, getBytesUsedKey(),
getBytesUsed() - releasedBytes);
- metadataTable.putWithBatch(batchOperation, blockCountKey(),
+ metadataTable.putWithBatch(batchOperation, getBlockCountKey(),
getBlockCount() - deletedBlockCount);
- metadataTable.putWithBatch(batchOperation, pendingDeleteBlockCountKey(),
+ metadataTable.putWithBatch(batchOperation,
+ getPendingDeleteBlockCountKey(),
getNumPendingDeletionBlocks() - deletedBlockCount);
db.getStore().getBatchHandler().commitBatchOperation(batchOperation);
@@ -328,39 +329,39 @@ public class KeyValueContainerData extends ContainerData {
// to container schemas, we should use them instead of using
// raw const variables defined.
- public String blockKey(long localID) {
+ public String getBlockKey(long localID) {
return formatKey(Long.toString(localID));
}
- public String deletingBlockKey(long localID) {
+ public String getDeletingBlockKey(long localID) {
return formatKey(DELETING_KEY_PREFIX + localID);
}
- public String deleteTxnKey(long txnID) {
+ public String getDeleteTxnKey(long txnID) {
return formatKey(Long.toString(txnID));
}
- public String latestDeleteTxnKey() {
+ public String getLatestDeleteTxnKey() {
return formatKey(DELETE_TRANSACTION_KEY);
}
- public String bcsIdKey() {
+ public String getBcsIdKey() {
return formatKey(BLOCK_COMMIT_SEQUENCE_ID);
}
- public String blockCountKey() {
+ public String getBlockCountKey() {
return formatKey(BLOCK_COUNT);
}
- public String bytesUsedKey() {
+ public String getBytesUsedKey() {
return formatKey(CONTAINER_BYTES_USED);
}
- public String pendingDeleteBlockCountKey() {
+ public String getPendingDeleteBlockCountKey() {
return formatKey(PENDING_DELETE_BLOCK_COUNT);
}
- public String deletingBlockKeyPrefix() {
+ public String getDeletingBlockKeyPrefix() {
return formatKey(DELETING_KEY_PREFIX);
}
@@ -370,7 +371,7 @@ public class KeyValueContainerData extends ContainerData {
}
public KeyPrefixFilter getDeletingBlockKeyFilter() {
- return new KeyPrefixFilter().addFilter(deletingBlockKeyPrefix());
+ return new KeyPrefixFilter().addFilter(getDeletingBlockKeyPrefix());
}
/**
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
index c6395de27d..595aa925a4 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
@@ -229,15 +229,15 @@ public class KeyValueContainerMetadataInspector
implements ContainerInspector {
JsonObject dBMetadata = new JsonObject();
dBMetadata.addProperty(OzoneConsts.BLOCK_COUNT,
- metadataTable.get(containerData.blockCountKey()));
+ metadataTable.get(containerData.getBlockCountKey()));
dBMetadata.addProperty(OzoneConsts.CONTAINER_BYTES_USED,
- metadataTable.get(containerData.bytesUsedKey()));
+ metadataTable.get(containerData.getBytesUsedKey()));
dBMetadata.addProperty(OzoneConsts.PENDING_DELETE_BLOCK_COUNT,
- metadataTable.get(containerData.pendingDeleteBlockCountKey()));
+ metadataTable.get(containerData.getPendingDeleteBlockCountKey()));
dBMetadata.addProperty(OzoneConsts.DELETE_TRANSACTION_KEY,
- metadataTable.get(containerData.latestDeleteTxnKey()));
+ metadataTable.get(containerData.getLatestDeleteTxnKey()));
dBMetadata.addProperty(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID,
- metadataTable.get(containerData.bcsIdKey()));
+ metadataTable.get(containerData.getBcsIdKey()));
return dBMetadata;
}
@@ -341,7 +341,7 @@ public class KeyValueContainerMetadataInspector implements
ContainerInspector {
BooleanSupplier keyRepairAction = () -> {
boolean repaired = false;
try {
- metadataTable.put(containerData.blockCountKey(),
+ metadataTable.put(containerData.getBlockCountKey(),
blockCountAggregate.getAsLong());
repaired = true;
} catch (IOException ex) {
@@ -376,7 +376,7 @@ public class KeyValueContainerMetadataInspector implements
ContainerInspector {
BooleanSupplier keyRepairAction = () -> {
boolean repaired = false;
try {
- metadataTable.put(containerData.bytesUsedKey(),
+ metadataTable.put(containerData.getBytesUsedKey(),
usedBytesAggregate.getAsLong());
repaired = true;
} catch (IOException ex) {
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index b23a49556f..bc3d96d9b2 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -246,7 +246,7 @@ public final class KeyValueContainerUtil {
// Set pending deleted block count.
Long pendingDeleteBlockCount =
metadataTable.get(kvContainerData
- .pendingDeleteBlockCountKey());
+ .getPendingDeleteBlockCountKey());
if (pendingDeleteBlockCount != null) {
kvContainerData.incrPendingDeletionBlocks(
pendingDeleteBlockCount);
@@ -263,7 +263,7 @@ public final class KeyValueContainerUtil {
// Set delete transaction id.
Long delTxnId =
- metadataTable.get(kvContainerData.latestDeleteTxnKey());
+ metadataTable.get(kvContainerData.getLatestDeleteTxnKey());
if (delTxnId != null) {
kvContainerData
.updateDeleteTransactionId(delTxnId);
@@ -271,7 +271,7 @@ public final class KeyValueContainerUtil {
// Set BlockCommitSequenceId.
Long bcsId = metadataTable.get(
- kvContainerData.bcsIdKey());
+ kvContainerData.getBcsIdKey());
if (bcsId != null) {
kvContainerData
.updateBlockCommitSequenceId(bcsId);
@@ -280,7 +280,7 @@ public final class KeyValueContainerUtil {
// Set bytes used.
// commitSpace for Open Containers relies on usedBytes
Long bytesUsed =
- metadataTable.get(kvContainerData.bytesUsedKey());
+ metadataTable.get(kvContainerData.getBytesUsedKey());
if (bytesUsed != null) {
isBlockMetadataSet = true;
kvContainerData.setBytesUsed(bytesUsed);
@@ -288,7 +288,7 @@ public final class KeyValueContainerUtil {
// Set block count.
Long blockCount = metadataTable.get(
- kvContainerData.blockCountKey());
+ kvContainerData.getBlockCountKey());
if (blockCount != null) {
isBlockMetadataSet = true;
kvContainerData.setBlockCount(blockCount);
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index 67b38ff68d..d822ce5119 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -158,7 +158,7 @@ public class BlockManagerImpl implements BlockManager {
// If block exists in cache, blockCount should not be incremented.
if (!isBlockInCache) {
if (db.getStore().getBlockDataTable().get(
- containerData.blockKey(localID)) == null) {
+ containerData.getBlockKey(localID)) == null) {
// Block does not exist in DB => blockCount needs to be
// incremented when the block is added into DB.
incrBlockCount = true;
@@ -166,10 +166,10 @@ public class BlockManagerImpl implements BlockManager {
}
db.getStore().getBlockDataTable().putWithBatch(
- batch, containerData.blockKey(localID), data);
+ batch, containerData.getBlockKey(localID), data);
if (bcsId != 0) {
db.getStore().getMetadataTable().putWithBatch(
- batch, containerData.bcsIdKey(), bcsId);
+ batch, containerData.getBcsIdKey(), bcsId);
}
// Set Bytes used, this bytes used will be updated for every write and
@@ -179,13 +179,13 @@ public class BlockManagerImpl implements BlockManager {
// is only used to compute the bytes used. This is done to keep the
// current behavior and avoid DB write during write chunk operation.
db.getStore().getMetadataTable().putWithBatch(
- batch, containerData.bytesUsedKey(),
+ batch, containerData.getBytesUsedKey(),
containerData.getBytesUsed());
// Set Block Count for a container.
if (incrBlockCount) {
db.getStore().getMetadataTable().putWithBatch(
- batch, containerData.blockCountKey(),
+ batch, containerData.getBlockCountKey(),
containerData.getBlockCount() + 1);
}
@@ -327,7 +327,7 @@ public class BlockManagerImpl implements BlockManager {
try (DBHandle db = BlockUtils.getDB(cData, config)) {
result = new ArrayList<>();
String startKey = (startLocalID == -1) ? cData.startKeyEmpty()
- : cData.blockKey(startLocalID);
+ : cData.getBlockKey(startLocalID);
List<? extends Table.KeyValue<String, BlockData>> range =
db.getStore().getBlockDataTable()
.getSequentialRangeKVs(startKey, count,
@@ -352,7 +352,7 @@ public class BlockManagerImpl implements BlockManager {
private BlockData getBlockByID(DBHandle db, BlockID blockID,
KeyValueContainerData containerData) throws IOException {
- String blockKey = containerData.blockKey(blockID.getLocalID());
+ String blockKey = containerData.getBlockKey(blockID.getLocalID());
BlockData blockData = db.getStore().getBlockDataTable().get(blockKey);
if (blockData == null) {
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
index c8cffc380e..0f8b80cf0d 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -440,7 +440,8 @@ public class BlockDeletingService extends BackgroundService
{
Deleter schema3Deleter = (table, batch, tid) -> {
Table<String, DeletedBlocksTransaction> delTxTable =
(Table<String, DeletedBlocksTransaction>) table;
- delTxTable.deleteWithBatch(batch, containerData.deleteTxnKey(tid));
+ delTxTable.deleteWithBatch(batch,
+ containerData.getDeleteTxnKey(tid));
};
Table<String, DeletedBlocksTransaction> deleteTxns =
((DeleteTransactionStore<String>) meta.getStore())
@@ -502,7 +503,7 @@ public class BlockDeletingService extends BackgroundService
{
deleter.apply(deleteTxns, batch, delTx.getTxID());
for (Long blk : delTx.getLocalIDList()) {
blockDataTable.deleteWithBatch(batch,
- containerData.blockKey(blk));
+ containerData.getBlockKey(blk));
}
}
@@ -550,7 +551,7 @@ public class BlockDeletingService extends BackgroundService
{
long bytesReleased = 0;
for (DeletedBlocksTransaction entry : delBlocks) {
for (Long blkLong : entry.getLocalIDList()) {
- String blk = containerData.blockKey(blkLong);
+ String blk = containerData.getBlockKey(blkLong);
BlockData blkInfo = blockDataTable.get(blk);
LOG.debug("Deleting block {}", blkLong);
if (blkInfo == null) {
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index a27a675a3a..dfdcef0ee8 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -224,7 +224,8 @@ public class TestBlockDeletingService {
try (DBHandle metadata = BlockUtils.getDB(data, conf)) {
for (int j = 0; j < numOfBlocksPerContainer; j++) {
blockID = ContainerTestHelper.getTestBlockID(containerID);
- String deleteStateName = data.deletingBlockKey(blockID.getLocalID());
+ String deleteStateName = data.getDeletingBlockKey(
+ blockID.getLocalID());
BlockData kd = new BlockData(blockID);
List<ContainerProtos.ChunkInfo> chunks = Lists.newArrayList();
putChunksInBlock(numOfChunksPerBlock, j, chunks, buffer, chunkManager,
@@ -256,7 +257,7 @@ public class TestBlockDeletingService {
container, blockID);
kd.setChunks(chunks);
try (DBHandle metadata = BlockUtils.getDB(data, conf)) {
- String blockKey = data.blockKey(blockID.getLocalID());
+ String blockKey = data.getBlockKey(blockID.getLocalID());
metadata.getStore().getBlockDataTable().put(blockKey, kd);
} catch (IOException exception) {
LOG.info("Exception = " + exception);
@@ -291,7 +292,7 @@ public class TestBlockDeletingService {
DatanodeStoreSchemaThreeImpl dnStoreThreeImpl =
(DatanodeStoreSchemaThreeImpl) ds;
dnStoreThreeImpl.getDeleteTransactionTable()
- .putWithBatch(batch, data.deleteTxnKey(txnID), dtx);
+ .putWithBatch(batch, data.getDeleteTxnKey(txnID), dtx);
} else {
DatanodeStoreSchemaTwoImpl dnStoreTwoImpl =
(DatanodeStoreSchemaTwoImpl) ds;
@@ -344,12 +345,12 @@ public class TestBlockDeletingService {
container.getContainerData().setBlockCount(numOfBlocksPerContainer);
// Set block count, bytes used and pending delete block count.
metadata.getStore().getMetadataTable()
- .put(data.blockCountKey(), (long) numOfBlocksPerContainer);
+ .put(data.getBlockCountKey(), (long) numOfBlocksPerContainer);
metadata.getStore().getMetadataTable()
- .put(data.bytesUsedKey(),
+ .put(data.getBytesUsedKey(),
chunkLength * numOfChunksPerBlock * numOfBlocksPerContainer);
metadata.getStore().getMetadataTable()
- .put(data.pendingDeleteBlockCountKey(),
+ .put(data.getPendingDeleteBlockCountKey(),
(long) numOfBlocksPerContainer);
} catch (IOException exception) {
LOG.warn("Meta Data update was not successful for container: "
@@ -465,7 +466,7 @@ public class TestBlockDeletingService {
// Ensure there are 3 blocks under deletion and 0 deleted blocks
Assert.assertEquals(3, getUnderDeletionBlocksCount(meta, data));
Assert.assertEquals(3, meta.getStore().getMetadataTable()
- .get(data.pendingDeleteBlockCountKey()).longValue());
+ .get(data.getPendingDeleteBlockCountKey()).longValue());
// Container contains 3 blocks. So, space used by the container
// should be greater than zero.
@@ -495,9 +496,9 @@ public class TestBlockDeletingService {
// Check finally DB counters.
// Not checking bytes used, as handler is a mock call.
Assert.assertEquals(0, meta.getStore().getMetadataTable()
- .get(data.pendingDeleteBlockCountKey()).longValue());
+ .get(data.getPendingDeleteBlockCountKey()).longValue());
Assert.assertEquals(0,
- meta.getStore().getMetadataTable().get(data.blockCountKey())
+ meta.getStore().getMetadataTable().get(data.getBlockCountKey())
.longValue());
Assert.assertEquals(3,
deletingServiceMetrics.getSuccessCount()
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
index 7aab0af64e..da9dd88c41 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
@@ -239,14 +239,14 @@ public class TestSchemaOneBackwardsCompatibility {
try (DBHandle db = BlockUtils.getDB(cData, conf)) {
Table<String, Long> metadataTable = db.getStore().getMetadataTable();
- metadataTable.delete(cData.blockCountKey());
- assertNull(metadataTable.get(cData.blockCountKey()));
+ metadataTable.delete(cData.getBlockCountKey());
+ assertNull(metadataTable.get(cData.getBlockCountKey()));
- metadataTable.delete(cData.bytesUsedKey());
- assertNull(metadataTable.get(cData.bytesUsedKey()));
+ metadataTable.delete(cData.getBytesUsedKey());
+ assertNull(metadataTable.get(cData.getBytesUsedKey()));
- metadataTable.delete(cData.pendingDeleteBlockCountKey());
- assertNull(metadataTable.get(cData.pendingDeleteBlockCountKey()));
+ metadataTable.delete(cData.getPendingDeleteBlockCountKey());
+ assertNull(metadataTable.get(cData.getPendingDeleteBlockCountKey()));
}
// Create a new container data object, and fill in its metadata by
@@ -317,7 +317,7 @@ public class TestSchemaOneBackwardsCompatibility {
Table<String, Long> metadataTable =
refCountedDB.getStore().getMetadataTable();
assertEquals(expectedRegularBlocks + expectedDeletingBlocks,
- (long)metadataTable.get(cData.blockCountKey()));
+ (long)metadataTable.get(cData.getBlockCountKey()));
}
}
@@ -401,7 +401,7 @@ public class TestSchemaOneBackwardsCompatibility {
// Test encoding keys and decoding database values.
for (String blockID: TestDB.BLOCK_IDS) {
- String blockKey = cData.blockKey(Long.parseLong(blockID));
+ String blockKey = cData.getBlockKey(Long.parseLong(blockID));
BlockData blockData = blockDataTable.get(blockKey);
Assert.assertEquals(Long.toString(blockData.getLocalID()), blockID);
}
@@ -444,7 +444,8 @@ public class TestSchemaOneBackwardsCompatibility {
refCountedDB.getStore().getBlockDataTable();
for (String blockID: TestDB.DELETING_BLOCK_IDS) {
- String blockKey = cData.deletingBlockKey(Long.parseLong(blockID));
+ String blockKey = cData.getDeletingBlockKey(
+ Long.parseLong(blockID));
BlockData blockData = blockDataTable.get(blockKey);
Assert.assertEquals(Long.toString(blockData.getLocalID()), blockID);
}
@@ -464,7 +465,7 @@ public class TestSchemaOneBackwardsCompatibility {
// Apply the deleting prefix to the saved block IDs so we can compare
// them to the retrieved keys.
List<String> expectedKeys = TestDB.DELETING_BLOCK_IDS.stream()
- .map(key -> cData.deletingBlockKey(Long.parseLong(key)))
+ .map(key -> cData.getDeletingBlockKey(Long.parseLong(key)))
.collect(Collectors.toList());
Assert.assertEquals(expectedKeys, decodedKeys);
@@ -496,11 +497,11 @@ public class TestSchemaOneBackwardsCompatibility {
refCountedDB.getStore().getMetadataTable();
Assert.assertEquals(TestDB.KEY_COUNT,
- metadataTable.get(cData.blockCountKey()).longValue());
+ metadataTable.get(cData.getBlockCountKey()).longValue());
Assert.assertEquals(TestDB.BYTES_USED,
- metadataTable.get(cData.bytesUsedKey()).longValue());
+ metadataTable.get(cData.getBytesUsedKey()).longValue());
Assert.assertEquals(TestDB.NUM_PENDING_DELETION_BLOCKS,
- metadataTable.get(cData.pendingDeleteBlockCountKey())
+ metadataTable.get(cData.getPendingDeleteBlockCountKey())
.longValue());
}
}
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
index d2741dc9cd..3624309d9d 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
@@ -317,9 +317,9 @@ public class TestSchemaTwoBackwardsCompatibility {
// update delete related metadata
db.getStore().getMetadataTable().putWithBatch(batch,
- cData.latestDeleteTxnKey(), txn.getTxID());
+ cData.getLatestDeleteTxnKey(), txn.getTxID());
db.getStore().getMetadataTable().putWithBatch(batch,
- cData.pendingDeleteBlockCountKey(),
+ cData.getPendingDeleteBlockCountKey(),
cData.getNumPendingDeletionBlocks() + BLOCKS_PER_TXN);
db.getStore().getBatchHandler().commitBatchOperation(batch);
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index 0ab457a52b..62a432e514 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -333,14 +333,14 @@ public class TestKeyValueContainer {
metadataStore.getStore().getBlockDataTable();
for (long i = 0; i < numberOfKeysToWrite; i++) {
- blockDataTable.put(cData.blockKey(i),
+ blockDataTable.put(cData.getBlockKey(i),
new BlockData(new BlockID(i, i)));
}
// As now when we put blocks, we increment block count and update in DB.
// As for test, we are doing manually so adding key count to DB.
metadataStore.getStore().getMetadataTable()
- .put(cData.blockCountKey(), numberOfKeysToWrite);
+ .put(cData.getBlockCountKey(), numberOfKeysToWrite);
}
Map<String, String> metadata = new HashMap<>();
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
index 51e72839fa..cf18fa8948 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
@@ -172,10 +172,10 @@ public class TestKeyValueContainerIntegrityChecks {
blockData.setChunks(chunkList);
// normal key
- String key = containerData.blockKey(blockID.getLocalID());
+ String key = containerData.getBlockKey(blockID.getLocalID());
if (i >= normalBlocks) {
// deleted key
- key = containerData.deletingBlockKey(blockID.getLocalID());
+ key = containerData.getDeletingBlockKey(blockID.getLocalID());
}
metadataStore.getStore().getBlockDataTable().put(key, blockData);
}
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
index 8195e6f4eb..aea451bc3a 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
@@ -293,8 +293,8 @@ public class TestKeyValueContainerMetadataInspector
try (DBHandle db = BlockUtils.getDB(containerData, getConf())) {
Table<String, Long> metadataTable = db.getStore().getMetadataTable();
// Don't care about in memory state. Just change the DB values.
- metadataTable.put(containerData.blockCountKey(), blockCount);
- metadataTable.put(containerData.bytesUsedKey(), byteCount);
+ metadataTable.put(containerData.getBlockCountKey(), blockCount);
+ metadataTable.put(containerData.getBytesUsedKey(), byteCount);
}
}
@@ -303,10 +303,10 @@ public class TestKeyValueContainerMetadataInspector
try (DBHandle db = BlockUtils.getDB(containerData, getConf())) {
Table<String, Long> metadataTable = db.getStore().getMetadataTable();
- long bytesUsed = metadataTable.get(containerData.bytesUsedKey());
+ long bytesUsed = metadataTable.get(containerData.getBytesUsedKey());
Assert.assertEquals(expectedBytesUsed, bytesUsed);
- long blockCount = metadataTable.get(containerData.blockCountKey());
+ long blockCount = metadataTable.get(containerData.getBlockCountKey());
Assert.assertEquals(expectedBlockCount, blockCount);
}
}
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
index a17a5f7f8c..229e883ad8 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
@@ -161,11 +161,11 @@ public class TestContainerReader {
metadataStore.getStore().getBlockDataTable();
Long localID = blockNames.get(i);
- String blk = cData.blockKey(localID);
+ String blk = cData.getBlockKey(localID);
BlockData blkInfo = blockDataTable.get(blk);
blockDataTable.delete(blk);
- blockDataTable.put(cData.deletingBlockKey(localID), blkInfo);
+ blockDataTable.put(cData.getDeletingBlockKey(localID), blkInfo);
}
if (setMetaData) {
@@ -173,7 +173,8 @@ public class TestContainerReader {
// and bytes used metadata values, so those do not change.
Table<String, Long> metadataTable =
metadataStore.getStore().getMetadataTable();
- metadataTable.put(cData.pendingDeleteBlockCountKey(), (long)count);
+ metadataTable.put(cData.getPendingDeleteBlockCountKey(),
+ (long)count);
}
}
@@ -201,14 +202,14 @@ public class TestContainerReader {
blockData.setChunks(chunkList);
blkNames.add(localBlockID);
metadataStore.getStore().getBlockDataTable()
- .put(cData.blockKey(localBlockID), blockData);
+ .put(cData.getBlockKey(localBlockID), blockData);
}
if (setMetaData) {
metadataStore.getStore().getMetadataTable()
- .put(cData.blockCountKey(), (long)blockCount);
+ .put(cData.getBlockCountKey(), (long)blockCount);
metadataStore.getStore().getMetadataTable()
- .put(cData.bytesUsedKey(), blockCount * blockLen);
+ .put(cData.getBytesUsedKey(), blockCount * blockLen);
}
}
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 5ae3bad21f..cceed79390 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -320,12 +320,13 @@ public class TestOzoneContainer {
chunkList.add(info.getProtoBufMessage());
}
blockData.setChunks(chunkList);
- blockDataTable.put(cData.blockKey(blockID.getLocalID()), blockData);
+ blockDataTable.put(cData.getBlockKey(blockID.getLocalID()),
+ blockData);
}
// Set Block count and used bytes.
- metadataTable.put(cData.blockCountKey(), (long) blocks);
- metadataTable.put(cData.bytesUsedKey(), usedBytes);
+ metadataTable.put(cData.getBlockCountKey(), (long) blocks);
+ metadataTable.put(cData.getBytesUsedKey(), usedBytes);
}
// remaining available capacity of the container
return (freeBytes - usedBytes);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index 3ff6041232..2d031ada23 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@ -112,7 +112,7 @@ public class TestStorageContainerManagerHelper {
for (Table.KeyValue<String, BlockData> entry : kvs) {
pendingDeletionBlocks
- .add(entry.getKey().replace(cData.deletingBlockKeyPrefix(), ""));
+ .add(entry.getKey().replace(cData.getDeletingBlockKeyPrefix(), ""));
}
}
return pendingDeletionBlocks;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index 6fa5324af6..7ddb6e13d3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -268,7 +268,7 @@ public class TestFailureHandlingByClient {
.getContainer(containerId1)).getContainerData();
try (DBHandle containerDb1 = BlockUtils.getDB(containerData1, conf)) {
BlockData blockData1 = containerDb1.getStore().getBlockDataTable().get(
- containerData1.blockKey(locationList.get(0).getBlockID()
+ containerData1.getBlockKey(locationList.get(0).getBlockID()
.getLocalID()));
// The first Block could have 1 or 2 chunkSize of data
int block1NumChunks = blockData1.getChunks().size();
@@ -287,7 +287,7 @@ public class TestFailureHandlingByClient {
.getContainer(containerId2)).getContainerData();
try (DBHandle containerDb2 = BlockUtils.getDB(containerData2, conf)) {
BlockData blockData2 = containerDb2.getStore().getBlockDataTable().get(
- containerData2.blockKey(locationList.get(1).getBlockID()
+ containerData2.getBlockKey(locationList.get(1).getBlockID()
.getLocalID()));
// The second Block should have 0.5 chunkSize of data
Assert.assertEquals(block2ExpectedChunkCount,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index e6bfca173d..361b6fdcf1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -1889,7 +1889,8 @@ public abstract class TestOzoneRpcClientAbstract {
KeyValueContainerData cData =
(KeyValueContainerData) container.getContainerData();
try (DBHandle db = BlockUtils.getDB(cData, cluster.getConf())) {
- db.getStore().getMetadataTable().put(cData.bcsIdKey(), newBCSID);
+ db.getStore().getMetadataTable().put(cData.getBcsIdKey(),
+ newBCSID);
}
container.updateBlockCommitSequenceId(newBCSID);
index++;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
index 7f00825e34..ac964834fc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
@@ -227,7 +227,7 @@ public class TestValidateBCSIDOnRestart {
// modify the bcsid for the container in the ROCKS DB thereby inducing
// corruption
db.getStore().getMetadataTable()
- .put(keyValueContainerData.bcsIdKey(), 0L);
+ .put(keyValueContainerData.getBcsIdKey(), 0L);
}
// after the restart, there will be a mismatch in BCSID of what is recorded
// in the and what is there in RockSDB and hence the container would be
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index b76092122c..e8ac072c5d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -441,7 +441,7 @@ public class TestBlockDeletion {
.getContainer(blockID.getContainerID()).getContainerData();
try (DBHandle db = BlockUtils.getDB(cData, conf)) {
Assertions.assertNotNull(db.getStore().getBlockDataTable()
- .get(cData.blockKey(blockID.getLocalID())));
+ .get(cData.getBlockKey(blockID.getLocalID())));
}
}, omKeyLocationInfoGroups);
}
@@ -459,12 +459,13 @@ public class TestBlockDeletion {
Table<String, BlockData> blockDataTable =
db.getStore().getBlockDataTable();
- String blockKey = cData.blockKey(blockID.getLocalID());
+ String blockKey = cData.getBlockKey(blockID.getLocalID());
BlockData blockData = blockDataTable.get(blockKey);
Assertions.assertNull(blockData);
- String deletingKey = cData.deletingBlockKey(blockID.getLocalID());
+ String deletingKey = cData.getDeletingBlockKey(
+ blockID.getLocalID());
Assertions.assertNull(blockDataTable.get(deletingKey));
}
containerIdsWithDeletedBlocks.add(blockID.getContainerID());
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]