This is an automated email from the ASF dual-hosted git repository.
elek pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 9007b47 HDDS-3678. Remove usage of DFSUtil.addPBProtocol method (#987)
9007b47 is described below
commit 9007b47c7222307137a251db6be967a085d7bc5f
Author: Doroszlai, Attila <[email protected]>
AuthorDate: Wed Jun 3 12:29:30 2020 +0200
HDDS-3678. Remove usage of DFSUtil.addPBProtocol method (#987)
---
.../main/java/org/apache/hadoop/ozone/OzoneConsts.java | 12 ++++++------
.../commandhandler/DeleteBlocksCommandHandler.java | 6 +++---
.../statemachine/background/BlockDeletingService.java | 10 +++++-----
.../hadoop/ozone/container/common/SCMTestUtils.java | 4 ++--
.../container/common/TestBlockDeletingService.java | 4 ++--
.../container/keyvalue/TestKeyValueBlockIterator.java | 4 ++--
.../container/keyvalue/TestKeyValueContainerCheck.java | 6 +++---
.../ozone/container/ozoneimpl/TestContainerReader.java | 4 ++--
.../org/apache/hadoop/hdds/utils/HddsServerUtil.java | 15 +++++++++++++++
.../hdds/scm/server/StorageContainerManager.java | 3 +--
.../hadoop/hdds/scm/block/TestDeletedBlockLog.java | 4 ++--
.../apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java | 4 ++--
.../apache/hadoop/fs/ozone/TestOzoneFSInputStream.java | 5 +++--
.../ozone/TestStorageContainerManagerHelper.java | 4 ++--
.../client/rpc/TestContainerStateMachineFailures.java | 4 ++--
.../ozone/client/rpc/TestOzoneRpcClientAbstract.java | 18 +++++++-----------
.../statemachine/commandhandler/TestBlockDeletion.java | 8 ++++----
.../java/org/apache/hadoop/ozone/om/OzoneManager.java | 3 +--
.../apache/hadoop/ozone/freon/RandomKeyGenerator.java | 4 ++--
.../java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java | 9 ++++-----
20 files changed, 70 insertions(+), 61 deletions(-)
diff --git
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index b31f80d..5a1f915 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.ozone;
+import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.ratis.thirdparty.io.grpc.Context;
import org.apache.ratis.thirdparty.io.grpc.Metadata;
@@ -148,15 +148,15 @@ public final class OzoneConsts {
public static final byte[] DB_BLOCK_COUNT_KEY =
- DFSUtil.string2Bytes(OzoneConsts.BLOCK_COUNT);
+ StringUtils.string2Bytes(OzoneConsts.BLOCK_COUNT);
public static final byte[] DB_CONTAINER_BYTES_USED_KEY =
- DFSUtil.string2Bytes(OzoneConsts.CONTAINER_BYTES_USED);
+ StringUtils.string2Bytes(OzoneConsts.CONTAINER_BYTES_USED);
public static final byte[] DB_PENDING_DELETE_BLOCK_COUNT_KEY =
- DFSUtil.string2Bytes(PENDING_DELETE_BLOCK_COUNT);
+ StringUtils.string2Bytes(PENDING_DELETE_BLOCK_COUNT);
public static final byte[] DB_CONTAINER_DELETE_TRANSACTION_KEY =
- DFSUtil.string2Bytes(DELETE_TRANSACTION_KEY_PREFIX);
+ StringUtils.string2Bytes(DELETE_TRANSACTION_KEY_PREFIX);
public static final byte[] DB_BLOCK_COMMIT_SEQUENCE_ID_KEY =
- DFSUtil.string2Bytes(BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
+ StringUtils.string2Bytes(BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index 4324875..64cc804 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -17,13 +17,13 @@
package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
import com.google.common.primitives.Longs;
+import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
import org.apache.hadoop.hdds.protocol.proto
@@ -215,9 +215,9 @@ public class DeleteBlocksCommandHandler implements
CommandHandler {
byte[] blkInfo = containerDB.getStore().get(blkBytes);
if (blkInfo != null) {
byte[] deletingKeyBytes =
- DFSUtil.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX + blk);
+ StringUtils.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX + blk);
byte[] deletedKeyBytes =
- DFSUtil.string2Bytes(OzoneConsts.DELETED_KEY_PREFIX + blk);
+ StringUtils.string2Bytes(OzoneConsts.DELETED_KEY_PREFIX + blk);
if (containerDB.getStore().get(deletingKeyBytes) != null
|| containerDB.getStore().get(deletedKeyBytes) != null) {
if (LOG.isDebugEnabled()) {
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
index 375263c..cec723c 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -28,6 +28,7 @@ import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
+import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -39,7 +40,6 @@ import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
import org.apache.hadoop.hdds.utils.BatchOperation;
import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
@@ -278,7 +278,7 @@ public class BlockDeletingService extends BackgroundService
{
.getHandler(container.getContainerType()));
toDeleteBlocks.forEach(entry -> {
- String blockName = DFSUtil.bytes2String(entry.getKey());
+ String blockName = StringUtils.bytes2String(entry.getKey());
LOG.debug("Deleting block {}", blockName);
try {
ContainerProtos.BlockData data =
@@ -299,9 +299,9 @@ public class BlockDeletingService extends BackgroundService
{
String blockId =
entry.substring(OzoneConsts.DELETING_KEY_PREFIX.length());
String deletedEntry = OzoneConsts.DELETED_KEY_PREFIX + blockId;
- batch.put(DFSUtil.string2Bytes(deletedEntry),
- DFSUtil.string2Bytes(blockId));
- batch.delete(DFSUtil.string2Bytes(entry));
+ batch.put(StringUtils.string2Bytes(deletedEntry),
+ StringUtils.string2Bytes(blockId));
+ batch.delete(StringUtils.string2Bytes(entry));
});
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
index f696ac3..0d80fa4 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
@@ -30,9 +30,9 @@ import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageContainerDatanodeProtocolService;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
@@ -79,7 +79,7 @@ public final class SCMTestUtils {
.setSecretManager(null)
.build();
- DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer);
+ HddsServerUtil.addPBProtocol(conf, protocol, instance, rpcServer);
return rpcServer;
}
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index b4c1ae5..52daeff 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -30,6 +30,7 @@ import com.google.common.collect.Lists;
import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -37,7 +38,6 @@ import
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.utils.BackgroundService;
import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.common.Checksum;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
@@ -165,7 +165,7 @@ public class TestBlockDeletingService {
chunks.add(info);
}
kd.setChunks(chunks);
- metadata.getStore().put(DFSUtil.string2Bytes(deleteStateName),
+ metadata.getStore().put(StringUtils.string2Bytes(deleteStateName),
kd.getProtoBufMessage().toByteArray());
container.getContainerData().incrPendingDeletionBlocks(1);
}
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
index 256f8b7..ccd3227 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
@@ -28,11 +28,11 @@ import java.util.UUID;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
@@ -280,7 +280,7 @@ public class TestKeyValueBlockIterator {
BlockID blockID = new BlockID(containerId, i);
BlockData blockData = new BlockData(blockID);
blockData.setChunks(chunkList);
- metadataStore.getStore().put(DFSUtil.string2Bytes(OzoneConsts
+ metadataStore.getStore().put(StringUtils.string2Bytes(OzoneConsts
.DELETING_KEY_PREFIX + blockID.getLocalID()), blockData
.getProtoBufMessage().toByteArray());
}
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
index 0bd5b07..41ae57d 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
@@ -21,10 +21,10 @@ package org.apache.hadoop.ozone.container.keyvalue;
import com.google.common.primitives.Longs;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.common.Checksum;
@@ -263,7 +263,7 @@ import static org.junit.Assert.assertFalse;
if (i >= normalBlocks) {
// deleted key
- metadataStore.getStore().put(DFSUtil.string2Bytes(
+ metadataStore.getStore().put(StringUtils.string2Bytes(
OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID()),
blockData.getProtoBufMessage().toByteArray());
} else {
@@ -278,4 +278,4 @@ import static org.junit.Assert.assertFalse;
}
}
-}
\ No newline at end of file
+}
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
index 6929864..e1c5f33 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
@@ -20,11 +20,11 @@ package org.apache.hadoop.ozone.container.ozoneimpl;
import com.google.common.primitives.Longs;
import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
@@ -130,7 +130,7 @@ public class TestContainerReader {
byte[] blkInfo = metadataStore.getStore().get(blkBytes);
byte[] deletingKeyBytes =
- DFSUtil.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX +
+ StringUtils.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX +
blockNames.get(i));
metadataStore.getStore().delete(blkBytes);
diff --git
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
index 214adb2..ee51142 100644
---
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
+++
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
@@ -24,6 +24,8 @@ import java.util.Optional;
import java.util.OptionalInt;
import java.util.concurrent.TimeUnit;
+import com.google.protobuf.BlockingService;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -80,6 +82,19 @@ public final class HddsServerUtil {
HddsServerUtil.class);
/**
+ * Add protobuf-based protocol to the {@link RPC.Server}.
+ * @param conf configuration
+ * @param protocol Protocol interface
+ * @param service service that implements the protocol
+ * @param server RPC server to which the protocol & implementation is added to
+ */
+ public static void addPBProtocol(Configuration conf, Class<?> protocol,
+ BlockingService service, RPC.Server server) throws IOException {
+ RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine.class);
+ server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocol, service);
+ }
+
+ /**
* Retrieve the socket address that should be used by DataNodes to connect
* to the SCM.
*
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index a6cd5d4..4b4e073 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -93,7 +93,6 @@ import org.apache.hadoop.hdds.server.events.EventQueue;
import org.apache.hadoop.hdds.utils.HddsServerUtil;
import org.apache.hadoop.hdds.utils.HddsVersionInfo;
import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.metrics2.MetricsSystem;
@@ -607,7 +606,7 @@ public final class StorageContainerManager extends
ServiceRuntimeInfoImpl
.setSecretManager(null)
.build();
- DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer);
+ HddsServerUtil.addPBProtocol(conf, protocol, instance, rpcServer);
return rpcServer;
}
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index c9166ab..9843b74 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdds.scm.block;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
@@ -32,7 +33,6 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@@ -268,7 +268,7 @@ public class TestDeletedBlockLog {
int added = 0, committed = 0;
List<DeletedBlocksTransaction> blocks = new ArrayList<>();
List<Long> txIDs = new ArrayList<>();
- byte[] latestTxid = DFSUtil.string2Bytes("#LATEST_TXID#");
+ byte[] latestTxid = StringUtils.string2Bytes("#LATEST_TXID#");
MetadataKeyFilters.MetadataKeyFilter avoidLatestTxid =
(preKey, currentKey, nextKey) ->
!Arrays.equals(latestTxid, currentKey);
diff --git
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java
index 84c341d..eecc73b 100644
---
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java
+++
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java
@@ -32,9 +32,9 @@ import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.security.Credentials;
@@ -124,7 +124,7 @@ public final class OzoneKMSUtil {
// put back into UGI
if (keyProviderUri != null && credsKey != null) {
credentials.addSecretKey(
- credsKey, DFSUtil.string2Bytes(keyProviderUri.toString()));
+ credsKey, StringUtils.string2Bytes(keyProviderUri.toString()));
}
return keyProviderUri;
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
index 1f705cf..5655f85 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestDataUtil;
@@ -42,6 +41,8 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
+import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
+
/**
* Test OzoneFSInputStream by reading through multiple interfaces.
*/
@@ -85,7 +86,7 @@ public class TestOzoneFSInputStream {
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, uri);
fs = FileSystem.get(conf);
int fileLen = 30 * 1024 * 1024;
- data = DFSUtil.string2Bytes(RandomStringUtils.randomAlphanumeric(fileLen));
+ data = string2Bytes(RandomStringUtils.randomAlphanumeric(fileLen));
filePath = new Path("/" + RandomStringUtils.randomAlphanumeric(5));
try (FSDataOutputStream stream = fs.create(filePath)) {
stream.write(data);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index 714b7ef..c732c88 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@ -21,12 +21,12 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
+import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
@@ -102,7 +102,7 @@ public class TestStorageContainerManagerHelper {
List<Map.Entry<byte[], byte[]>> kvs = meta.getStore()
.getRangeKVs(null, Integer.MAX_VALUE, filter);
kvs.forEach(entry -> {
- String key = DFSUtil.bytes2String(entry.getKey());
+ String key = StringUtils.bytes2String(entry.getKey());
pendingDeletionBlocks
.add(key.replace(OzoneConsts.DELETING_KEY_PREFIX, ""));
});
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
index c0be97e..4bd3e4a 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.client.rpc;
import com.google.common.primitives.Longs;
import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -28,7 +29,6 @@ import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -605,7 +605,7 @@ public class TestContainerStateMachineFailures {
ReferenceCountedDB db = BlockUtils.
getDB(keyValueContainerData, conf);
byte[] blockCommitSequenceIdKey =
- DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
+ StringUtils.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
// modify the bcsid for the container in the ROCKS DB thereby inducing
// corruption
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index fd40e49..79e6eca 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import
org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneAcl;
@@ -102,6 +101,8 @@ import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.StringUtils;
+
+import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
@@ -1608,8 +1609,7 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
sampleData.length(), 1, uploadID);
- ozoneOutputStream.write(DFSUtil.string2Bytes(sampleData), 0,
- sampleData.length());
+ ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length());
ozoneOutputStream.close();
OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
@@ -1646,8 +1646,7 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
sampleData.length(), partNumber, uploadID);
- ozoneOutputStream.write(DFSUtil.string2Bytes(sampleData), 0,
- sampleData.length());
+ ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length());
ozoneOutputStream.close();
OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
@@ -1661,8 +1660,7 @@ public abstract class TestOzoneRpcClientAbstract {
sampleData = "sample Data Changed";
ozoneOutputStream = bucket.createMultipartKey(keyName,
sampleData.length(), partNumber, uploadID);
- ozoneOutputStream.write(DFSUtil.string2Bytes(sampleData), 0, "name"
- .length());
+ ozoneOutputStream.write(string2Bytes(sampleData), 0, "name".length());
ozoneOutputStream.close();
commitUploadPartInfo = ozoneOutputStream
@@ -1702,8 +1700,7 @@ public abstract class TestOzoneRpcClientAbstract {
OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
sampleData.length(), partNumber, uploadID);
- ozoneOutputStream.write(DFSUtil.string2Bytes(sampleData), 0,
- sampleData.length());
+ ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length());
ozoneOutputStream.close();
OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
@@ -1717,8 +1714,7 @@ public abstract class TestOzoneRpcClientAbstract {
sampleData = "sample Data Changed";
ozoneOutputStream = bucket.createMultipartKey(keyName,
sampleData.length(), partNumber, uploadID);
- ozoneOutputStream.write(DFSUtil.string2Bytes(sampleData), 0, "name"
- .length());
+ ozoneOutputStream.write(string2Bytes(sampleData), 0, "name".length());
ozoneOutputStream.close();
commitUploadPartInfo = ozoneOutputStream
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index 000f9b5..97a27c1 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -19,6 +19,7 @@ package
org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
import com.google.common.primitives.Longs;
import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -31,7 +32,6 @@ import org.apache.hadoop.hdds.protocol.proto
import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl;
import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.OzoneTestUtils;
@@ -341,12 +341,12 @@ public class TestBlockDeletion {
.getContainer(blockID.getContainerID()).getContainerData(), conf)) {
Assert.assertNull(db.getStore().get(
Longs.toByteArray(blockID.getLocalID())));
- Assert.assertNull(db.getStore().get(DFSUtil.string2Bytes(
+ Assert.assertNull(db.getStore().get(StringUtils.string2Bytes(
OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID())));
- Assert.assertNotNull(DFSUtil.string2Bytes(
+ Assert.assertNotNull(StringUtils.string2Bytes(
OzoneConsts.DELETED_KEY_PREFIX + blockID.getLocalID()));
}
containerIdsWithDeletedBlocks.add(blockID.getContainerID());
}, omKeyLocationInfoGroups);
}
-}
\ No newline at end of file
+}
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 561c1b0..faa001d 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -80,7 +80,6 @@ import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper;
import org.apache.hadoop.hdds.utils.db.SequenceNumberNotFoundException;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.ipc.Client;
@@ -855,7 +854,7 @@ public final class OzoneManager extends
ServiceRuntimeInfoImpl
.setSecretManager(delegationTokenMgr)
.build();
- DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer);
+ HddsServerUtil.addPBProtocol(conf, protocol, instance, rpcServer);
if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
false)) {
diff --git
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
index 4751672..6e5e627 100644
---
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
+++
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
@@ -39,13 +39,13 @@ import java.util.concurrent.atomic.AtomicLong;
import java.util.function.LongSupplier;
import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.client.OzoneQuota;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -273,7 +273,7 @@ public final class RandomKeyGenerator implements
Callable<Void> {
init(freon.createOzoneConfiguration());
}
- keyValueBuffer = DFSUtil.string2Bytes(
+ keyValueBuffer = StringUtils.string2Bytes(
RandomStringUtils.randomAscii(bufferSize));
// Compute the common initial digest for all keys without their UUID
diff --git
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
index cc31619..76b32b2 100644
---
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
+++
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
@@ -27,8 +27,7 @@ import org.apache.commons.cli.Options;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
import
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
@@ -344,7 +343,7 @@ public class SQLCLI extends Configured implements Tool {
executeSQL(conn, CREATE_KEY_INFO);
dbStore.iterate(null, (key, value) -> {
- String keyString = DFSUtilClient.bytes2String(key);
+ String keyString = StringUtils.bytes2String(key);
KeyType type = getKeyType(keyString);
try {
insertOMDB(conn, type, keyString, value);
@@ -528,9 +527,9 @@ public class SQLCLI extends Configured implements Tool {
executeSQL(conn, CREATE_OPEN_CONTAINER);
dbStore.iterate(null, (key, value) -> {
- String containerName = DFSUtil.bytes2String(key);
+ String containerName = StringUtils.bytes2String(key);
Long containerUsed =
- Long.parseLong(DFSUtil.bytes2String(value));
+ Long.parseLong(StringUtils.bytes2String(value));
String insertOpenContainer = String
.format(INSERT_OPEN_CONTAINER, containerName, containerUsed);
try {
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]