This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new e87b8dbb2f HDDS-12417. Reduce duplication of createKey variants in TestDataUtil (#7999)
e87b8dbb2f is described below

commit e87b8dbb2faa0361813ddeb8e281ab64c3f380ca
Author: Chia-Chuan Yu <[email protected]>
AuthorDate: Wed Mar 5 16:55:52 2025 +0800

    HDDS-12417. Reduce duplication of createKey variants in TestDataUtil (#7999)
---
 .../fs/ozone/AbstractOzoneFileSystemTest.java      |  9 ++-
 .../apache/hadoop/hdds/scm/TestCloseContainer.java | 12 ++--
 .../scm/node/TestDecommissionAndMaintenance.java   |  3 +-
 .../java/org/apache/hadoop/ozone/TestDataUtil.java | 52 ++-------------
 .../ozone/client/rpc/OzoneRpcClientTests.java      | 13 ++--
 .../org/apache/hadoop/ozone/om/TestListKeys.java   |  3 +-
 .../hadoop/ozone/om/TestListKeysWithFSO.java       |  4 +-
 .../hadoop/ozone/om/TestOMDbCheckpointServlet.java |  4 +-
 .../org/apache/hadoop/ozone/om/TestOmAcls.java     |  5 +-
 .../hadoop/ozone/om/TestOmBlockVersioning.java     |  7 +-
 ...TestSnapshotDeletingServiceIntegrationTest.java | 74 ++++++++++++----------
 .../shell/TestOzoneContainerUpgradeShell.java      |  5 +-
 .../hadoop/ozone/shell/TestOzoneDebugShell.java    |  3 +-
 13 files changed, 85 insertions(+), 109 deletions(-)
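
The patch leaves TestDataUtil with two createKey overloads: one taking only a
byte[] payload (defaulting to RATIS/ONE replication) and one taking an explicit
ReplicationConfig plus a byte[] payload. A minimal caller-side sketch of the
migration, assembled from the hunks below ("bucket" stands for any OzoneBucket
used in these tests; the usual ReplicationConfig, ReplicationFactor,
ReplicationType and java.nio.charset.StandardCharsets imports are assumed):

    // Removed variant: String payload plus separate factor/type arguments
    //   TestDataUtil.createKey(bucket, "key1", ReplicationFactor.THREE,
    //       ReplicationType.RATIS, "content");

    // Kept variant: callers build a ReplicationConfig and pass raw bytes
    TestDataUtil.createKey(bucket, "key1",
        ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
        "content".getBytes(StandardCharsets.UTF_8));

    // Kept convenience variant: payload only, defaults to RATIS/ONE replication
    TestDataUtil.createKey(bucket, "key1", new byte[0]);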

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
index 3f6f163b91..691c5cb1ec 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
@@ -374,7 +374,7 @@ public void testMakeDirsWithAnFakeDirectory() throws Exception {
     String fakeGrandpaKey = "dir1";
     String fakeParentKey = fakeGrandpaKey + "/dir2";
     String fullKeyName = fakeParentKey + "/key1";
-    TestDataUtil.createKey(ozoneBucket, fullKeyName, "");
+    TestDataUtil.createKey(ozoneBucket, fullKeyName, new byte[0]);
 
     // /dir1/dir2 should not exist
     assertFalse(fs.exists(new Path(fakeParentKey)));
@@ -888,7 +888,7 @@ public void testListStatusOnKeyNameContainDelimiter() throws Exception {
     * the "/dir1", "/dir1/dir2/" are fake directory
     * */
     String keyName = "dir1/dir2/key1";
-    TestDataUtil.createKey(ozoneBucket, keyName, "");
+    TestDataUtil.createKey(ozoneBucket, keyName, new byte[0]);
     FileStatus[] fileStatuses;
 
     fileStatuses = fs.listStatus(ROOT, EXCLUDE_TRASH);
@@ -1396,7 +1396,7 @@ public void testRenameContainDelimiterFile() throws Exception {
     String fakeParentKey = fakeGrandpaKey + "/dir2";
     String sourceKeyName = fakeParentKey + "/key1";
     String targetKeyName = fakeParentKey +  "/key2";
-    TestDataUtil.createKey(ozoneBucket, sourceKeyName, "");
+    TestDataUtil.createKey(ozoneBucket, sourceKeyName, new byte[0]);
 
     Path sourcePath = new Path(fs.getUri().toString() + "/" + sourceKeyName);
     Path targetPath = new Path(fs.getUri().toString() + "/" + targetKeyName);
@@ -1894,8 +1894,7 @@ public void testProcessingDetails() throws IOException, InterruptedException {
     GenericTestUtils.LogCapturer logCapturer =
         GenericTestUtils.LogCapturer.captureLogs(log);
     int keySize = 1024;
-    TestDataUtil.createKey(ozoneBucket, "key1", new String(new byte[keySize],
-        UTF_8));
+    TestDataUtil.createKey(ozoneBucket, "key1", new byte[keySize]);
     logCapturer.stopCapturing();
     String logContent = logCapturer.getOutput();
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCloseContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCloseContainer.java
index 91e8b5264a..0b8449cb08 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCloseContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCloseContainer.java
@@ -31,9 +31,11 @@
 import static org.junit.jupiter.api.Assertions.assertThrows;
 
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.time.Duration;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -107,8 +109,9 @@ public void testReplicasAreReportedForClosedContainerAfterRestart()
       throws Exception {
     // Create some keys to write data into the open containers
     for (int i = 0; i < 10; i++) {
-      TestDataUtil.createKey(bucket, "key" + i, ReplicationFactor.THREE,
-          ReplicationType.RATIS, "this is the content");
+      TestDataUtil.createKey(bucket, "key" + i, ReplicationConfig
+          .fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
+          "this is the content".getBytes(StandardCharsets.UTF_8));
     }
     StorageContainerManager scm = cluster.getStorageContainerManager();
 
@@ -152,8 +155,9 @@ public void testCloseClosedContainer()
       throws Exception {
     // Create some keys to write data into the open containers
     for (int i = 0; i < 10; i++) {
-      TestDataUtil.createKey(bucket, "key" + i, ReplicationFactor.THREE,
-          ReplicationType.RATIS, "this is the content");
+      TestDataUtil.createKey(bucket, "key" + i, ReplicationConfig
+          .fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
+          "this is the content".getBytes(StandardCharsets.UTF_8));
     }
     StorageContainerManager scm = cluster.getStorageContainerManager();
     // Pick any container on the cluster and close it via client
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java
index acf4599a28..0b3e57d3d9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java
@@ -44,6 +44,7 @@
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.time.Duration;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -831,7 +832,7 @@ private void generateData(int keyCount, String keyPrefix,
       ReplicationConfig replicationConfig) throws IOException {
     for (int i = 0; i < keyCount; i++) {
       TestDataUtil.createKey(bucket, keyPrefix + i, replicationConfig,
-          "this is the content");
+          "this is the content".getBytes(StandardCharsets.UTF_8));
     }
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
index 643e996087..b54bcd6f62 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
@@ -25,7 +25,6 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -105,31 +104,14 @@ public static OzoneVolume createVolume(OzoneClient client,
 
   }
 
-  public static void createKey(OzoneBucket bucket, String keyName,
-                               String content) throws IOException {
-    createKey(bucket, keyName, ReplicationFactor.ONE,
-        ReplicationType.RATIS, content.getBytes(UTF_8));
-  }
-
   public static void createKey(OzoneBucket bucket, String keyName,
                                byte[] content) throws IOException {
-    createKey(bucket, keyName, ReplicationFactor.ONE,
-        ReplicationType.RATIS, content);
+    ReplicationConfig replicationConfig = ReplicationConfig.
+        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.ONE);
+    createKey(bucket, keyName, replicationConfig, content);
 
   }
 
-  public static void createKey(OzoneBucket bucket, String keyName,
-                               ReplicationFactor repFactor, ReplicationType repType, byte[] content)
-      throws IOException {
-    ReplicationConfig repConfig = ReplicationConfig
-        .fromTypeAndFactor(repType, repFactor);
-    try (OutputStream stream = bucket
-        .createKey(keyName, content.length, repConfig,
-            new HashMap<>())) {
-      stream.write(content);
-    }
-  }
-
   public static void createKey(OzoneBucket bucket, String keyName,
                                ReplicationConfig repConfig, byte[] content)
       throws IOException {
@@ -140,32 +122,6 @@ public static void createKey(OzoneBucket bucket, String keyName,
     }
   }
 
-  public static void createKey(OzoneBucket bucket, String keyName,
-      ReplicationFactor repFactor, ReplicationType repType, String content)
-      throws IOException {
-    ReplicationConfig repConfig = ReplicationConfig
-        .fromTypeAndFactor(repType, repFactor);
-    createKey(bucket, keyName, repConfig, content.getBytes(UTF_8));
-  }
-
-  public static void createKey(OzoneBucket bucket, String keyName,
-      ReplicationConfig repConfig, String content)
-      throws IOException {
-    createKey(bucket, keyName, repConfig, content.getBytes(UTF_8));
-  }
-
-  public static void createKey(OzoneBucket bucket, String keyName,
-      ReplicationFactor repFactor, ReplicationType repType,
-      ByteBuffer data) throws IOException {
-    ReplicationConfig repConfig = ReplicationConfig
-        .fromTypeAndFactor(repType, repFactor);
-    try (OutputStream stream = bucket
-        .createKey(keyName, data.capacity(), repConfig,
-            new HashMap<>())) {
-      stream.write(data.array());
-    }
-  }
-
   public static String getKey(OzoneBucket bucket, String keyName)
       throws IOException {
     try (InputStream stream = bucket.readKey(keyName)) {
@@ -258,7 +214,7 @@ public static Map<String, OmKeyInfo> createKeys(MiniOzoneCluster cluster, int nu
       OzoneBucket bucket = createVolumeAndBucket(client);
       for (int i = 0; i < numOfKeys; i++) {
         String keyName = RandomStringUtils.randomAlphabetic(5) + i;
-        createKey(bucket, keyName, RandomStringUtils.randomAlphabetic(5));
+        createKey(bucket, keyName, RandomStringUtils.randomAlphabetic(5).getBytes(UTF_8));
         keyLocationMap.put(keyName, lookupOmKeyInfo(cluster, bucket, keyName));
       }
     }
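
For reference, the two overloads that survive in TestDataUtil look roughly like
the sketch below. The byte[] overload is taken from the added lines above; the
body of the ReplicationConfig overload sits mostly outside the hunk context, so
the try-with-resources write shown here is an assumption modeled on the removed
variants:

    public static void createKey(OzoneBucket bucket, String keyName,
                                 byte[] content) throws IOException {
      // Default replication for tests that only care about the payload.
      ReplicationConfig replicationConfig = ReplicationConfig.
          fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.ONE);
      createKey(bucket, keyName, replicationConfig, content);
    }

    public static void createKey(OzoneBucket bucket, String keyName,
                                 ReplicationConfig repConfig, byte[] content)
        throws IOException {
      // Single write path shared by all callers (assumed, not shown in this hunk).
      try (OutputStream stream = bucket.createKey(keyName, content.length,
          repConfig, new HashMap<>())) {
        stream.write(content);
      }
    }
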
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
index 92bd630efb..898b40af53 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
@@ -1083,8 +1083,8 @@ public void testDeleteAuditLog() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
 
-    String value = "sample value";
-    int valueLength = value.getBytes(UTF_8).length;
+    byte[] value = "sample value".getBytes(UTF_8);
+    int valueLength = value.length;
     store.createVolume(volumeName);
     OzoneVolume volume = store.getVolume(volumeName);
     volume.createBucket(bucketName);
@@ -1092,7 +1092,8 @@ public void testDeleteAuditLog() throws Exception {
 
     // create a three replica file
     String keyName1 = "key1";
-    TestDataUtil.createKey(bucket, keyName1, THREE, RATIS, value);
+    TestDataUtil.createKey(bucket, keyName1, ReplicationConfig
+        .fromTypeAndFactor(RATIS, THREE), value);
 
     // create a EC replica file
     String keyName2 = "key2";
@@ -1103,7 +1104,8 @@ public void testDeleteAuditLog() throws Exception {
     String dirName = "dir1";
     bucket.createDirectory(dirName);
     String keyName3 = "key3";
-    TestDataUtil.createKey(bucket, keyName3, THREE, RATIS, value);
+    TestDataUtil.createKey(bucket, keyName3, ReplicationConfig
+        .fromTypeAndFactor(RATIS, THREE), value);
 
     // delete files and directory
     output.reset();
@@ -1113,7 +1115,8 @@ public void testDeleteAuditLog() throws Exception {
 
     // create keys for deleteKeys case
     String keyName4 = "key4";
-    TestDataUtil.createKey(bucket, dirName + "/" + keyName4, THREE, RATIS, value);
+    TestDataUtil.createKey(bucket, dirName + "/" + keyName4,
+        ReplicationConfig.fromTypeAndFactor(RATIS, THREE), value);
 
     String keyName5 = "key5";
     TestDataUtil.createKey(bucket, dirName + "/" + keyName5, replicationConfig, value);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java
index 7b2404e51f..2d6fbd1cfb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java
@@ -377,7 +377,8 @@ private static void createAndAssertKeys(OzoneBucket ozoneBucket, List<String> ke
     byte[] input = new byte[length];
     Arrays.fill(input, (byte) 96);
     for (String key : keys) {
-      createKey(ozoneBucket, key, ReplicationFactor.THREE, ReplicationType.RATIS, input);
+      createKey(ozoneBucket, key,
+          ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), input);
       // Read the key with given key name.
       readkey(ozoneBucket, key, length, input);
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java
index 6aa60365a7..1f69f647c3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java
@@ -649,8 +649,8 @@ private static void createAndAssertKeys(OzoneBucket ozoneBucket, List<String> ke
     byte[] input = new byte[length];
     Arrays.fill(input, (byte) 96);
     for (String key : keys) {
-      createKey(ozoneBucket, key, ReplicationFactor.THREE,
-          ReplicationType.RATIS, input);
+      createKey(ozoneBucket, key, ReplicationConfig
+          .fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), input);
       // Read the key with given key name.
       readkey(ozoneBucket, key, length, input);
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
index 1d9b74e055..076bf2abd7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
@@ -677,9 +677,9 @@ private void prepSnapshotData() throws Exception {
 
     // Create dummy keys for snapshotting.
     TestDataUtil.createKey(bucket, UUID.randomUUID().toString(),
-        "content");
+        "content".getBytes(StandardCharsets.UTF_8));
     TestDataUtil.createKey(bucket, UUID.randomUUID().toString(),
-        "content");
+        "content".getBytes(StandardCharsets.UTF_8));
 
     snapshotDirName =
         createSnapshot(bucket.getVolumeName(), bucket.getName());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java
index 5c5fd7777d..b0b44b8ea4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java
@@ -27,6 +27,7 @@
 import static org.junit.jupiter.api.Assertions.assertThrows;
 
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.IOUtils;
@@ -172,7 +173,7 @@ public void testCreateKeyPermissionDenied() throws Exception {
     OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client);
 
     OMException exception = assertThrows(OMException.class,
-            () -> TestDataUtil.createKey(bucket, "testKey", "testcontent"));
+            () -> TestDataUtil.createKey(bucket, "testKey", "testcontent".getBytes(StandardCharsets.UTF_8)));
     assertEquals(ResultCodes.PERMISSION_DENIED, exception.getResult());
     assertThat(logCapturer.getOutput()).contains("doesn't have CREATE " +
             "permission to access key");
@@ -181,7 +182,7 @@ public void testCreateKeyPermissionDenied() throws Exception {
   @Test
   public void testReadKeyPermissionDenied() throws Exception {
     OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client);
-    TestDataUtil.createKey(bucket, "testKey", "testcontent");
+    TestDataUtil.createKey(bucket, "testKey", "testcontent".getBytes(StandardCharsets.UTF_8));
 
     TestOmAcls.keyAclAllow = false;
     OMException exception = assertThrows(OMException.class,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java
index a043c9aa5e..8abb4d27f8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java
@@ -20,6 +20,7 @@
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.List;
 import org.apache.commons.lang3.RandomStringUtils;
@@ -166,7 +167,7 @@ public void testReadLatestVersion() throws Exception {
 
     String dataString = RandomStringUtils.randomAlphabetic(100);
 
-    TestDataUtil.createKey(bucket, keyName, dataString);
+    TestDataUtil.createKey(bucket, keyName, dataString.getBytes(StandardCharsets.UTF_8));
     assertEquals(dataString, TestDataUtil.getKey(bucket, keyName));
     OmKeyInfo keyInfo = ozoneManager.lookupKey(omKeyArgs);
     assertEquals(0, keyInfo.getLatestVersionLocations().getVersion());
@@ -175,7 +176,7 @@ public void testReadLatestVersion() throws Exception {
 
     // When bucket versioning is disabled, overwriting a key doesn't increment
     // its version count. Rather it always resets the version to 0
-    TestDataUtil.createKey(bucket, keyName, dataString);
+    TestDataUtil.createKey(bucket, keyName, dataString.getBytes(StandardCharsets.UTF_8));
 
     keyInfo = ozoneManager.lookupKey(omKeyArgs);
     assertEquals(dataString, TestDataUtil.getKey(bucket, keyName));
@@ -184,7 +185,7 @@ public void testReadLatestVersion() throws Exception {
         keyInfo.getLatestVersionLocations().getLocationList().size());
 
     dataString = RandomStringUtils.randomAlphabetic(200);
-    TestDataUtil.createKey(bucket, keyName, dataString);
+    TestDataUtil.createKey(bucket, keyName, dataString.getBytes(StandardCharsets.UTF_8));
 
     keyInfo = ozoneManager.lookupKey(omKeyArgs);
     assertEquals(dataString, TestDataUtil.getKey(bucket, keyName));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java
index a22ae2c144..be7a929a83 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java
@@ -49,6 +49,7 @@
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.stream.Collectors;
 import org.apache.commons.compress.utils.Lists;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -201,10 +202,10 @@ public void testMultipleSnapshotKeyReclaim() throws Exception {
     OzoneBucket bucket2 = TestDataUtil.createBucket(
         client, VOLUME_NAME, bucketArgs, BUCKET_NAME_TWO);
     // Create key1 and key2
-    TestDataUtil.createKey(bucket2, "bucket2key1", ReplicationFactor.THREE,
-        ReplicationType.RATIS, CONTENT);
-    TestDataUtil.createKey(bucket2, "bucket2key2", ReplicationFactor.THREE,
-        ReplicationType.RATIS, CONTENT);
+    TestDataUtil.createKey(bucket2, "bucket2key1", ReplicationConfig.
+        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
+    TestDataUtil.createKey(bucket2, "bucket2key2", ReplicationConfig.
+        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
 
     // Create Snapshot
     client.getObjectStore().createSnapshot(VOLUME_NAME, BUCKET_NAME_TWO,
@@ -264,14 +265,16 @@ public void testSnapshotWithFSO() throws Exception {
 
     // Create 10 keys
     for (int i = 1; i <= 10; i++) {
-      TestDataUtil.createKey(bucket2, "key" + i, ReplicationFactor.THREE,
-          ReplicationType.RATIS, CONTENT);
+      TestDataUtil.createKey(bucket2, "key" + i, ReplicationConfig.
+              fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
+          CONTENT.array());
     }
 
     // Create 5 keys to overwrite
     for (int i = 11; i <= 15; i++) {
-      TestDataUtil.createKey(bucket2, "key" + i, ReplicationFactor.THREE,
-          ReplicationType.RATIS, CONTENT);
+      TestDataUtil.createKey(bucket2, "key" + i, ReplicationConfig.
+          fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
+          CONTENT.array());
     }
 
     // Create Directory and Sub
@@ -284,8 +287,8 @@ public void testSnapshotWithFSO() throws Exception {
         String childDir = "/childDir" + j;
         client.getProxy().createDirectory(VOLUME_NAME,
             BUCKET_NAME_FSO, parent + childDir);
-        TestDataUtil.createKey(bucket2, parent + childFile,
-            ReplicationFactor.THREE, ReplicationType.RATIS, CONTENT);
+        TestDataUtil.createKey(bucket2, parent + childFile, ReplicationConfig.
+                fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
       }
     }
 
@@ -301,8 +304,8 @@ public void testSnapshotWithFSO() throws Exception {
 
     // Overwrite 3 keys -> Moves previous version to deletedTable
     for (int i = 11; i <= 13; i++) {
-      TestDataUtil.createKey(bucket2, "key" + i, ReplicationFactor.THREE,
-          ReplicationType.RATIS, CONTENT);
+      TestDataUtil.createKey(bucket2, "key" + i, ReplicationConfig.
+          fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
     }
     assertTableRowCount(keyTable, 24);
 
@@ -366,8 +369,9 @@ public void testSnapshotWithFSO() throws Exception {
 
     // Overwrite 2 keys
     for (int i = 14; i <= 15; i++) {
-      TestDataUtil.createKey(bucket2, "key" + i, ReplicationFactor.THREE,
-          ReplicationType.RATIS, CONTENT);
+      TestDataUtil.createKey(bucket2, "key" + i, ReplicationConfig.
+              fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
+          CONTENT.array());
     }
 
     // Delete 2 more keys
@@ -723,10 +727,12 @@ private synchronized void createSnapshotDataForBucket(OzoneBucket bucket) throws
     OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl)
         om.getMetadataManager();
 
-    TestDataUtil.createKey(bucket, bucket.getName() + "key0", ReplicationFactor.THREE,
-        ReplicationType.RATIS, CONTENT);
-    TestDataUtil.createKey(bucket, bucket.getName() + "key1", ReplicationFactor.THREE,
-        ReplicationType.RATIS, CONTENT);
+    TestDataUtil.createKey(bucket, bucket.getName() + "key0", ReplicationConfig.
+            fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
+        CONTENT.array());
+    TestDataUtil.createKey(bucket, bucket.getName() + "key1", ReplicationConfig.
+            fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
+        CONTENT.array());
     assertTableRowCount(keyTable, 2);
 
     // Create Snapshot 1.
@@ -736,10 +742,10 @@ private synchronized void createSnapshotDataForBucket(OzoneBucket bucket) throws
 
     // Overwrite bucket1key0, This is a newer version of the key which should
     // reclaimed as this is a different version of the key.
-    TestDataUtil.createKey(bucket, bucket.getName() + "key0", ReplicationFactor.THREE,
-        ReplicationType.RATIS, CONTENT);
-    TestDataUtil.createKey(bucket, bucket.getName() + "key2", ReplicationFactor.THREE,
-        ReplicationType.RATIS, CONTENT);
+    TestDataUtil.createKey(bucket, bucket.getName() + "key0", ReplicationConfig.
+        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
+    TestDataUtil.createKey(bucket, bucket.getName() + "key2", ReplicationConfig.
+        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
 
     // Key 1 cannot be reclaimed as it is still referenced by Snapshot 1.
     client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(),
@@ -763,10 +769,10 @@ private synchronized void createSnapshotDataForBucket(OzoneBucket bucket) throws
     // deletedTable when Snapshot 2 is taken.
     assertTableRowCount(deletedTable, 0);
 
-    TestDataUtil.createKey(bucket, bucket.getName() + "key3", ReplicationFactor.THREE,
-        ReplicationType.RATIS, CONTENT);
-    TestDataUtil.createKey(bucket, bucket.getName() + "key4", ReplicationFactor.THREE,
-        ReplicationType.RATIS, CONTENT);
+    TestDataUtil.createKey(bucket, bucket.getName() + "key3", ReplicationConfig.
+        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
+    TestDataUtil.createKey(bucket, bucket.getName() + "key4", ReplicationConfig.
+        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
     client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(),
         bucket.getName() + "key4", false);
     assertTableRowCount(keyTable, 1);
@@ -826,19 +832,19 @@ private synchronized void createSnapshotFSODataForBucket(OzoneBucket bucket) thr
                 throw new RuntimeException(ex);
               }
             }));
-    TestDataUtil.createKey(bucket, "dir0/" + bucket.getName() + "key0", ReplicationFactor.THREE,
-        ReplicationType.RATIS, CONTENT);
-    TestDataUtil.createKey(bucket, "dir1/" + bucket.getName() + "key1", ReplicationFactor.THREE,
-        ReplicationType.RATIS, CONTENT);
+    TestDataUtil.createKey(bucket, "dir0/" + bucket.getName() + "key0", ReplicationConfig.
+        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
+    TestDataUtil.createKey(bucket, "dir1/" + bucket.getName() + "key1", ReplicationConfig.
+        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
     assertTableRowCount(keyTable, countMap.get(keyTable.getName()) + 2);
     assertTableRowCount(dirTable, countMap.get(dirTable.getName()) + 2);
 
     // Overwrite bucket1key0, This is a newer version of the key which should
     // reclaimed as this is a different version of the key.
-    TestDataUtil.createKey(bucket, "dir0/" + bucket.getName() + "key0", ReplicationFactor.THREE,
-        ReplicationType.RATIS, CONTENT);
-    TestDataUtil.createKey(bucket, "dir2/" + bucket.getName() + "key2", ReplicationFactor.THREE,
-        ReplicationType.RATIS, CONTENT);
+    TestDataUtil.createKey(bucket, "dir0/" + bucket.getName() + "key0", ReplicationConfig.
+        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
+    TestDataUtil.createKey(bucket, "dir2/" + bucket.getName() + "key2", ReplicationConfig.
+        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
     assertTableRowCount(keyTable, countMap.get(keyTable.getName()) + 3);
     assertTableRowCount(dirTable, countMap.get(dirTable.getName()) + 3);
     assertTableRowCount(deletedTable, countMap.get(deletedTable.getName()) + 1);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java
index 4cd2923a8f..e6ada443cc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java
@@ -34,6 +34,7 @@
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.StringWriter;
+import java.nio.charset.StandardCharsets;
 import java.time.Duration;
 import java.util.ArrayList;
 import java.util.List;
@@ -41,6 +42,7 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.stream.Collectors;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -181,7 +183,8 @@ private static void writeKey(String keyName) throws IOException {
       TestDataUtil.createVolumeAndBucket(client, VOLUME_NAME, BUCKET_NAME);
       TestDataUtil.createKey(
           client.getObjectStore().getVolume(VOLUME_NAME).getBucket(BUCKET_NAME),
-          keyName, ReplicationFactor.THREE, ReplicationType.RATIS, "test");
+          keyName, ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
+          "test".getBytes(StandardCharsets.UTF_8));
     }
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
index 021bb251d7..a1ace9f97f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
@@ -35,6 +35,7 @@
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.StringWriter;
+import java.nio.charset.StandardCharsets;
 import java.time.Duration;
 import java.util.HashSet;
 import java.util.Set;
@@ -191,7 +192,7 @@ private static void writeKey(String volumeName, String bucketName,
           BucketLayout.LEGACY);
       TestDataUtil.createKey(
           client.getObjectStore().getVolume(volumeName).getBucket(bucketName),
-          keyName, repConfig, "test");
+          keyName, repConfig, "test".getBytes(StandardCharsets.UTF_8));
     }
   }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
