This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new a4f48211b3 HDDS-11969. getFilechecksum() API fails if checksum type is 
NONE. (#7656)
a4f48211b3 is described below

commit a4f48211b35522a30e42f6df918409495f0652e2
Author: Sadanand Shenoy <[email protected]>
AuthorDate: Thu Jan 9 16:42:35 2025 +0530

    HDDS-11969. getFilechecksum() API fails if checksum type is NONE. (#7656)
---
 .../apache/hadoop/hdds/scm/OzoneClientConfig.java  | 12 ++--
 .../fs/ozone/AbstractOzoneFileSystemTest.java      | 77 ++++++++++++++--------
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      |  8 ++-
 .../ozone/BasicRootedOzoneClientAdapterImpl.java   |  8 ++-
 4 files changed, 67 insertions(+), 38 deletions(-)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
index a4b53a80a1..e31a2942cb 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
@@ -485,14 +485,16 @@ public class OzoneClientConfig {
     try {
       return ChecksumCombineMode.valueOf(checksumCombineMode);
     } catch (IllegalArgumentException iae) {
-      LOG.warn("Bad checksum combine mode: {}. Using default {}",
-          checksumCombineMode,
-          ChecksumCombineMode.COMPOSITE_CRC.name());
-      return ChecksumCombineMode.valueOf(
-          ChecksumCombineMode.COMPOSITE_CRC.name());
+      LOG.warn("Bad checksum combine mode: {}.",
+          checksumCombineMode);
+      return null;
     }
   }
 
+  public void setChecksumCombineMode(String checksumCombineMode) {
+    this.checksumCombineMode = checksumCombineMode;
+  }
+
   public void setEcReconstructStripeReadPoolLimit(int poolLimit) {
     this.ecReconstructStripeReadPoolLimit = poolLimit;
   }
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
index caba8b3519..ee004af1fc 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
 import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -1581,40 +1582,42 @@ abstract class AbstractOzoneFileSystemTest {
     Configuration conf = new OzoneConfiguration(cluster.getConf());
     conf.set(FS_DEFAULT_NAME_KEY, rootPath);
     // Set the number of keys to be processed during batch operate.
-    OzoneFileSystem o3FS = (OzoneFileSystem) FileSystem.get(conf);
+    try (FileSystem fileSystem = FileSystem.get(conf)) {
+      OzoneFileSystem o3FS = (OzoneFileSystem) fileSystem;
 
-    //Let's reset the clock to control the time.
-    ((BasicOzoneClientAdapterImpl) (o3FS.getAdapter())).setClock(testClock);
+      //Let's reset the clock to control the time.
+      ((BasicOzoneClientAdapterImpl) (o3FS.getAdapter())).setClock(testClock);
 
-    createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key"),
-        ReplicationType.RATIS);
+      createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key"),
+          ReplicationType.RATIS);
 
-    bucket.setReplicationConfig(new ECReplicationConfig("rs-3-2-1024k"));
+      bucket.setReplicationConfig(new ECReplicationConfig("rs-3-2-1024k"));
 
-    //After changing the bucket policy, it should create ec key, but o3fs will
-    // refresh after some time. So, it will be sill old type.
-    createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key1"),
-        ReplicationType.RATIS);
+      //After changing the bucket policy, it should create ec key, but o3fs 
will
+      // refresh after some time. So, it will still be the old type.
+      createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key1"),
+          ReplicationType.RATIS);
 
-    testClock.fastForward(300 * 1000 + 1);
+      testClock.fastForward(300 * 1000 + 1);
 
-    //After client bucket refresh time, it should create new type what is
-    // available on bucket at that moment.
-    createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key2"),
-        ReplicationType.EC);
+      //After client bucket refresh time, it should create the new type that is
+      // available on the bucket at that moment.
+      createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key2"),
+          ReplicationType.EC);
 
-    // Rechecking the same steps with changing to Ratis again to check the
-    // behavior is consistent.
-    bucket.setReplicationConfig(
-        
RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE));
+      // Rechecking the same steps with changing to Ratis again to check the
+      // behavior is consistent.
+      bucket.setReplicationConfig(RatisReplicationConfig.getInstance(
+          HddsProtos.ReplicationFactor.THREE));
 
-    createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key3"),
-        ReplicationType.EC);
+      createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key3"),
+          ReplicationType.EC);
 
-    testClock.fastForward(300 * 1000 + 1);
+      testClock.fastForward(300 * 1000 + 1);
 
-    createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key4"),
-        ReplicationType.RATIS);
+      createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key4"),
+          ReplicationType.RATIS);
+    }
   }
 
   private void createKeyAndAssertKeyType(OzoneBucket bucket,
@@ -1668,9 +1671,11 @@ abstract class AbstractOzoneFileSystemTest {
     OzoneConfiguration conf2 = new OzoneConfiguration(cluster.getConf());
     conf2.setClass("fs.trash.classname", TrashPolicyDefault.class,
         TrashPolicy.class);
-    Trash trashPolicyDefault = new Trash(conf2);
-    assertThrows(IOException.class,
-        () -> trashPolicyDefault.moveToTrash(root));
+    try (FileSystem fs = FileSystem.get(conf2)) {
+      Trash trashPolicyDefault = new Trash(fs, conf2);
+      assertThrows(IOException.class,
+          () -> trashPolicyDefault.moveToTrash(root));
+    }
   }
 
   /**
@@ -2275,6 +2280,24 @@ abstract class AbstractOzoneFileSystemTest {
     }
   }
 
+  @Test
+  public void testGetFileChecksumWithInvalidCombineMode() throws IOException {
+    final String root = "/root";
+    Path rootPath = new Path(fs.getUri().toString() + root);
+    fs.mkdirs(rootPath);
+    Path file = new Path(fs.getUri().toString() + root
+        + "/dummy");
+    ContractTestUtils.touch(fs, file);
+    OzoneClientConfig clientConfig = 
cluster.getConf().getObject(OzoneClientConfig.class);
+    clientConfig.setChecksumCombineMode("NONE");
+    OzoneConfiguration conf = cluster.getConf();
+    conf.setFromObject(clientConfig);
+    conf.setBoolean("fs.o3fs.impl.disable.cache", true);
+    try (FileSystem fileSystem = FileSystem.get(conf)) {
+      assertNull(fileSystem.getFileChecksum(file));
+    }
+  }
+
   private String getCurrentUser() {
     try {
       return UserGroupInformation.getCurrentUser().getShortUserName();
diff --git 
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
 
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index 689e340ff5..d824abc28f 100644
--- 
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ 
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -647,11 +647,13 @@ public class BasicOzoneClientAdapterImpl implements 
OzoneClientAdapter {
       throws IOException {
     OzoneClientConfig.ChecksumCombineMode combineMode =
         config.getObject(OzoneClientConfig.class).getChecksumCombineMode();
-
+    if (combineMode == null) {
+      return null;
+    }
     return OzoneClientUtils.getFileChecksumWithCombineMode(
         volume, bucket, keyName,
-        length, combineMode, ozoneClient.getObjectStore().getClientProxy());
-
+        length, combineMode,
+        ozoneClient.getObjectStore().getClientProxy());
   }
 
   @Override
diff --git 
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
 
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index 9896ab722d..c5cb003a56 100644
--- 
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ 
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -1298,14 +1298,16 @@ public class BasicRootedOzoneClientAdapterImpl
       throws IOException {
     OzoneClientConfig.ChecksumCombineMode combineMode =
         config.getObject(OzoneClientConfig.class).getChecksumCombineMode();
-
+    if (combineMode == null) {
+      return null;
+    }
     OFSPath ofsPath = new OFSPath(keyName, config);
-
     OzoneVolume volume = objectStore.getVolume(ofsPath.getVolumeName());
     OzoneBucket bucket = getBucket(ofsPath, false);
     return OzoneClientUtils.getFileChecksumWithCombineMode(
         volume, bucket, ofsPath.getKeyName(),
-        length, combineMode, ozoneClient.getObjectStore().getClientProxy());
+        length, combineMode,
+        ozoneClient.getObjectStore().getClientProxy());
 
   }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to