[ https://issues.apache.org/jira/browse/HADOOP-19604?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=18015139#comment-18015139 ]

ASF GitHub Bot commented on HADOOP-19604:
-----------------------------------------

anmolanmol1234 commented on code in PR #7853:
URL: https://github.com/apache/hadoop/pull/7853#discussion_r2287903415


##########
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java:
##########
@@ -113,121 +114,129 @@ public void testReadFile() throws Exception {
     boolean[] createFileWithAbfs = new boolean[]{false, true, false, true};
     boolean[] readFileWithAbfs = new boolean[]{false, true, true, false};
 
-    AzureBlobFileSystem abfs = getFileSystem();
-    // test only valid for non-namespace enabled account
-    Assume.assumeFalse("Namespace enabled account does not support this test",
-        getIsNamespaceEnabled(abfs));
-    Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled());
-
-    NativeAzureFileSystem wasb = getWasbFileSystem();
+    Configuration conf = getRawConfiguration();
+    conf.setBoolean(FS_AZURE_ENABLE_FULL_BLOB_CHECKSUM_VALIDATION, true);
+    FileSystem fileSystem = FileSystem.newInstance(conf);
+    try (AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem) {
+      // test only valid for non-namespace enabled account
+      Assume.assumeFalse("Namespace enabled account does not support this 
test",
+          getIsNamespaceEnabled(abfs));
+      Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled());
+
+      NativeAzureFileSystem wasb = getWasbFileSystem();
+
+      Path testFile = path("/testReadFile");
+      for (int i = 0; i < 4; i++) {
+        Path path = new Path(testFile + "/~12/!008/testfile" + i);
+        final FileSystem createFs = createFileWithAbfs[i] ? abfs : wasb;
+        // Read
+        final FileSystem readFs = readFileWithAbfs[i] ? abfs : wasb;
+        // Write
+        try (FSDataOutputStream nativeFsStream = createFs.create(path, true)) {
+          nativeFsStream.write(TEST_CONTEXT.getBytes());
+          nativeFsStream.flush();
+          nativeFsStream.hsync();
+        }
+
+        // Check file status
+        ContractTestUtils.assertIsFile(createFs, path);
+
+        try (BufferedReader br = new BufferedReader(
+            new InputStreamReader(readFs.open(path)))) {
+          String line = br.readLine();
+          assertEquals("Wrong text from " + readFs,
+              TEST_CONTEXT, line);
+        }
+
+        // Remove file
+        assertDeleted(readFs, path, true);
+      }
+    }
+  }
 
-    Path testFile = path("/testReadFile");
-    for (int i = 0; i < 4; i++) {
-      Path path = new Path(testFile + "/~12/!008/testfile" + i);
-      final FileSystem createFs = createFileWithAbfs[i] ? abfs : wasb;
-      // Read
-      final FileSystem readFs = readFileWithAbfs[i] ? abfs : wasb;
+  /**
+   * Flow: Create and write a file using WASB, then read and append to it using ABFS. Finally, delete the file via ABFS after verifying content consistency.
+   * Expected: WASB successfully creates the file and writes content. ABFS reads, appends, and deletes the file without data loss or errors.
+   */
+  @Test
+  public void testwriteFile() throws Exception {
+    try (AzureBlobFileSystem abfs = getFileSystem()) {
+      Assume.assumeFalse("Namespace enabled account does not support this 
test",
+          getIsNamespaceEnabled(abfs));
+      assumeBlobServiceType();
+      Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled());
+      NativeAzureFileSystem wasb = getWasbFileSystem();
+
+      Path testFile = path("/testReadFile");
+      Path path = new Path(
+          testFile + "/~12/!008/testfile_" + UUID.randomUUID());
       // Write
-      try (FSDataOutputStream nativeFsStream = createFs.create(path, true)) {
+      try (FSDataOutputStream nativeFsStream = wasb.create(path, true)) {
         nativeFsStream.write(TEST_CONTEXT.getBytes());
         nativeFsStream.flush();
         nativeFsStream.hsync();
       }
 
       // Check file status
-      ContractTestUtils.assertIsFile(createFs, path);
+      ContractTestUtils.assertIsFile(wasb, path);
 
       try (BufferedReader br = new BufferedReader(
-          new InputStreamReader(readFs.open(path)))) {
+          new InputStreamReader(abfs.open(path)))) {
         String line = br.readLine();
-        assertEquals("Wrong text from " + readFs,
+        assertEquals("Wrong text from " + abfs,
             TEST_CONTEXT, line);
       }
-
+      try (FSDataOutputStream abfsOutputStream = abfs.append(path)) {
+        abfsOutputStream.write(TEST_CONTEXT.getBytes());
+        abfsOutputStream.flush();
+        abfsOutputStream.hsync();
+      }
       // Remove file
-      assertDeleted(readFs, path, true);
+      assertDeleted(abfs, path, true);
     }
   }
 
-  /**
-   * Flow: Create and write a file using WASB, then read and append to it using ABFS. Finally, delete the file via ABFS after verifying content consistency.
-   * Expected: WASB successfully creates the file and writes content. ABFS reads, appends, and deletes the file without data loss or errors.
-   */
-  @Test
-  public void testwriteFile() throws Exception {
-    AzureBlobFileSystem abfs = getFileSystem();
-    Assume.assumeFalse("Namespace enabled account does not support this test",
-        getIsNamespaceEnabled(abfs));
-    assumeBlobServiceType();
-    Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled());
-    NativeAzureFileSystem wasb = getWasbFileSystem();
-
-    Path testFile = path("/testReadFile");
-    Path path = new Path(testFile + "/~12/!008/testfile_" + UUID.randomUUID());
-    // Write
-    try (FSDataOutputStream nativeFsStream = wasb.create(path, true)) {
-      nativeFsStream.write(TEST_CONTEXT.getBytes());
-      nativeFsStream.flush();
-      nativeFsStream.hsync();
-    }
-
-    // Check file status
-    ContractTestUtils.assertIsFile(wasb, path);
-
-    try (BufferedReader br = new BufferedReader(
-        new InputStreamReader(abfs.open(path)))) {
-      String line = br.readLine();
-      assertEquals("Wrong text from " + abfs,
-          TEST_CONTEXT, line);
-    }
-    try (FSDataOutputStream abfsOutputStream = abfs.append(path)) {
-      abfsOutputStream.write(TEST_CONTEXT.getBytes());
-      abfsOutputStream.flush();
-      abfsOutputStream.hsync();
-    }
-    // Remove file
-    assertDeleted(abfs, path, true);
-  }
-
   /**
    * Flow: Create and write a file using ABFS, append to the file using WASB, then write again using ABFS.
    * Expected: File is created and written correctly by ABFS, appended by WASB, and final ABFS write reflects all updates without errors.
    */
 
   @Test
   public void testwriteFile1() throws Exception {
-    AzureBlobFileSystem abfs = getFileSystem();
-    Assume.assumeFalse("Namespace enabled account does not support this test",
-        getIsNamespaceEnabled(abfs));
-    assumeBlobServiceType();
-    Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled());
-    NativeAzureFileSystem wasb = getWasbFileSystem();
-
-    Path testFile = path("/testReadFile");
-    Path path = new Path(testFile + "/~12/!008/testfile_" + UUID.randomUUID());
-    // Write
-    try (FSDataOutputStream nativeFsStream = abfs.create(path, true)) {
-      nativeFsStream.write(TEST_CONTEXT.getBytes());
-      nativeFsStream.flush();
-      nativeFsStream.hsync();
-    }
+    try (AzureBlobFileSystem abfs = getFileSystem()) {

Review Comment:
   taken





> ABFS: Fix WASB ABFS compatibility issues
> ----------------------------------------
>
>                 Key: HADOOP-19604
>                 URL: https://issues.apache.org/jira/browse/HADOOP-19604
>             Project: Hadoop Common
>          Issue Type: Sub-task
>    Affects Versions: 3.4.1
>            Reporter: Anmol Asrani
>            Assignee: Anmol Asrani
>            Priority: Major
>              Labels: pull-request-available
>             Fix For: 3.4.1
>
>
> Fix WASB/ABFS compatibility issues, such as:
>  # BlockId computation to be consistent across clients for PutBlock and PutBlockList.
>  # Restrict URL encoding of certain JSON metadata during setXAttr calls.
>  # Maintain the MD5 hash of the whole block to validate data integrity during flush.
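
For the third item, a minimal sketch of what client-side full-blob MD5 validation can look like, using only the JDK's MessageDigest and Base64. The FullBlobMd5Tracker class and its method names are hypothetical, shown only to illustrate the idea behind the FS_AZURE_ENABLE_FULL_BLOB_CHECKSUM_VALIDATION flag exercised in the test above; this is not the actual AbfsOutputStream implementation.

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;

/**
 * Illustrative helper (hypothetical): accumulates an MD5 over every block
 * written to a blob and, at flush time, compares the result with the
 * Content-MD5 value reported by the service.
 */
class FullBlobMd5Tracker {
  private final MessageDigest fullBlobDigest;

  FullBlobMd5Tracker() throws NoSuchAlgorithmException {
    fullBlobDigest = MessageDigest.getInstance("MD5");
  }

  /** Feed each uploaded block's bytes into the running digest. */
  void onBlockWritten(byte[] data, int off, int len) {
    fullBlobDigest.update(data, off, len);
  }

  /** At flush, verify the locally computed MD5 against the service value. */
  void validateOnFlush(String serviceContentMd5Base64) {
    String localMd5 =
        Base64.getEncoder().encodeToString(fullBlobDigest.digest());
    if (!localMd5.equals(serviceContentMd5Base64)) {
      throw new IllegalStateException("Blob MD5 mismatch: local=" + localMd5
          + " service=" + serviceContentMd5Base64);
    }
  }
}

Feeding the digest incrementally per block means the flush-time check covers the whole blob rather than any single append, which is what "md5 hash of whole block" in the description refers to.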



--
This message was sent by Atlassian Jira
(v8.20.10#820010)
