[
https://issues.apache.org/jira/browse/HADOOP-18910?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17783000#comment-17783000
]
ASF GitHub Bot commented on HADOOP-18910:
-----------------------------------------
anujmodi2021 commented on code in PR #6069:
URL: https://github.com/apache/hadoop/pull/6069#discussion_r1382593149
##########
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemChecksum.java:
##########
@@ -0,0 +1,259 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azurebfs;
+
+import java.security.SecureRandom;
+import java.util.Arrays;
+import java.util.HashSet;
+
+import org.assertj.core.api.Assertions;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import
org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsInvalidChecksumException;
+import
org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException;
+import
org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters;
+import org.apache.hadoop.fs.azurebfs.services.AbfsClient;
+import org.apache.hadoop.fs.impl.OpenFileParameters;
+
+import static
org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.MD5_ERROR_SERVER_MESSAGE;
+import static
org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_BUFFERED_PREAD_DISABLE;
+import static
org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.ONE_MB;
+import static
org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters.Mode.APPEND_MODE;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.mockito.ArgumentMatchers.any;
+
+/**
+ * Test For Verifying Checksum Related Operations
+ */
+public class ITestAzureBlobFileSystemChecksum extends
AbstractAbfsIntegrationTest {
+
+ public ITestAzureBlobFileSystemChecksum() throws Exception {
+ super();
+ }
+
+ @Test
+ public void testWriteReadWithChecksum() throws Exception {
+ testWriteReadWithChecksumInternal(true);
+ testWriteReadWithChecksumInternal(false);
+ }
+
+ @Test
+ public void testAppendWithChecksumAtDifferentOffsets() throws Exception {
+ AzureBlobFileSystem fs = getConfiguredFileSystem(4 * ONE_MB, 4 * ONE_MB,
true);
+ AbfsClient client = fs.getAbfsStore().getClient();
+ Path path = path("testPath");
+ fs.create(path);
+ byte[] data= generateRandomBytes(4 * ONE_MB);
+
+ appendWithOffsetHelper(client, path, data, fs, 0);
+ appendWithOffsetHelper(client, path, data, fs, 1 * ONE_MB);
+ appendWithOffsetHelper(client, path, data, fs, 2 * ONE_MB);
+ appendWithOffsetHelper(client, path, data, fs, 4 * ONE_MB - 1);
+ }
+
+ @Test
+ public void testReadWithChecksumAtDifferentOffsets() throws Exception {
+ AzureBlobFileSystem fs = getConfiguredFileSystem(4 * ONE_MB, 4 * ONE_MB,
true);
+ AbfsClient client = fs.getAbfsStore().getClient();
+ fs.getAbfsStore().setClient(client);
+ Path path = path("testPath");
+
+ byte[] data = generateRandomBytes(16 * ONE_MB);
+ FSDataOutputStream out = fs.create(path);
+ out.write(data);
+ out.hflush();
+ out.close();
+
+ readWithOffsetAndPositionHelper(client, path, data, fs, 0, 0);
+ readWithOffsetAndPositionHelper(client, path, data, fs, 4 * ONE_MB, 0);
+ readWithOffsetAndPositionHelper(client, path, data, fs, 4 * ONE_MB, 1 *
ONE_MB);
+ readWithOffsetAndPositionHelper(client, path, data, fs, 8 * ONE_MB, 2 *
ONE_MB);
+ readWithOffsetAndPositionHelper(client, path, data, fs, 15 * ONE_MB, 4 *
ONE_MB - 1);
+ }
+
+ @Test
+ public void testWriteReadWithChecksumAndOptions() throws Exception {
+ testWriteReadWithChecksumAndOptionsInternal(true);
+ testWriteReadWithChecksumAndOptionsInternal(false);
+ }
+
+ @Test
+ public void testAbfsInvalidChecksumExceptionInAppend() throws Exception {
+ AzureBlobFileSystem fs = getConfiguredFileSystem(4 * ONE_MB, 4 * ONE_MB,
true);
+ AbfsClient spiedClient = Mockito.spy(fs.getAbfsStore().getClient());
+ fs.getAbfsStore().setClient(spiedClient);
+ Path path = path("testPath");
+ fs.create(path);
+ byte[] data= generateRandomBytes(4 * ONE_MB);
+ String invalidMD5Hash =
spiedClient.computeMD5Hash("InvalidData".getBytes(), 0, 11);
+ Mockito.doReturn(invalidMD5Hash).when(spiedClient).computeMD5Hash(any(),
+ any(Integer.class), any(Integer.class));
+ AbfsRestOperationException ex =
intercept(AbfsInvalidChecksumException.class, () -> {
+ appendWithOffsetHelper(spiedClient, path, data, fs, 0);
+ });
+
+ Assertions.assertThat(ex.getErrorMessage())
+ .describedAs("Exception Message should contain MD5Mismatch")
+ .contains(MD5_ERROR_SERVER_MESSAGE);
+ }
+
+ @Test
+ public void testAbfsInvalidChecksumExceptionInRead() throws Exception {
+ AzureBlobFileSystem fs = getConfiguredFileSystem(4 * ONE_MB, 4 * ONE_MB,
true);
+ AbfsClient spiedClient = Mockito.spy(fs.getAbfsStore().getClient());
+ fs.getAbfsStore().setClient(spiedClient);
+ Path path = path("testPath");
+ byte[] data = generateRandomBytes(3 * ONE_MB);
+ FSDataOutputStream out = fs.create(path);
+ out.write(data);
+ out.hflush();
+ out.close();
+
+ String invalidMD5Hash =
spiedClient.computeMD5Hash("InvalidData".getBytes(), 0, 11);
+ Mockito.doReturn(invalidMD5Hash).when(spiedClient).computeMD5Hash(any(),
+ any(Integer.class), any(Integer.class));
+
+ intercept(AbfsInvalidChecksumException.class, () -> {
+ readWithOffsetAndPositionHelper(spiedClient, path, data, fs, 0, 0);
+ });
+ }
+
+ private void testWriteReadWithChecksumInternal(final boolean
readAheadEnabled)
+ throws Exception {
+ AzureBlobFileSystem fs = getConfiguredFileSystem(4 * ONE_MB, 4 * ONE_MB,
readAheadEnabled);
+ final int dataSize = 16 * ONE_MB + 1000;
+
+ Path testPath = path("testPath");
+ byte[] bytesUploaded = generateRandomBytes(dataSize);
+ FSDataOutputStream out = fs.create(testPath);
+ out.write(bytesUploaded);
+ out.hflush();
+ out.close();
+
+ FSDataInputStream in = fs.open(testPath);
Review Comment:
Added inside try
##########
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemChecksum.java:
##########
@@ -0,0 +1,259 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azurebfs;
+
+import java.security.SecureRandom;
+import java.util.Arrays;
+import java.util.HashSet;
+
+import org.assertj.core.api.Assertions;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import
org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsInvalidChecksumException;
+import
org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException;
+import
org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters;
+import org.apache.hadoop.fs.azurebfs.services.AbfsClient;
+import org.apache.hadoop.fs.impl.OpenFileParameters;
+
+import static
org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.MD5_ERROR_SERVER_MESSAGE;
+import static
org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_BUFFERED_PREAD_DISABLE;
+import static
org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.ONE_MB;
+import static
org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters.Mode.APPEND_MODE;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.mockito.ArgumentMatchers.any;
+
+/**
+ * Test For Verifying Checksum Related Operations
+ */
+public class ITestAzureBlobFileSystemChecksum extends
AbstractAbfsIntegrationTest {
+
+ public ITestAzureBlobFileSystemChecksum() throws Exception {
+ super();
+ }
+
+ @Test
+ public void testWriteReadWithChecksum() throws Exception {
+ testWriteReadWithChecksumInternal(true);
+ testWriteReadWithChecksumInternal(false);
+ }
+
+ @Test
+ public void testAppendWithChecksumAtDifferentOffsets() throws Exception {
+ AzureBlobFileSystem fs = getConfiguredFileSystem(4 * ONE_MB, 4 * ONE_MB,
true);
+ AbfsClient client = fs.getAbfsStore().getClient();
+ Path path = path("testPath");
+ fs.create(path);
Review Comment:
Added inside try
> ABFS: Adding Support for MD5 Hash based integrity verification of the request
> content during transport
> -------------------------------------------------------------------------------------------------------
>
> Key: HADOOP-18910
> URL: https://issues.apache.org/jira/browse/HADOOP-18910
> Project: Hadoop Common
> Issue Type: Sub-task
> Components: fs/azure
> Reporter: Anuj Modi
> Assignee: Anuj Modi
> Priority: Major
> Labels: pull-request-available
>
> Azure Storage Supports Content-MD5 Request Headers in Both Read and Append
> APIs.
> Read: [Path - Read - REST API (Azure Storage Services) | Microsoft
> Learn|https://learn.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/read]
> Append: [Path - Update - REST API (Azure Storage Services) | Microsoft
> Learn|https://learn.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/update]
> This change makes the client-side changes to support them. In the Read request,
> we will send the appropriate header, in response to which the server will return
> the MD5 hash of the data it sends back. On the client, we will tally this with the
> MD5 hash computed from the data received.
> In the Append request, we will compute the MD5 hash of the data that we are
> sending to the server and specify it in the appropriate header. The server, on
> finding that header, will tally it with the MD5 hash it computes on the
> data received.
> This whole checksum validation support is guarded behind a config. The config is
> disabled by default because, with the use of "https", the integrity of the data is
> preserved anyway. This is introduced as an additional data integrity check,
> which will have a performance impact as well.
> Users can decide whether they want to enable this by setting the following
> config to *"true"* or *"false"* respectively. *Config:
> "fs.azure.enable.checksum.validation"*
--
This message was sent by Atlassian Jira
(v8.20.10#820010)
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]