ahmarsuhail commented on code in PR #6407:
URL: https://github.com/apache/hadoop/pull/6407#discussion_r1469491275


##########
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java:
##########
@@ -581,6 +583,12 @@ public void initialize(URI name, Configuration 
originalConf)
 
       s3aInternals = createS3AInternals();
 
+      s3ObjectStorageClassFilter = 
Optional.ofNullable(conf.get(READ_RESTORED_GLACIER_OBJECTS))

Review Comment:
   would prefer `conf.get(READ_RESTORED_GLACIER_OBJECTS, READ_ALL)` , meaning 
READ_ALL is the default. and then you can get rid of the `orElse()`



##########
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java:
##########
@@ -411,4 +416,8 @@ public RequestFactory getRequestFactory() {
   public boolean isCSEEnabled() {
     return isCSEEnabled;
   }
+
+  public S3ObjectStorageClassFilter s3ObjectsStorageClassFilter() {

Review Comment:
   nit: rename to getS3ObjectStorageClassFilter(), and add Javadoc for the 
method



##########
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/list/ITestS3AReadRestoredGlacierObjects.java:
##########
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.list;
+
+import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BUFFER;
+import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BUFFER_ARRAY;
+import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BUFFER_DISK;
+import static org.apache.hadoop.fs.s3a.Constants.READ_RESTORED_GLACIER_OBJECTS;
+import static org.apache.hadoop.fs.s3a.Constants.STORAGE_CLASS;
+import static org.apache.hadoop.fs.s3a.Constants.STORAGE_CLASS_GLACIER;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.disableFilesystemCaching;
+import static 
org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides;
+import static 
org.apache.hadoop.fs.s3a.S3ATestUtils.skipIfStorageClassTestsDisabled;
+import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.XA_STORAGE_CLASS;
+import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.decodeBytes;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+
+import java.io.File;
+import java.nio.file.AccessDeniedException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Map;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.contract.s3a.S3AContract;
+import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.fs.s3a.S3ObjectStorageClassFilter;
+import org.assertj.core.api.Assertions;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+@RunWith(Parameterized.class)
+public class ITestS3AReadRestoredGlacierObjects extends AbstractS3ATestBase {
+
+  @Parameterized.Parameters(name = "fast-upload-buffer-{0}")
+  public static Collection<Object[]> params() {
+    return Arrays.asList(new Object[][]{
+        {FAST_UPLOAD_BUFFER_DISK},
+        {FAST_UPLOAD_BUFFER_ARRAY}
+    });
+  }
+
+  private final String fastUploadBufferType;
+
+  public ITestS3AReadRestoredGlacierObjects(String fastUploadBufferType) {
+    this.fastUploadBufferType = fastUploadBufferType;
+  }
+
+  @Test
+  public void testIgnoreGlacierObject() throws Throwable {
+    try (FileSystem fs = 
createFiles(S3ObjectStorageClassFilter.SKIP_ALL_GLACIER.name())) {
+      Assertions.assertThat(
+          fs.listStatus(methodPath()))
+        .describedAs("FileStatus List of %s", methodPath()).isEmpty();
+    }
+  }
+
+  @Test
+  public void testIgnoreRestoringGlacierObject() throws Throwable {
+    try (FileSystem fs = 
createFiles(S3ObjectStorageClassFilter.READ_RESTORED_GLACIER_OBJECTS.name())) {
+      Assertions.assertThat(
+              fs.listStatus(
+                  methodPath()))
+          .describedAs("FileStatus List of %s", methodPath()).isEmpty();
+    }
+  }
+
+  @Test
+  public void testDefault() throws Throwable {
+    try (FileSystem fs = 
createFiles(S3ObjectStorageClassFilter.READ_ALL.name())) {
+      Assertions.assertThat(
+              fs.listStatus(methodPath()))
+          .describedAs("FileStatus List of %s", methodPath()).isNotEmpty();
+    }
+  }
+
+  @Override
+  protected Configuration createConfiguration() {
+    Configuration newConf = super.createConfiguration();
+    newConf.set(STORAGE_CLASS, STORAGE_CLASS_GLACIER); // Create Glacier 
objects
+    skipIfStorageClassTestsDisabled(newConf);
+    disableFilesystemCaching(newConf);
+    removeBaseAndBucketOverrides(newConf, STORAGE_CLASS, FAST_UPLOAD_BUFFER);

Review Comment:
   remove lines 103 and 104; I don't think they're needed.



##########
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ObjectStorageClassFilter.java:
##########
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a;
+
+import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
+import java.util.Set;
+import java.util.function.Function;
+import software.amazon.awssdk.services.s3.model.ObjectStorageClass;
+import software.amazon.awssdk.services.s3.model.S3Object;
+
+
+/**
+ * <pre>
+ * {@link S3ObjectStorageClassFilter} will filter the S3 files based on the 
{@code fs.s3a.glacier.read.restored.objects} configuration set in {@link 
S3AFileSystem}
+ * The config can have 3 values:
+ * {@code READ_ALL}: This would conform to the current default behavior of not 
taking into account the storage classes retrieved from S3. This will be done to 
keep the current behavior for the customers and not changing the experience for 
them.

Review Comment:
   remove `This will be done to keep the current behavior for the customers and 
not changing the experience for them.` and add something like "Retrieval of 
Glacier files will fail with xxx", whatever the error currently is.



##########
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/list/ITestS3AReadRestoredGlacierObjects.java:
##########
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.list;
+
+import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BUFFER;
+import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BUFFER_ARRAY;
+import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BUFFER_DISK;
+import static org.apache.hadoop.fs.s3a.Constants.READ_RESTORED_GLACIER_OBJECTS;
+import static org.apache.hadoop.fs.s3a.Constants.STORAGE_CLASS;
+import static org.apache.hadoop.fs.s3a.Constants.STORAGE_CLASS_GLACIER;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.disableFilesystemCaching;
+import static 
org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides;
+import static 
org.apache.hadoop.fs.s3a.S3ATestUtils.skipIfStorageClassTestsDisabled;
+import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.XA_STORAGE_CLASS;
+import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.decodeBytes;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+
+import java.io.File;
+import java.nio.file.AccessDeniedException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Map;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.contract.s3a.S3AContract;
+import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.fs.s3a.S3ObjectStorageClassFilter;
+import org.assertj.core.api.Assertions;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+@RunWith(Parameterized.class)
+public class ITestS3AReadRestoredGlacierObjects extends AbstractS3ATestBase {
+
+  @Parameterized.Parameters(name = "fast-upload-buffer-{0}")
+  public static Collection<Object[]> params() {
+    return Arrays.asList(new Object[][]{

Review Comment:
   You don't need to parameterize here as we already test this in 
ITestS3AStorageClass. here we just want to focus on this glacier specific 
behaviour.



##########
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/list/ITestS3AReadRestoredGlacierObjects.java:
##########
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.list;
+
+import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BUFFER;
+import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BUFFER_ARRAY;
+import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BUFFER_DISK;
+import static org.apache.hadoop.fs.s3a.Constants.READ_RESTORED_GLACIER_OBJECTS;
+import static org.apache.hadoop.fs.s3a.Constants.STORAGE_CLASS;
+import static org.apache.hadoop.fs.s3a.Constants.STORAGE_CLASS_GLACIER;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.disableFilesystemCaching;
+import static 
org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides;
+import static 
org.apache.hadoop.fs.s3a.S3ATestUtils.skipIfStorageClassTestsDisabled;
+import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.XA_STORAGE_CLASS;
+import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.decodeBytes;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+
+import java.io.File;
+import java.nio.file.AccessDeniedException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Map;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.contract.s3a.S3AContract;
+import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.fs.s3a.S3ObjectStorageClassFilter;
+import org.assertj.core.api.Assertions;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+@RunWith(Parameterized.class)
+public class ITestS3AReadRestoredGlacierObjects extends AbstractS3ATestBase {
+
+  @Parameterized.Parameters(name = "fast-upload-buffer-{0}")
+  public static Collection<Object[]> params() {
+    return Arrays.asList(new Object[][]{
+        {FAST_UPLOAD_BUFFER_DISK},
+        {FAST_UPLOAD_BUFFER_ARRAY}
+    });
+  }
+
+  private final String fastUploadBufferType;
+
+  public ITestS3AReadRestoredGlacierObjects(String fastUploadBufferType) {
+    this.fastUploadBufferType = fastUploadBufferType;
+  }
+
+  @Test
+  public void testIgnoreGlacierObject() throws Throwable {
+    try (FileSystem fs = 
createFiles(S3ObjectStorageClassFilter.SKIP_ALL_GLACIER.name())) {
+      Assertions.assertThat(
+          fs.listStatus(methodPath()))
+        .describedAs("FileStatus List of %s", methodPath()).isEmpty();
+    }
+  }
+
+  @Test
+  public void testIgnoreRestoringGlacierObject() throws Throwable {
+    try (FileSystem fs = 
createFiles(S3ObjectStorageClassFilter.READ_RESTORED_GLACIER_OBJECTS.name())) {
+      Assertions.assertThat(
+              fs.listStatus(
+                  methodPath()))
+          .describedAs("FileStatus List of %s", methodPath()).isEmpty();
+    }
+  }
+
+  @Test
+  public void testDefault() throws Throwable {
+    try (FileSystem fs = 
createFiles(S3ObjectStorageClassFilter.READ_ALL.name())) {
+      Assertions.assertThat(
+              fs.listStatus(methodPath()))
+          .describedAs("FileStatus List of %s", methodPath()).isNotEmpty();
+    }
+  }
+
+  @Override
+  protected Configuration createConfiguration() {

Review Comment:
   move method to the top



##########
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/list/ITestS3AReadRestoredGlacierObjects.java:
##########
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.list;
+
+import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BUFFER;
+import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BUFFER_ARRAY;
+import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BUFFER_DISK;
+import static org.apache.hadoop.fs.s3a.Constants.READ_RESTORED_GLACIER_OBJECTS;
+import static org.apache.hadoop.fs.s3a.Constants.STORAGE_CLASS;
+import static org.apache.hadoop.fs.s3a.Constants.STORAGE_CLASS_GLACIER;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.disableFilesystemCaching;
+import static 
org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides;
+import static 
org.apache.hadoop.fs.s3a.S3ATestUtils.skipIfStorageClassTestsDisabled;
+import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.XA_STORAGE_CLASS;
+import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.decodeBytes;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+
+import java.io.File;
+import java.nio.file.AccessDeniedException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Map;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.contract.s3a.S3AContract;
+import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.fs.s3a.S3ObjectStorageClassFilter;
+import org.assertj.core.api.Assertions;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+@RunWith(Parameterized.class)
+public class ITestS3AReadRestoredGlacierObjects extends AbstractS3ATestBase {
+
+  @Parameterized.Parameters(name = "fast-upload-buffer-{0}")
+  public static Collection<Object[]> params() {
+    return Arrays.asList(new Object[][]{
+        {FAST_UPLOAD_BUFFER_DISK},
+        {FAST_UPLOAD_BUFFER_ARRAY}
+    });
+  }
+
+  private final String fastUploadBufferType;
+
+  public ITestS3AReadRestoredGlacierObjects(String fastUploadBufferType) {
+    this.fastUploadBufferType = fastUploadBufferType;
+  }
+
+  @Test
+  public void testIgnoreGlacierObject() throws Throwable {
+    try (FileSystem fs = 
createFiles(S3ObjectStorageClassFilter.SKIP_ALL_GLACIER.name())) {
+      Assertions.assertThat(
+          fs.listStatus(methodPath()))
+        .describedAs("FileStatus List of %s", methodPath()).isEmpty();
+    }
+  }
+
+  @Test
+  public void testIgnoreRestoringGlacierObject() throws Throwable {
+    try (FileSystem fs = 
createFiles(S3ObjectStorageClassFilter.READ_RESTORED_GLACIER_OBJECTS.name())) {
+      Assertions.assertThat(
+              fs.listStatus(
+                  methodPath()))
+          .describedAs("FileStatus List of %s", methodPath()).isEmpty();
+    }
+  }
+
+  @Test
+  public void testDefault() throws Throwable {
+    try (FileSystem fs = 
createFiles(S3ObjectStorageClassFilter.READ_ALL.name())) {
+      Assertions.assertThat(
+              fs.listStatus(methodPath()))
+          .describedAs("FileStatus List of %s", methodPath()).isNotEmpty();
+    }
+  }
+
+  @Override
+  protected Configuration createConfiguration() {
+    Configuration newConf = super.createConfiguration();
+    newConf.set(STORAGE_CLASS, STORAGE_CLASS_GLACIER); // Create Glacier 
objects
+    skipIfStorageClassTestsDisabled(newConf);
+    disableFilesystemCaching(newConf);
+    removeBaseAndBucketOverrides(newConf, STORAGE_CLASS, FAST_UPLOAD_BUFFER);
+    newConf.set(FAST_UPLOAD_BUFFER, fastUploadBufferType);
+    return newConf;
+  }
+
+  private FileSystem createFiles(String s3ObjectStorageClassFilter) throws 
Throwable {
+    Configuration conf = this.createConfiguration();
+    conf.set(STORAGE_CLASS, STORAGE_CLASS_GLACIER);
+    conf.set(READ_RESTORED_GLACIER_OBJECTS, s3ObjectStorageClassFilter);
+    S3AContract contract = (S3AContract) createContract(conf);
+    contract.init();
+
+    FileSystem fs = contract.getTestFileSystem();
+    Path dir = methodPath();
+    fs.mkdirs(dir);

Review Comment:
   you don't need to test for this here



##########
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/list/ITestS3AReadRestoredGlacierObjects.java:
##########
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.list;
+
+import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BUFFER;
+import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BUFFER_ARRAY;
+import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BUFFER_DISK;
+import static org.apache.hadoop.fs.s3a.Constants.READ_RESTORED_GLACIER_OBJECTS;
+import static org.apache.hadoop.fs.s3a.Constants.STORAGE_CLASS;
+import static org.apache.hadoop.fs.s3a.Constants.STORAGE_CLASS_GLACIER;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.disableFilesystemCaching;
+import static 
org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides;
+import static 
org.apache.hadoop.fs.s3a.S3ATestUtils.skipIfStorageClassTestsDisabled;
+import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.XA_STORAGE_CLASS;
+import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.decodeBytes;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+
+import java.io.File;
+import java.nio.file.AccessDeniedException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Map;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.contract.s3a.S3AContract;
+import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.fs.s3a.S3ObjectStorageClassFilter;
+import org.assertj.core.api.Assertions;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+@RunWith(Parameterized.class)
+public class ITestS3AReadRestoredGlacierObjects extends AbstractS3ATestBase {
+
+  @Parameterized.Parameters(name = "fast-upload-buffer-{0}")
+  public static Collection<Object[]> params() {
+    return Arrays.asList(new Object[][]{
+        {FAST_UPLOAD_BUFFER_DISK},
+        {FAST_UPLOAD_BUFFER_ARRAY}
+    });
+  }
+
+  private final String fastUploadBufferType;
+
+  public ITestS3AReadRestoredGlacierObjects(String fastUploadBufferType) {
+    this.fastUploadBufferType = fastUploadBufferType;
+  }
+
+  @Test
+  public void testIgnoreGlacierObject() throws Throwable {
+    try (FileSystem fs = 
createFiles(S3ObjectStorageClassFilter.SKIP_ALL_GLACIER.name())) {
+      Assertions.assertThat(
+          fs.listStatus(methodPath()))
+        .describedAs("FileStatus List of %s", methodPath()).isEmpty();
+    }
+  }
+
+  @Test
+  public void testIgnoreRestoringGlacierObject() throws Throwable {
+    try (FileSystem fs = 
createFiles(S3ObjectStorageClassFilter.READ_RESTORED_GLACIER_OBJECTS.name())) {
+      Assertions.assertThat(
+              fs.listStatus(
+                  methodPath()))
+          .describedAs("FileStatus List of %s", methodPath()).isEmpty();
+    }
+  }
+
+  @Test
+  public void testDefault() throws Throwable {
+    try (FileSystem fs = 
createFiles(S3ObjectStorageClassFilter.READ_ALL.name())) {
+      Assertions.assertThat(
+              fs.listStatus(methodPath()))
+          .describedAs("FileStatus List of %s", methodPath()).isNotEmpty();
+    }
+  }
+
+  @Override
+  protected Configuration createConfiguration() {
+    Configuration newConf = super.createConfiguration();
+    newConf.set(STORAGE_CLASS, STORAGE_CLASS_GLACIER); // Create Glacier 
objects

Review Comment:
   we should test for Glacier and Deep Archive though, as that's in your 
StorageClassFilterMap



##########
hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md:
##########
@@ -1261,6 +1261,20 @@ The switch to turn S3A auditing on or off.
   </description>
 </property>
 
+<!--
+The switch to control how S3A handles glacier storage classes.
+-->
+<property>
+<name>fs.s3a.glacier.read.restored.objects</name>
+<value>READ_ALL</value>
+<description>
+  The config can have 3 values:
+
+  * READ_ALL: This would conform to the current default behavior of not taking 
into account the storage classes retrieved from S3. This will be done to keep 
the current behavior (i.e failing for an unrestored glacier class file) for the 
customers and not changing the experience for them.

Review Comment:
   similar to above, remove "This will be done to keep the current behavior for 
the customers and not changing the experience for them." and add something like 
"Retrieval of Glacier files will fail with xxx", whatever the error currently is.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to