[ 
https://issues.apache.org/jira/browse/HADOOP-18085?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17480720#comment-17480720
 ] 

Steve Loughran commented on HADOOP-18085:
-----------------------------------------

I'm now getting another test failure. This irritates me, as it implies I didn't 
get the builds right, or this would have surfaced earlier. Anyway, another ARN 
test setup issue, I'm afraid:

{code}
[ERROR] Tests run: 6, Failures: 0, Errors: 2, Skipped: 0, Time elapsed: 13.81 s 
<<< FAILURE! - in org.apache.hadoop.fs.s3a.ITestS3ABucketExistence
[ERROR] 
testAccessPointProbingV2(org.apache.hadoop.fs.s3a.ITestS3ABucketExistence)  
Time elapsed: 5.51 s  <<< ERROR!
java.lang.IllegalArgumentException: The region field of the ARN being passed as 
a bucket parameter to an S3 operation does not match the region the client was 
configured with. Provided region: 'eu-west-1'; client region: 
'accesspoint-eu-west-1'.
        at 
com.amazonaws.services.s3.AmazonS3Client.validateIsTrue(AmazonS3Client.java:6584)
        at 
com.amazonaws.services.s3.AmazonS3Client.validateS3ResourceArn(AmazonS3Client.java:5155)
        at 
com.amazonaws.services.s3.AmazonS3Client.createRequest(AmazonS3Client.java:4956)
        at 
com.amazonaws.services.s3.AmazonS3Client.createRequest(AmazonS3Client.java:4920)
        at 
com.amazonaws.services.s3.AmazonS3Client.getAcl(AmazonS3Client.java:4040)
        at 
com.amazonaws.services.s3.AmazonS3Client.getBucketAcl(AmazonS3Client.java:1278)
        at 
com.amazonaws.services.s3.AmazonS3Client.getBucketAcl(AmazonS3Client.java:1268)
        at 
org.apache.hadoop.fs.s3a.S3AFileSystem.lambda$verifyBucketExistsV2$2(S3AFileSystem.java:731)
        at 
org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.lambda$trackDurationOfOperation$5(IOStatisticsBinding.java:499)
        at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:119)
        at org.apache.hadoop.fs.s3a.Invoker.lambda$retry$4(Invoker.java:348)
        at org.apache.hadoop.fs.s3a.Invoker.retryUntranslated(Invoker.java:440)
        at org.apache.hadoop.fs.s3a.Invoker.retry(Invoker.java:344)
        at org.apache.hadoop.fs.s3a.Invoker.retry(Invoker.java:319)
        at 
org.apache.hadoop.fs.s3a.S3AFileSystem.verifyBucketExistsV2(S3AFileSystem.java:724)
        at 
org.apache.hadoop.fs.s3a.S3AFileSystem.doBucketProbing(S3AFileSystem.java:611)
        at 
org.apache.hadoop.fs.s3a.S3AFileSystem.initialize(S3AFileSystem.java:506)
        at 
org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:3459)
        at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:535)
        at 
org.apache.hadoop.fs.s3a.ITestS3ABucketExistence.lambda$testAccessPointProbingV2$12(ITestS3ABucketExistence.java:172)
        at 
org.apache.hadoop.test.LambdaTestUtils.intercept(LambdaTestUtils.java:498)
        at 
org.apache.hadoop.test.LambdaTestUtils.intercept(LambdaTestUtils.java:384)
        at 
org.apache.hadoop.fs.s3a.ITestS3ABucketExistence.expectUnknownStore(ITestS3ABucketExistence.java:103)
        at 
org.apache.hadoop.fs.s3a.ITestS3ABucketExistence.testAccessPointProbingV2(ITestS3ABucketExistence.java:171)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
        at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
        at 
org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
        at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
        at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
        at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
        at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.lang.Thread.run(Thread.java:748)

[INFO] Running org.apache.hadoop.fs.s3a.ITestDowngradeSyncable
[ERROR] 
testAccessPointRequired(org.apache.hadoop.fs.s3a.ITestS3ABucketExistence)  Time 
elapsed: 0.612 s  <<< ERROR!
java.lang.IllegalArgumentException: The region field of the ARN being passed as 
a bucket parameter to an S3 operation does not match the region the client was 
configured with. Provided region: 'eu-west-1'; client region: 
'accesspoint-eu-west-1'.
        at 
com.amazonaws.services.s3.AmazonS3Client.validateIsTrue(AmazonS3Client.java:6584)
        at 
com.amazonaws.services.s3.AmazonS3Client.validateS3ResourceArn(AmazonS3Client.java:5155)
        at 
com.amazonaws.services.s3.AmazonS3Client.createRequest(AmazonS3Client.java:4956)
        at 
com.amazonaws.services.s3.AmazonS3Client.createRequest(AmazonS3Client.java:4920)
        at 
com.amazonaws.services.s3.AmazonS3Client.getAcl(AmazonS3Client.java:4040)
        at 
com.amazonaws.services.s3.AmazonS3Client.getBucketAcl(AmazonS3Client.java:1278)
        at 
com.amazonaws.services.s3.AmazonS3Client.getBucketAcl(AmazonS3Client.java:1268)
        at 
org.apache.hadoop.fs.s3a.S3AFileSystem.lambda$verifyBucketExistsV2$2(S3AFileSystem.java:731)
        at 
org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.lambda$trackDurationOfOperation$5(IOStatisticsBinding.java:499)
        at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:119)
        at org.apache.hadoop.fs.s3a.Invoker.lambda$retry$4(Invoker.java:348)
        at org.apache.hadoop.fs.s3a.Invoker.retryUntranslated(Invoker.java:440)
        at org.apache.hadoop.fs.s3a.Invoker.retry(Invoker.java:344)
        at org.apache.hadoop.fs.s3a.Invoker.retry(Invoker.java:319)
        at 
org.apache.hadoop.fs.s3a.S3AFileSystem.verifyBucketExistsV2(S3AFileSystem.java:724)
        at 
org.apache.hadoop.fs.s3a.S3AFileSystem.doBucketProbing(S3AFileSystem.java:611)
        at 
org.apache.hadoop.fs.s3a.S3AFileSystem.initialize(S3AFileSystem.java:506)
        at 
org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:3459)
        at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:535)
        at 
org.apache.hadoop.fs.s3a.ITestS3ABucketExistence.lambda$testAccessPointRequired$14(ITestS3ABucketExistence.java:189)
        at 
org.apache.hadoop.test.LambdaTestUtils.intercept(LambdaTestUtils.java:498)
        at 
org.apache.hadoop.test.LambdaTestUtils.intercept(LambdaTestUtils.java:384)
        at 
org.apache.hadoop.fs.s3a.ITestS3ABucketExistence.expectUnknownStore(ITestS3ABucketExistence.java:103)
        at 
org.apache.hadoop.fs.s3a.ITestS3ABucketExistence.testAccessPointRequired(ITestS3ABucketExistence.java:188)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
        at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
        at 
org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
        at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
        at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
        at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
        at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.lang.Thread.run(Thread.java:748)

{code}


> S3 SDK Upgrade causes AccessPoint ARN endpoint mistranslation
> -------------------------------------------------------------
>
>                 Key: HADOOP-18085
>                 URL: https://issues.apache.org/jira/browse/HADOOP-18085
>             Project: Hadoop Common
>          Issue Type: Bug
>            Reporter: Bogdan Stolojan
>            Assignee: Bogdan Stolojan
>            Priority: Minor
>              Labels: pull-request-available
>          Time Spent: 0.5h
>  Remaining Estimate: 0h
>
> Since upgrading the [SDK to 
> 1.12.132|https://github.com/apache/hadoop/pull/3864] the access point 
> endpoint translation was broken.
> Correct endpoints should start with "s3-accesspoint.", after SDK upgrade they 
> start with "s3.accesspoint-" which messes up tests + region detection by the 
> SDK.



--
This message was sent by Atlassian Jira
(v8.20.1#820001)

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to