[ 
https://issues.apache.org/jira/browse/HADOOP-17451?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17259716#comment-17259716
 ] 

Steve Loughran commented on HADOOP-17451:
-----------------------------------------

got some failures in a test run; they look network-related, but since the assertions 
were on metrics, it deserves a look

{code}
[ERROR] 
testRandomIORandomPolicy(org.apache.hadoop.fs.s3a.scale.ITestS3AInputStreamPerformance)
  Time elapsed: 147.537 s  <<< FAILURE!
java.lang.AssertionError: 
open operations in
org.apache.hadoop.fs.FSDataInputStream@373a267d: 
S3AInputStream{s3a://landsat-pds/scene_list.gz wrappedStream=closed read 
policy=random pos=2097152 nextReadPos=0 contentLength=45603307 
contentRangeStart=2085201 contentRangeFinish=2097152 
remainingInCurrentRequest=0 ChangeTracker{ETagChangeDetectionPolicy 
mode=Server, revisionId='39c34d489777a595b36d0af5726007db'}
StreamStatistics{counters=((stream_read_total_bytes=1376256) 
(stream_read_seek_bytes_skipped=4980736) (stream_read_unbuffered=0) 
(stream_aborted=7) (stream_read_operations=165) (stream_read_seek_operations=3) 
(stream_read_seek_forward_operations=1) 
(stream_read_bytes_backwards_on_seek=6356992) 
(stream_read_seek_backward_operations=2) (action_http_get_request=11) 
(stream_read_bytes_discarded_in_abort=1962375) 
(stream_read_seek_policy_changed=1) (stream_read_fully_operations=4) 
(action_http_get_request.failures=0) (stream_read_opened=11) 
(stream_read_seek_bytes_discarded=0) (stream_read_exceptions=7) 
(stream_read_bytes=1376256) (stream_read_close_operations=1) 
(stream_read_closed=4) (stream_read_operations_incomplete=161) 
(stream_read_bytes_discarded_in_close=0) (stream_read_version_mismatches=0));
gauges=((stream_read_gauge_input_policy=2));
minimums=((action_http_get_request.failures.min=-1) 
(action_http_get_request.min=174));
maximums=((action_http_get_request.failures.max=-1) 
(action_http_get_request.max=8063));
means=((action_http_get_request.failures.mean=(samples=0, sum=0, mean=0.0000)) 
(action_http_get_request.mean=(samples=11, sum=27469, mean=2497.1818)));
}} expected:<4> but was:<11>
        at org.junit.Assert.fail(Assert.java:88)
        at org.junit.Assert.failNotEquals(Assert.java:834)
        at org.junit.Assert.assertEquals(Assert.java:645)
        at 
org.apache.hadoop.fs.s3a.scale.ITestS3AInputStreamPerformance.assertOpenOperationCount(ITestS3AInputStreamPerformance.java:225)
        at 
org.apache.hadoop.fs.s3a.scale.ITestS3AInputStreamPerformance.executeRandomIO(ITestS3AInputStreamPerformance.java:517)
        at 
org.apache.hadoop.fs.s3a.scale.ITestS3AInputStreamPerformance.testRandomIORandomPolicy(ITestS3AInputStreamPerformance.java:461)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:50)
        at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:47)
        at 
org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
        at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
        at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
        at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:298)
        at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:292)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.lang.Thread.run(Thread.java:748)

[ERROR] 
testDecompressionSequential128K(org.apache.hadoop.fs.s3a.scale.ITestS3AInputStreamPerformance)
  Time elapsed: 204.334 s  <<< ERROR!
org.apache.hadoop.fs.s3a.AWSClientIOException: read on 
s3a://landsat-pds/scene_list.gz: com.amazonaws.SdkClientException: Data read 
has a different length than the expected: dataLength=0; 
expectedLength=43236817; includeSkipped=true; in.getClass()=class 
com.amazonaws.services.s3.AmazonS3Client$2; markedSupported=false; marked=0; 
resetSinceLastMarked=false; markCount=0; resetCount=0: Data read has a 
different length than the expected: dataLength=0; expectedLength=43236817; 
includeSkipped=true; in.getClass()=class 
com.amazonaws.services.s3.AmazonS3Client$2; markedSupported=false; marked=0; 
resetSinceLastMarked=false; markCount=0; resetCount=0
        at 
org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:208)
        at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:117)
        at org.apache.hadoop.fs.s3a.Invoker.lambda$retry$4(Invoker.java:320)
        at org.apache.hadoop.fs.s3a.Invoker.retryUntranslated(Invoker.java:412)
        at org.apache.hadoop.fs.s3a.Invoker.retry(Invoker.java:316)
        at org.apache.hadoop.fs.s3a.Invoker.retry(Invoker.java:291)
        at org.apache.hadoop.fs.s3a.S3AInputStream.read(S3AInputStream.java:516)
        at java.io.DataInputStream.read(DataInputStream.java:149)
        at 
org.apache.hadoop.io.compress.DecompressorStream.getCompressedData(DecompressorStream.java:179)
        at 
org.apache.hadoop.io.compress.DecompressorStream.decompress(DecompressorStream.java:163)
        at 
org.apache.hadoop.io.compress.DecompressorStream.read(DecompressorStream.java:105)
        at java.io.InputStream.read(InputStream.java:101)
        at org.apache.hadoop.util.LineReader.fillBuffer(LineReader.java:191)
        at 
org.apache.hadoop.util.LineReader.readDefaultLine(LineReader.java:227)
        at org.apache.hadoop.util.LineReader.readLine(LineReader.java:185)
        at org.apache.hadoop.util.LineReader.readLine(LineReader.java:391)
        at 
org.apache.hadoop.fs.s3a.scale.ITestS3AInputStreamPerformance.executeDecompression(ITestS3AInputStreamPerformance.java:385)
        at 
org.apache.hadoop.fs.s3a.scale.ITestS3AInputStreamPerformance.testDecompressionSequential128K(ITestS3AInputStreamPerformance.java:359)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:50)
        at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:47)
        at 
org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
        at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
        at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
        at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:298)
        at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:292)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.lang.Thread.run(Thread.java:748)
Caused by: com.amazonaws.SdkClientException: Data read has a different length 
than the expected: dataLength=0; expectedLength=43236817; includeSkipped=true; 
in.getClass()=class com.amazonaws.services.s3.AmazonS3Client$2; 
markedSupported=false; marked=0; resetSinceLastMarked=false; markCount=0; 
resetCount=0
        at 
com.amazonaws.util.LengthCheckInputStream.checkLength(LengthCheckInputStream.java:151)
        at 
com.amazonaws.util.LengthCheckInputStream.read(LengthCheckInputStream.java:109)
        at 
com.amazonaws.internal.SdkFilterInputStream.read(SdkFilterInputStream.java:90)
        at 
com.amazonaws.services.s3.internal.S3AbortableInputStream.read(S3AbortableInputStream.java:125)
        at 
com.amazonaws.internal.SdkFilterInputStream.read(SdkFilterInputStream.java:90)
        at 
org.apache.hadoop.fs.s3a.S3AInputStream.lambda$read$3(S3AInputStream.java:520)
        at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:115)
        ... 31 more

[ERROR] 
testReadWithNormalPolicy(org.apache.hadoop.fs.s3a.scale.ITestS3AInputStreamPerformance)
  Time elapsed: 23.765 s  <<< ERROR!
java.net.SocketTimeoutException: Read timed out
        at java.net.SocketInputStream.socketRead0(Native Method)
        at java.net.SocketInputStream.socketRead(SocketInputStream.java:116)
        at java.net.SocketInputStream.read(SocketInputStream.java:171)
        at java.net.SocketInputStream.read(SocketInputStream.java:141)
        at org.wildfly.openssl.OpenSSLSocket.read(OpenSSLSocket.java:423)
        at 
org.wildfly.openssl.OpenSSLInputStream.read(OpenSSLInputStream.java:41)
        at 
com.amazonaws.thirdparty.apache.http.impl.io.SessionInputBufferImpl.streamRead(SessionInputBufferImpl.java:137)
        at 
com.amazonaws.thirdparty.apache.http.impl.io.SessionInputBufferImpl.read(SessionInputBufferImpl.java:197)
        at 
com.amazonaws.thirdparty.apache.http.impl.io.ContentLengthInputStream.read(ContentLengthInputStream.java:176)
        at 
com.amazonaws.thirdparty.apache.http.conn.EofSensorInputStream.read(EofSensorInputStream.java:135)
        at java.io.InputStream.skip(InputStream.java:224)
        at 
com.amazonaws.internal.SdkFilterInputStream.skip(SdkFilterInputStream.java:96)
        at 
com.amazonaws.internal.SdkFilterInputStream.skip(SdkFilterInputStream.java:96)
        at 
com.amazonaws.internal.SdkFilterInputStream.skip(SdkFilterInputStream.java:96)
        at 
com.amazonaws.internal.SdkFilterInputStream.skip(SdkFilterInputStream.java:96)
        at 
com.amazonaws.internal.SdkFilterInputStream.skip(SdkFilterInputStream.java:96)
        at 
com.amazonaws.util.LengthCheckInputStream.skip(LengthCheckInputStream.java:182)
        at 
com.amazonaws.internal.SdkFilterInputStream.skip(SdkFilterInputStream.java:96)
        at 
com.amazonaws.services.s3.internal.S3AbortableInputStream.skip(S3AbortableInputStream.java:155)
        at 
com.amazonaws.internal.SdkFilterInputStream.skip(SdkFilterInputStream.java:96)
        at 
org.apache.hadoop.fs.s3a.S3AInputStream.seekInStream(S3AInputStream.java:321)
        at 
org.apache.hadoop.fs.s3a.S3AInputStream.lambda$lazySeek$1(S3AInputStream.java:388)
        at 
org.apache.hadoop.fs.s3a.Invoker.lambda$maybeRetry$3(Invoker.java:228)
        at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:115)
        at org.apache.hadoop.fs.s3a.Invoker.maybeRetry(Invoker.java:354)
        at org.apache.hadoop.fs.s3a.Invoker.maybeRetry(Invoker.java:226)
        at org.apache.hadoop.fs.s3a.Invoker.maybeRetry(Invoker.java:270)
        at 
org.apache.hadoop.fs.s3a.S3AInputStream.lazySeek(S3AInputStream.java:384)
        at org.apache.hadoop.fs.s3a.S3AInputStream.read(S3AInputStream.java:418)
        at java.io.FilterInputStream.read(FilterInputStream.java:83)
        at 
org.apache.hadoop.fs.s3a.scale.ITestS3AInputStreamPerformance.executeSeekReadSequence(ITestS3AInputStreamPerformance.java:427)
        at 
org.apache.hadoop.fs.s3a.scale.ITestS3AInputStreamPerformance.testReadWithNormalPolicy(ITestS3AInputStreamPerformance.java:351)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:50)
        at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:47)
        at 
org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
        at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
        at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
        at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:298)
        at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:292)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.lang.Thread.run(Thread.java:748)

{code}

> intermittent failure of S3A huge file upload tests: count of bytes uploaded 
> == 0
> --------------------------------------------------------------------------------
>
>                 Key: HADOOP-17451
>                 URL: https://issues.apache.org/jira/browse/HADOOP-17451
>             Project: Hadoop Common
>          Issue Type: Sub-task
>          Components: fs/s3
>    Affects Versions: 3.4.0
>            Reporter: Steve Loughran
>            Assignee: Steve Loughran
>            Priority: Major
>              Labels: pull-request-available
>          Time Spent: 0.5h
>  Remaining Estimate: 0h
>
> Intermittent failure of ITestHuge* upload tests, when doing parallel test 
> runs.
> The count of bytes uploaded through StorageStatistics isn't updated. Maybe 
> the expected counter isn't updated, and somehow in a parallel run with 
> recycled FS instances/set up directory structure this surfaces the way it 
> doesn't in a single test run.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to