anmolanmol1234 commented on code in PR #7837:
URL: https://github.com/apache/hadoop/pull/7837#discussion_r2250972747
##########
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/TracingHeaderValidator.java:
##########
@@ -206,7 +207,7 @@ public void updateReadType(ReadType readType) {
   }
 
   /**
-   * Sets the value of the number of blobs operated on
+   * Sets the value of the number of blobs operated on976345

Review Comment:
   Typo issue: stray characters appended at the end of the doc comment.



##########
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java:
##########
@@ -886,40 +928,54 @@ private void testReadTypeInTracingContextHeaderInternal(AzureBlobFileSystem fs,
     ArgumentCaptor<ContextEncryptionAdapter> captor8 = ArgumentCaptor.forClass(ContextEncryptionAdapter.class);
     ArgumentCaptor<TracingContext> captor9 = ArgumentCaptor.forClass(TracingContext.class);
 
-    verify(fs.getAbfsStore().getClient(), times(numOfReadCalls)).read(
+    verify(fs.getAbfsStore().getClient(), times(totalReadCalls)).read(
         captor1.capture(), captor2.capture(), captor3.capture(),
         captor4.capture(), captor5.capture(), captor6.capture(),
         captor7.capture(), captor8.capture(), captor9.capture());
 
-    TracingContext tracingContext = captor9.getAllValues().get(numOfReadCalls - 1);
-    verifyHeaderForReadTypeInTracingContextHeader(tracingContext, readType);
+    List<TracingContext> tracingContextList = captor9.getAllValues();
+    if (readType == PREFETCH_READ) {
+      /*
+       * For Prefetch Enabled, first read can be Normal or Missed Cache Read.
+       * Sow e will assert only for last 2 calls which should be Prefetched Read.
+       * Since calls are asynchronous, we can not guarantee the order of calls.
+       * Therefore, we cannot assert on exact position here.
+       */
+      for (int i = tracingContextList.size() - (numOfReadCalls - 1); i < tracingContextList.size(); i++) {
+        verifyHeaderForReadTypeInTracingContextHeader(tracingContextList.get(i), readType, -1);
+      }
+    } else if (readType == DIRECT_READ) {
+      int expectedReadPos = ONE_MB/3;

Review Comment:
   A comment explaining why the read starts at this position would help clarity.



##########
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestApacheHttpClientFallback.java:
##########
@@ -61,15 +61,15 @@ private TracingContext getSampleTracingContext(int[] jdkCallsRegister,
       answer.callRealMethod();
       AbfsHttpOperation op = answer.getArgument(0);
       if (op instanceof AbfsAHCHttpOperation) {
-        Assertions.assertThat(tc.getHeader()).contains(APACHE_IMPL);
+        Assertions.assertThat(tc.getHeader()).endsWith(APACHE_IMPL);
        apacheCallsRegister[0]++;
      }
      if (op instanceof AbfsJdkHttpOperation) {
        jdkCallsRegister[0]++;
        if (AbfsApacheHttpClient.usable()) {
-          Assertions.assertThat(tc.getHeader()).contains(JDK_IMPL);
+          Assertions.assertThat(tc.getHeader()).endsWith(JDK_IMPL);

Review Comment:
   This might fail if we later add a new header field and the network library is no longer kept as the last one, so contains looks better to me.


--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: common-issues-unsubscr...@hadoop.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: common-issues-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-issues-h...@hadoop.apache.org
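
As an illustrative aside (not part of the PR under review), a minimal AssertJ sketch of the contains-vs-endsWith trade-off raised in the last comment; the header layout, field values, and class name below are hypothetical, not the actual ABFS tracing header format:

    import org.assertj.core.api.Assertions;

    public class HeaderSuffixSketch {
      public static void main(String[] args) {
        // Hypothetical tracing header with the network library ("JDK") as the last field.
        String headerToday = "clientReqId:fsId:OP:RE:1:JDK";

        // While the library is the last field, both assertions pass.
        Assertions.assertThat(headerToday).contains("JDK");
        Assertions.assertThat(headerToday).endsWith("JDK");

        // If a new field were ever appended after the network library,
        // endsWith would start failing even though the header still records "JDK".
        String headerLater = headerToday + ":newField";
        Assertions.assertThat(headerLater).contains("JDK");     // still passes
        // Assertions.assertThat(headerLater).endsWith("JDK");  // would now fail
      }
    }

In other words, endsWith couples the test to the header field order, while contains only checks that the library is recorded somewhere in the header.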