Copilot commented on code in PR #7837:
URL: https://github.com/apache/hadoop/pull/7837#discussion_r2244303281
##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/TracingContext.java:
##########
@@ -265,6 +289,34 @@ private String addFailureReasons(final String header,
     return String.format("%s_%s", header, previousFailure);
   }
 
+  private String getRetryHeader(final String previousFailure, String retryPolicyAbbreviation) {
+    String retryHeader = String.format("%d", retryCount);
+    if (previousFailure == null) {
+      return retryHeader;
+    }
+    if (CONNECTION_TIMEOUT_ABBREVIATION.equals(previousFailure) && retryPolicyAbbreviation != null) {
+      return String.format("%s_%s_%s", retryHeader, previousFailure, retryPolicyAbbreviation);
+    }
+    return String.format("%s_%s", retryHeader, previousFailure);
+  }
+
+  private String getOperationSpecificHeader(FSOperationType opType) {
+    // Similar header can be added for other operations in the future.
+    switch (opType) {
+      case READ:
+        return readSpecificHeader();
+      default:
+        return EMPTY_STRING; // no operation specific header
+    }
+  }
+
+  private String readSpecificHeader() {
+    // More information on read can be added to this header in the future.
+    // As underscore separated values.
+    String readHeader = String.format("%s", readType.toString());

Review Comment:
   The String.format with "%s" is unnecessary here. Use readType.toString() directly for better readability and performance.
   ```suggestion
       String readHeader = readType.toString();
   ```



##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/TracingContext.java:
##########
@@ -193,31 +213,35 @@ public void setListener(Listener listener) {
   public void constructHeader(AbfsHttpOperation httpOperation, String previousFailure, String retryPolicyAbbreviation) {
     clientRequestId = UUID.randomUUID().toString();
     switch (format) {
-    case ALL_ID_FORMAT: // Optional IDs (e.g. streamId) may be empty
+    case ALL_ID_FORMAT:
       header =
-          clientCorrelationID + ":" + clientRequestId + ":" + fileSystemID + ":"
-              + getPrimaryRequestIdForHeader(retryCount > 0) + ":" + streamID
-              + ":" + opType + ":" + retryCount;
-      header = addFailureReasons(header, previousFailure, retryPolicyAbbreviation);
-      if (!(ingressHandler.equals(EMPTY_STRING))) {
-        header += ":" + ingressHandler;
-      }
-      if (!(position.equals(EMPTY_STRING))) {
-        header += ":" + position;
-      }
-      if (operatedBlobCount != null) {
-        header += (":" + operatedBlobCount);
-      }
-      header += (":" + httpOperation.getTracingContextSuffix());
+          AbfsHttpConstants.TracingHeaderVersion.V1 + ":" +
+          clientCorrelationID + ":" +
+          clientRequestId + ":" +
+          fileSystemID + ":" +
+          getPrimaryRequestIdForHeader(retryCount > 0) + ":" +
+          streamID + ":" +
+          opType + ":" +
+          getRetryHeader(previousFailure, retryPolicyAbbreviation) + ":" +
+          ingressHandler + ":" +
+          position + ":" +
+          operatedBlobCount + ":" +
+          httpOperation.getTracingContextSuffix() + ":" +
+          getOperationSpecificHeader(opType);
+      metricHeader += !(metricResults.trim().isEmpty()) ? metricResults : "";
       break;
     case TWO_ID_FORMAT:
-      header = clientCorrelationID + ":" + clientRequestId;
+      header =
+          AbfsHttpConstants.TracingHeaderVersion.V1 + ":" +
+          clientCorrelationID + ":" + clientRequestId;
       metricHeader += !(metricResults.trim().isEmpty()) ? metricResults : "";
       break;
     default: //case SINGLE_ID_FORMAT
-      header = clientRequestId;
+      header =
+          AbfsHttpConstants.TracingHeaderVersion.V1 + ":" +

Review Comment:
   The hardcoded V1 version is used in multiple places. Consider using TracingHeaderVersion.getCurrentVersion() consistently to centralize version management.
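A minimal sketch of the centralization the comment is asking for, assuming TracingHeaderVersion stays a nested enum in AbfsHttpConstants as the diff shows; apart from the getCurrentVersion() name, everything below is illustrative rather than the PR's actual code:

```java
// Illustrative sketch only -- not the enum as it appears in the PR.
public enum TracingHeaderVersion {
  V1("v1"); // wire value is an assumption; future versions would be added here

  private final String versionTag;

  TracingHeaderVersion(String versionTag) {
    this.versionTag = versionTag;
  }

  // Single place to bump when the tracing header format evolves,
  // instead of editing every hardcoded V1 reference in constructHeader().
  public static TracingHeaderVersion getCurrentVersion() {
    return V1;
  }

  @Override
  public String toString() {
    return versionTag;
  }
}
```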
##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/TracingContext.java:
##########
@@ -193,31 +213,35 @@ public void setListener(Listener listener) {
   public void constructHeader(AbfsHttpOperation httpOperation, String previousFailure, String retryPolicyAbbreviation) {
     clientRequestId = UUID.randomUUID().toString();
     switch (format) {
-    case ALL_ID_FORMAT: // Optional IDs (e.g. streamId) may be empty
+    case ALL_ID_FORMAT:
       header =
-          clientCorrelationID + ":" + clientRequestId + ":" + fileSystemID + ":"
-              + getPrimaryRequestIdForHeader(retryCount > 0) + ":" + streamID
-              + ":" + opType + ":" + retryCount;
-      header = addFailureReasons(header, previousFailure, retryPolicyAbbreviation);
-      if (!(ingressHandler.equals(EMPTY_STRING))) {
-        header += ":" + ingressHandler;
-      }
-      if (!(position.equals(EMPTY_STRING))) {
-        header += ":" + position;
-      }
-      if (operatedBlobCount != null) {
-        header += (":" + operatedBlobCount);
-      }
-      header += (":" + httpOperation.getTracingContextSuffix());
+          AbfsHttpConstants.TracingHeaderVersion.V1 + ":" +

Review Comment:
   The hardcoded V1 version is used in multiple places. Consider using TracingHeaderVersion.getCurrentVersion() consistently to centralize version management.



##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/TracingContext.java:
##########
@@ -193,31 +213,35 @@ public void setListener(Listener listener) {
   public void constructHeader(AbfsHttpOperation httpOperation, String previousFailure, String retryPolicyAbbreviation) {
     clientRequestId = UUID.randomUUID().toString();
     switch (format) {
-    case ALL_ID_FORMAT: // Optional IDs (e.g. streamId) may be empty
+    case ALL_ID_FORMAT:
       header =
-          clientCorrelationID + ":" + clientRequestId + ":" + fileSystemID + ":"
-              + getPrimaryRequestIdForHeader(retryCount > 0) + ":" + streamID
-              + ":" + opType + ":" + retryCount;
-      header = addFailureReasons(header, previousFailure, retryPolicyAbbreviation);
-      if (!(ingressHandler.equals(EMPTY_STRING))) {
-        header += ":" + ingressHandler;
-      }
-      if (!(position.equals(EMPTY_STRING))) {
-        header += ":" + position;
-      }
-      if (operatedBlobCount != null) {
-        header += (":" + operatedBlobCount);
-      }
-      header += (":" + httpOperation.getTracingContextSuffix());
+          AbfsHttpConstants.TracingHeaderVersion.V1 + ":" +
+          clientCorrelationID + ":" +
+          clientRequestId + ":" +
+          fileSystemID + ":" +
+          getPrimaryRequestIdForHeader(retryCount > 0) + ":" +
+          streamID + ":" +
+          opType + ":" +
+          getRetryHeader(previousFailure, retryPolicyAbbreviation) + ":" +
+          ingressHandler + ":" +
+          position + ":" +
+          operatedBlobCount + ":" +
+          httpOperation.getTracingContextSuffix() + ":" +
+          getOperationSpecificHeader(opType);
+      metricHeader += !(metricResults.trim().isEmpty()) ? metricResults : "";
       break;
     case TWO_ID_FORMAT:
-      header = clientCorrelationID + ":" + clientRequestId;
+      header =
+          AbfsHttpConstants.TracingHeaderVersion.V1 + ":" +
+          clientCorrelationID + ":" + clientRequestId;
       metricHeader += !(metricResults.trim().isEmpty()) ? metricResults : "";
       break;
     default: //case SINGLE_ID_FORMAT
-      header = clientRequestId;
+      header =
+          AbfsHttpConstants.TracingHeaderVersion.V1 + ":" +

Review Comment:
   The hardcoded V1 version is used in multiple places. Consider using TracingHeaderVersion.getCurrentVersion() consistently to centralize version management.
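With a getCurrentVersion() accessor in place, the call-site change is mechanical; a hedged sketch of what the TWO_ID_FORMAT prefix could look like, reusing identifiers from the diff above (the helper itself is hypothetical and not part of the PR):

```java
// Hypothetical helper, shown only to illustrate the call-site change:
// the version is resolved through getCurrentVersion() rather than hardcoded as V1.
private String buildTwoIdHeader(String clientCorrelationID, String clientRequestId) {
  return AbfsHttpConstants.TracingHeaderVersion.getCurrentVersion() + ":"
      + clientCorrelationID + ":" + clientRequestId;
}
```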
##########
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/TracingHeaderValidator.java:
##########
@@ -81,82 +85,93 @@ public TracingHeaderValidator(String clientCorrelationId, String fileSystemId,
   }
 
   private void validateTracingHeader(String tracingContextHeader) {
-    String[] idList = tracingContextHeader.split(":");
+    String[] idList = tracingContextHeader.split(":", -1);

Review Comment:
   [nitpick] Consider defining the split limit (-1) as a named constant to improve code readability and maintainability.
   ```suggestion
       String[] idList = tracingContextHeader.split(":", SPLIT_NO_LIMIT);
   ```


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: common-issues-unsubscr...@hadoop.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

---------------------------------------------------------------------
To unsubscribe, e-mail: common-issues-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-issues-h...@hadoop.apache.org