jayantdb commented on code in PR #52445:
URL: https://github.com/apache/spark/pull/52445#discussion_r2387080165
##########
sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryStatusAndProgressSuite.scala:
##########
@@ -436,6 +436,70 @@ class StreamingQueryStatusAndProgressSuite extends
StreamTest with Eventually wi
processedRowsPerSecondJSON shouldBe processedRowsPerSecondExpected +-
epsilon
}
+ test("SPARK-53690: avgOffsetsBehindLatest should never be in scientific
notation") {
+ val progress = testProgress5.jsonValue
+ val progressPretty = testProgress5.prettyJson
+
+ // Actual values
+ val avgOffsetsBehindLatest: Double = 2.8366294E8
+
+ // Get values from progress metrics JSON and cast back to Double
+ // for numeric comparison
+ val metricsJSON = (progress \ "sources")(0) \ "metrics"
+ val avgOffsetsBehindLatestJSON = (metricsJSON \ "avgOffsetsBehindLatest")
+ .values.toString
+
+ // Get expected values after type casting
+ val avgOffsetsBehindLatestExpected = BigDecimal(avgOffsetsBehindLatest)
+ .setScale(1, RoundingMode.HALF_UP).toDouble
+
+ // This should fail if avgOffsetsBehindLatest contains E notation
+ avgOffsetsBehindLatestJSON should not include "E"
+
+ // Value in progress metrics should be equal to the Decimal conversion of
the same
+ // Using epsilon to compare floating-point values
+ val epsilon = 1e-6
+ avgOffsetsBehindLatestJSON.toDouble shouldBe
avgOffsetsBehindLatestExpected +- epsilon
+
+ // Validating that the pretty JSON of metrics reported is same as defined
+ progressPretty shouldBe
+ s"""
+ |{
+ | "id" : "${testProgress5.id.toString}",
+ | "runId" : "${testProgress5.runId.toString}",
+ | "name" : "KafkaMetricsTest",
+ | "timestamp" : "2025-09-23T06:00:00.000Z",
+ | "batchId" : 1,
+ | "batchDuration" : 100,
+ | "numInputRows" : 800000,
+ | "inputRowsPerSecond" : 78886.1,
+ | "processedRowsPerSecond" : 41622.0,
+ | "durationMs" : {
+ | "total" : 100
+ | },
+ | "stateOperators" : [ ],
+ | "sources" : [ {
Review Comment:
Hm, I tried running a very large input batch from Kafka; however, the
test case got stuck for 10+ minutes. I am unsure whether such a heavyweight test
case would be optimal for CI builds. What are your thoughts?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]