anishshri-db commented on code in PR #45778:
URL: https://github.com/apache/spark/pull/45778#discussion_r1548956564
##########
sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/RocksDBStateStoreSuite.scala:
##########
@@ -294,6 +295,60 @@ class RocksDBStateStoreSuite extends
StateStoreSuiteBase[RocksDBStateStoreProvid
}
}
+ testWithColumnFamilies("rocksdb range scan - variable size non-ordering
columns with " +
+ "double type values are supported",
+ TestWithBothChangelogCheckpointingEnabledAndDisabled) { colFamiliesEnabled
=>
+
+ val testSchema: StructType = StructType(
+ Seq(StructField("key1", DoubleType, false),
+ StructField("key2", StringType, false)))
+
+ val schemaProj = UnsafeProjection.create(Array[DataType](DoubleType,
StringType))
+ tryWithProviderResource(newStoreProvider(testSchema,
+ RangeKeyScanStateEncoderSpec(testSchema, 1), colFamiliesEnabled)) {
provider =>
+ val store = provider.getStore(0)
+
+ val cfName = if (colFamiliesEnabled) "testColFamily" else "default"
+ if (colFamiliesEnabled) {
+ store.createColFamilyIfAbsent(cfName,
+ testSchema, valueSchema,
+ RangeKeyScanStateEncoderSpec(testSchema, 1))
+ }
+
+ // Verify that the sort ordering here is as follows:
+ // -NaN, -Infinity, -ve values, -0, 0, +0, +ve values, +Infinity, +NaN
Review Comment:
Updated to also check for the NaN positions in the returned list. The sorted
comparison doesn't work across the whole collection because NaN values break
ordering comparisons, so we need to filter the NaN entries out while
performing that check.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]