Github user JoshRosen commented on a diff in the pull request:
https://github.com/apache/spark/pull/7948#discussion_r36272567
--- Diff:
sql/core/src/test/scala/org/apache/spark/sql/execution/UnsafeFixedWidthAggregationMapSuite.scala
---
@@ -231,4 +231,109 @@ class UnsafeFixedWidthAggregationMapSuite extends
SparkFunSuite with Matchers {
map.free()
}
+
+ testWithMemoryLeakDetection("test external sorting with an empty map") {
+ // Calling this makes sure we have the block manager and everything else set up.
+ TestSQLContext
+
+ val map = new UnsafeFixedWidthAggregationMap(
+ emptyAggregationBuffer,
+ aggBufferSchema,
+ groupKeySchema,
+ taskMemoryManager,
+ shuffleMemoryManager,
+ 128, // initial capacity
+ PAGE_SIZE_BYTES,
+ false // disable perf metrics
+ )
+
+ // Convert the map into a sorter
+ val sorter = map.destructAndCreateExternalSorter()
+
+ // Add more keys to the sorter and make sure the results come out
sorted.
+ val additionalKeys = randomStrings(1024)
+ val keyConverter = UnsafeProjection.create(groupKeySchema)
+ val valueConverter = UnsafeProjection.create(aggBufferSchema)
+
+ additionalKeys.zipWithIndex.foreach { case (str, i) =>
+ val k = InternalRow(UTF8String.fromString(str))
+ val v = InternalRow(str.length)
+ sorter.insertKV(keyConverter.apply(k), valueConverter.apply(v))
+
+ if ((i % 100) == 0) {
+ shuffleMemoryManager.markAsOutOfMemory()
+ sorter.closeCurrentPage()
+ }
+ }
+
+ val out = new scala.collection.mutable.ArrayBuffer[String]
+ val iter = sorter.sortedIterator()
+ while (iter.next()) {
+ // Here, we also test that copy is correct.
+ val key = iter.getKey.copy()
+ val value = iter.getValue.copy()
+ assert(key.getString(0).length === value.getInt(0))
+ out += key.getString(0)
+ }
+
+ assert(out === (additionalKeys).sorted)
+
+ map.free()
+ }
+
+ testWithMemoryLeakDetection("test external sorting with empty records") {
+ // Calling this makes sure we have the block manager and everything else set up.
+ TestSQLContext
+
+ // Memory consumption in the beginning of the task.
+ val initialMemoryConsumption =
shuffleMemoryManager.getMemoryConsumptionForThisTask()
+
+ val map = new UnsafeFixedWidthAggregationMap(
+ emptyAggregationBuffer,
+ StructType(Nil),
+ StructType(Nil),
+ taskMemoryManager,
+ shuffleMemoryManager,
+ 128, // initial capacity
+ PAGE_SIZE_BYTES,
+ false // disable perf metrics
+ )
+
+ (1 to 10).foreach { i =>
+ val buf = map.getAggregationBuffer(InternalRow(0))
+ assert(buf != null)
+ }
+
+ // Convert the map into a sorter
+ val sorter = map.destructAndCreateExternalSorter()
+
+ withClue(s"destructAndCreateExternalSorter should release memory used
by the map") {
+ // 4096 * 16 is the initial size allocated for the pointer/prefix
array in the in-mem sorter.
+ assert(shuffleMemoryManager.getMemoryConsumptionForThisTask() ===
+ initialMemoryConsumption + 4096 * 16)
+ }
+
+ // Add more keys to the sorter and make sure the results come out
sorted.
+ (1 to 4096).foreach { i =>
+ sorter.insertKV(UnsafeRow.createFromByteArray(0, 0),
UnsafeRow.createFromByteArray(0, 0))
+
+ if ((i % 100) == 0) {
+ shuffleMemoryManager.markAsOutOfMemory()
+ sorter.closeCurrentPage()
+ }
+ }
+
+ var count = 0
+ val iter = sorter.sortedIterator()
+ while (iter.next()) {
+ // Here, we also test that copy is correct.
+ iter.getKey.copy()
+ iter.getValue.copy()
+ count += 1;
+ }
+
+ assert(count === 4097)
--- End diff --
To clarify: maybe add a comment explaining that one row comes from the map,
and the remaining 4096 rows were added directly to the KV sorter after creating it.
---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at [email protected] or file a JIRA ticket
with INFRA.
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]