Github user cloud-fan commented on a diff in the pull request: https://github.com/apache/spark/pull/21931#discussion_r211990712 --- Diff: sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala --- @@ -1444,6 +1444,31 @@ object SQLConf { .intConf .createWithDefault(20) + val FAST_HASH_AGGREGATE_MAX_ROWS_CAPACITY_BIT = + buildConf("spark.sql.codegen.aggregate.fastHashMap.capacityBit") + .internal() + .doc("Capacity for the max number of rows to be held in memory by the fast hash aggregate " + + "product operator. The bit is not the actual value; the actual numBuckets is determined " + + "by loadFactor (e.g., with the default bit value 16, the actual numBuckets is (1 << 16) / 0.5).") + .intConf + .checkValue(bit => bit >= 10 && bit <= 30, "The bit value must be in [10, 30].") + .createWithDefault(16) + + object AvroOutputTimestampType extends Enumeration { --- End diff -- why add it?
--- --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org For additional commands, e-mail: reviews-h...@spark.apache.org