pralabhkumar commented on a change in pull request #23447: [SPARK-26462][CORE] 
Use ConfigEntry for hardcoded configs for execution categories
URL: https://github.com/apache/spark/pull/23447#discussion_r245732701
 
 

 ##########
 File path: core/src/main/scala/org/apache/spark/internal/config/package.scala
 ##########
 @@ -780,4 +880,146 @@ package object config {
     ConfigBuilder("spark.executor.logs.rolling.enableCompression")
       .booleanConf
       .createWithDefault(false)
+
+  private[spark] val IO_COMPRESSION_SNAPPY_BLOCKSIZE =
+    ConfigBuilder("spark.io.compression.snappy.blockSize")
+      .doc("Block size in bytes used in Snappy compression, in the case when " 
+
+        "Snappy compression codec is used. Lowering this block size " +
+        "will also lower shuffle memory usage when Snappy is used")
+      .bytesConf(ByteUnit.BYTE)
+      .createWithDefaultString("32k")
+
+  private[spark] val IO_COMPRESSION_SNAPPY_BLOCK_SIZE =
+    ConfigBuilder("spark.io.compression.snappy.block.size")
+      .doc("Block size in bytes used in Snappy compression, in the case when " 
+
+        "Snappy compression codec is used. Lowering this block size " +
+        "will also lower shuffle memory usage when Snappy is used. This used 
in older version 1.4")
+      .bytesConf(ByteUnit.BYTE)
+      .createWithDefaultString("32k")
+
+  private[spark] val IO_COMPRESSION_LZ4_BLOCKSIZE =
+    ConfigBuilder("spark.io.compression.lz4.blockSize")
+      .doc("Block size in bytes used in LZ4 compression, in the case when LZ4 
compression" +
+        "codec is used. Lowering this block size will also lower shuffle 
memory " +
+        "usage when LZ4 is used.")
+      .bytesConf(ByteUnit.BYTE)
+      .createWithDefaultString("32k")
+
+  private[spark] val IO_COMPRESSION_LZ4_BLOCK_SIZE =
+    ConfigBuilder("spark.io.compression.lz4.block.size")
+      .doc("Block size in bytes used in LZ4 compression, in the case when LZ4 
compression" +
+        "codec is used. Lowering this block size will also lower shuffle 
memory " +
+        "usage when LZ4 is used. This used in older version 1.4")
+      .bytesConf(ByteUnit.BYTE)
+      .createWithDefaultString("32k")
+
+  private[spark] val IO_COMPRESSION_CODEC =
+    ConfigBuilder("spark.io.compression.codec")
+      .doc("The codec used to compress internal data such as RDD partitions, 
event log, " +
+        "broadcast variables and shuffle outputs. By default, Spark provides 
four codecs: " +
+        "lz4, lzf, snappy, and zstd. You can also use fully qualified class 
names to specify " +
+        "the codec")
+      .stringConf
+      .createWithDefaultString("lz4")
+
+  private[spark] val IO_COMPRESSION_ZSTD_BUFFERSIZE =
+    ConfigBuilder("spark.io.compression.zstd.bufferSize")
+      .doc("Buffer size in bytes used in Zstd compression, in the case when 
Zstd " +
+        "compression codec is used. Lowering this size will lower the shuffle 
" +
+        "memory usage when Zstd is used, but it might increase the compression 
" +
+        "cost because of excessive JNI call overhead")
+      .bytesConf(ByteUnit.BYTE)
+      .createWithDefaultString("32k")
+
+  private[spark] val IO_COMPRESSION_ZSTD_LEVEL =
+    ConfigBuilder("spark.io.compression.zstd.level")
+      .doc("Compression level for Zstd compression codec. Increasing the 
compression" +
+        " level will result in better compression at the expense of more CPU 
and memory")
 
 Review comment:
   done
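
  For reference, a minimal sketch of how entries like these are consumed once the
  hardcoded keys are replaced (assuming a SparkConf instance named conf in scope
  inside the org.apache.spark package, since SparkConf.get(ConfigEntry) is
  package-private; the exact call sites in this PR may differ):

      import org.apache.spark.SparkConf
      import org.apache.spark.internal.config._

      val conf = new SparkConf()

      // Before: hardcoded key with an inline default string
      val before: Long = conf.getSizeAsBytes("spark.io.compression.snappy.blockSize", "32k")

      // After: typed access through the ConfigEntry; the "32k" default
      // now lives in the entry definition itself
      val after: Long = conf.get(IO_COMPRESSION_SNAPPY_BLOCKSIZE)

  One advantage of the ConfigEntry form is that the key, type, default, and
  documentation live in a single definition instead of being repeated at every
  call site.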

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
