Yohahaha commented on code in PR #2532:
URL: https://github.com/apache/fluss/pull/2532#discussion_r2761985682
##########
fluss-spark/fluss-spark-common/src/main/scala/org/apache/fluss/spark/SparkFlussConf.scala:
##########
@@ -17,23 +17,37 @@
package org.apache.fluss.spark
+import org.apache.fluss.config.{ConfigBuilder, ConfigOption}
import org.apache.fluss.config.ConfigBuilder.key
-import org.apache.fluss.config.ConfigOption
-import org.apache.spark.sql.internal.SQLConf.buildConf
+import java.time.Duration
object SparkFlussConf {
- val READ_OPTIMIZED = buildConf("spark.sql.fluss.readOptimized")
- .internal()
- .doc("If true, Spark will only read data from data lake snapshot or kv
snapshot, not execute merge them with log changes. This is a temporary
configuration that will be deprecated when read-optimized table(e.g.
`mytbl$ro`) is supported.")
- .booleanConf
- .createWithDefault(false)
+ val SPARK_FLUSS_CONF_PREFIX = "spark.sql.fluss."
val READ_OPTIMIZED_OPTION: ConfigOption[java.lang.Boolean] =
- key(READ_OPTIMIZED.key)
+ key("read.optimized")
.booleanType()
- .defaultValue(READ_OPTIMIZED.defaultValue.get)
- .withDescription(READ_OPTIMIZED.doc)
+ .defaultValue(false)
+ .withDescription(
+ "If true, Spark will only read data from data lake snapshot or kv
snapshot, not execute merge them with log changes. This is a temporary
configuration that will be deprecated when read-optimized table(e.g.
`mytbl$ro`) is supported.")
+ object StartUpMode extends Enumeration {
+ val FULL, EARLIEST, LATEST, TIMESTAMP = Value
+ }
+
+ val SCAN_START_UP_MODE: ConfigOption[String] =
Review Comment:
good advice; let's refactor these common configs into the fluss-common module
in another PR so that Flink and Spark can share them. cc @wuchong
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]