leesf commented on code in PR #7304:
URL: https://github.com/apache/hudi/pull/7304#discussion_r1033150889
##########
hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RunClusteringProcedure.scala:
##########
@@ -100,24 +111,59 @@ class RunClusteringProcedure extends BaseProcedure
logInfo("No order columns")
}
+ orderStrategy match {
+ case Some(o) =>
+ val strategy =
LayoutOptimizationStrategy.fromValue(o.asInstanceOf[String])
+ conf = conf ++ Map(
+ HoodieClusteringConfig.LAYOUT_OPTIMIZE_STRATEGY.key() ->
strategy.getValue
+ )
+ case _ =>
+ logInfo("No order strategy")
+ }
+
+ options match {
+ case Some(p) =>
+ val paramPairs = StringUtils.split(p.asInstanceOf[String], ",").asScala
+ paramPairs.foreach{ pair =>
+ val values = StringUtils.split(pair, "=")
+ conf = conf ++ Map(values.get(0) -> values.get(1))
+ }
+ case _ =>
+ logInfo("No options")
+ }
+
// Get all pending clustering instants
var pendingClustering =
ClusteringUtils.getAllPendingClusteringPlans(metaClient)
.iterator().asScala.map(_.getLeft.getTimestamp).toSeq.sortBy(f => f)
+
+ pendingClustering = instantsStr match {
+ case Some(inst) =>
+ operator = ClusteringOperator.EXECUTE
Review Comment:
I think we need to check whether users have specified instants together with the
SCHEDULE or SCHEDULE_AND_EXECUTE operator; in that case we should throw an
exception rather than silently overriding the operator to EXECUTE.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]