kfaraz commented on code in PR #18819: URL: https://github.com/apache/druid/pull/18819#discussion_r2624068326
########## indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/autoscaler/CostBasedAutoScaler.java: ########## @@ -0,0 +1,360 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.indexing.seekablestream.supervisor.autoscaler; + +import org.apache.druid.indexing.common.stats.DropwizardRowIngestionMeters; +import org.apache.druid.indexing.overlord.supervisor.SupervisorSpec; +import org.apache.druid.indexing.overlord.supervisor.autoscaler.LagStats; +import org.apache.druid.indexing.overlord.supervisor.autoscaler.SupervisorTaskAutoScaler; +import org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskRunner; +import org.apache.druid.indexing.seekablestream.supervisor.SeekableStreamSupervisor; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.java.util.common.concurrent.Execs; +import org.apache.druid.java.util.emitter.EmittingLogger; +import org.apache.druid.java.util.emitter.service.ServiceEmitter; +import org.apache.druid.java.util.emitter.service.ServiceMetricEvent; +import org.apache.druid.query.DruidMetrics; +import org.apache.druid.segment.incremental.RowIngestionMeters; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +/** + * Cost-based auto-scaler for seekable stream supervisors. + * Uses a weighted cost function combining lag recovery time (seconds) and idleness cost (seconds) + * to determine optimal task counts. + * <p> + * Candidate task counts are derived by scanning a bounded window of partitions-per-task (PPT) values + * around the current PPT, then converting those to task counts. This allows non-divisor task counts + * while keeping changes gradual (no large jumps). + * <p> + * Scale-up and scale-down are both evaluated proactively. + * Future versions may perform scale-down on task rollover only. 
+ */ +public class CostBasedAutoScaler implements SupervisorTaskAutoScaler +{ + private static final EmittingLogger log = new EmittingLogger(CostBasedAutoScaler.class); + + private static final int MAX_INCREASE_IN_PARTITIONS_PER_TASK = 2; + private static final int MAX_DECREASE_IN_PARTITIONS_PER_TASK = MAX_INCREASE_IN_PARTITIONS_PER_TASK * 2; + public static final String AVG_LAG_METRIC = "task/autoScaler/costBased/avgLag"; + public static final String AVG_IDLE_METRIC = "task/autoScaler/costBased/pollIdleAvg"; + public static final String OPTIMAL_TASK_COUNT_METRIC = "task/autoScaler/costBased/optimalTaskCount"; + + private final String supervisorId; + private final SeekableStreamSupervisor supervisor; + private final ServiceEmitter emitter; + private final SupervisorSpec spec; + private final CostBasedAutoScalerConfig config; + private final ServiceMetricEvent.Builder metricBuilder; + private final ScheduledExecutorService autoscalerExecutor; + private final WeightedCostFunction costFunction; + + public CostBasedAutoScaler( + SeekableStreamSupervisor supervisor, + CostBasedAutoScalerConfig config, + SupervisorSpec spec, + ServiceEmitter emitter + ) + { + this.config = config; + this.spec = spec; + this.supervisor = supervisor; + this.supervisorId = spec.getId(); + this.emitter = emitter; + + this.costFunction = new WeightedCostFunction(); + + this.autoscalerExecutor = Execs.scheduledSingleThreaded("CostBasedAutoScaler-" + StringUtils.encodeForFormat(spec.getId())); + this.metricBuilder = ServiceMetricEvent.builder() + .setDimension(DruidMetrics.SUPERVISOR_ID, supervisorId) + .setDimension( + DruidMetrics.STREAM, + this.supervisor.getIoConfig().getStream() + ); + } + + @Override + public void start() + { + Callable<Integer> scaleAction = () -> computeOptimalTaskCount(this.collectMetrics()); + Runnable onSuccessfulScale = () -> { + }; + + autoscalerExecutor.scheduleAtFixedRate( + supervisor.buildDynamicAllocationTask(scaleAction, onSuccessfulScale, emitter), + config.getScaleActionPeriodMillis(), + config.getScaleActionPeriodMillis(), + TimeUnit.MILLISECONDS + ); + + log.info( + "CostBasedAutoScaler started for supervisorId[%s]: evaluating scaling every [%d]ms", + supervisorId, + config.getScaleActionPeriodMillis() + ); + } + + @Override + public void stop() + { + autoscalerExecutor.shutdownNow(); + log.info("CostBasedAutoScaler stopped for supervisorId [%s]", supervisorId); + } + + @Override + public void reset() + { + // No-op. + } + + private CostMetrics collectMetrics() + { + if (spec.isSuspended()) { + log.debug("Supervisor [%s] is suspended, skipping a metrics collection", supervisorId); + return null; + } + + final LagStats lagStats = supervisor.computeLagStats(); + if (lagStats == null) { + log.debug("Lag stats unavailable for supervisorId [%s], skipping collection", supervisorId); + return null; + } + + final int currentTaskCount = supervisor.getIoConfig().getTaskCount(); + final int partitionCount = supervisor.getPartitionCount(); + + final Map<String, Map<String, Object>> taskStats = supervisor.getStats(); + final double movingAvgRate = extractMovingAverage(taskStats, DropwizardRowIngestionMeters.ONE_MINUTE_NAME); Review Comment: Moving averages over a longer time window (say 15 mins) might be more stable and thus more reliable. If not available, then fallback to 5 minute, then 1 minute. 
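Something like this (untested) sketch, assuming `DropwizardRowIngestionMeters` also exposes `FIFTEEN_MINUTE_NAME` and `FIVE_MINUTE_NAME` constants and that `extractMovingAverage` returns a non-positive value when a window has no data:

```java
// Prefer the most stable (longest) window; fall back to shorter ones when it is unavailable.
double movingAvgRate = extractMovingAverage(taskStats, DropwizardRowIngestionMeters.FIFTEEN_MINUTE_NAME);
if (movingAvgRate <= 0) {
  movingAvgRate = extractMovingAverage(taskStats, DropwizardRowIngestionMeters.FIVE_MINUTE_NAME);
}
if (movingAvgRate <= 0) {
  movingAvgRate = extractMovingAverage(taskStats, DropwizardRowIngestionMeters.ONE_MINUTE_NAME);
}
```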
########## indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/autoscaler/CostBasedAutoScaler.java: ##########
+    // Use an actual 15-minute moving average processing rate if available
+    final double avgProcessingRate;
+    if (movingAvgRate > 0) {
+      avgProcessingRate = movingAvgRate;
+    } else {
+      // Fallback: estimate processing rate based on idle ratio
+      final double utilizationRatio = Math.max(0.01, 1.0 - pollIdleRatio);
+      avgProcessingRate = config.getDefaultProcessingRate() * utilizationRatio;

Review Comment: It might be weird to have this fed from a config (even as a fallback mechanism). In the future, we could compute it based on the stats of previously completed tasks of this supervisor, or just use the last known processing rate.
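For illustration only, the last-known-rate idea could look roughly like this (the extra field and the early return are assumptions, not something this PR already has):

```java
// Assumed new field on the autoscaler: remembers the last observed positive processing rate.
private volatile double lastKnownProcessingRate = -1;

// In collectMetrics():
final double avgProcessingRate;
if (movingAvgRate > 0) {
  lastKnownProcessingRate = movingAvgRate;
  avgProcessingRate = movingAvgRate;
} else if (lastKnownProcessingRate > 0) {
  // Fall back to the last observed rate instead of a configured default.
  avgProcessingRate = lastKnownProcessingRate;
} else {
  // Nothing observed yet; skip this evaluation cycle rather than guess.
  return null;
}
```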
########## indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/autoscaler/CostBasedAutoScaler.java: ##########
+    emitter.emit(metricBuilder.setMetric(AVG_LAG_METRIC, metrics.getAvgPartitionLag()));

Review Comment: This is already emitted as a metric.
What would be more useful are the computed terms `lagCost` and `idleCost`. Getting these out as metrics would enable users to choose better values for `lagWeight` and `idleWeight`.
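For example (the accessors and metric names below are only suggestions, assuming `WeightedCostFunction` is refactored to expose its two terms):

```java
// Hypothetical accessors that return the individual cost terms for the chosen task count.
final double lagCost = costFunction.computeLagCost(metrics, optimalTaskCount, config);
final double idleCost = costFunction.computeIdleCost(metrics, optimalTaskCount, config);

// Suggested metric names, following the existing ones in this class.
emitter.emit(metricBuilder.setMetric("task/autoScaler/costBased/lagCost", lagCost));
emitter.emit(metricBuilder.setMetric("task/autoScaler/costBased/idleCost", idleCost));
```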
########## indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/autoscaler/CostBasedAutoScaler.java: ##########

+    autoscalerExecutor.scheduleAtFixedRate(
+        supervisor.buildDynamicAllocationTask(scaleAction, onSuccessfulScale, emitter),
+        config.getScaleActionPeriodMillis(),

Review Comment: We need to revisit the start delay.
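For instance, the first evaluation could be deferred until task stats have had time to accumulate; the factor below is purely illustrative:

```java
// Illustrative only: use a longer initial delay than the evaluation period.
final long initialDelayMillis = 3 * config.getScaleActionPeriodMillis();
autoscalerExecutor.scheduleAtFixedRate(
    supervisor.buildDynamicAllocationTask(scaleAction, onSuccessfulScale, emitter),
    initialDelayMillis,
    config.getScaleActionPeriodMillis(),
    TimeUnit.MILLISECONDS
);
```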
########## indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/autoscaler/CostBasedAutoScaler.java: ##########
+    for (int partitionsPerTask = maxPartitionsPerTask; partitionsPerTask >= minPartitionsPerTask; partitionsPerTask--) {
+      final int taskCount = (partitionCount + partitionsPerTask - 1) / partitionsPerTask;
+      if (result.isEmpty() || result.get(result.size() - 1) != taskCount) {
+        result.add(taskCount);
+      }

Review Comment: Use a set to simplify this logic.
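An untested sketch of that simplification (needs `java.util.Set` and `java.util.TreeSet` imports):

```java
static int[] computeValidTaskCounts(int partitionCount, int currentTaskCount)
{
  if (partitionCount <= 0) {
    return new int[]{};
  }
  final int currentPartitionsPerTask = partitionCount / currentTaskCount;
  final int minPartitionsPerTask = Math.max(1, currentPartitionsPerTask - MAX_INCREASE_IN_PARTITIONS_PER_TASK);
  final int maxPartitionsPerTask = Math.min(partitionCount, currentPartitionsPerTask + MAX_DECREASE_IN_PARTITIONS_PER_TASK);

  // A sorted set deduplicates task counts and keeps them in ascending order.
  final Set<Integer> taskCounts = new TreeSet<>();
  for (int partitionsPerTask = minPartitionsPerTask; partitionsPerTask <= maxPartitionsPerTask; partitionsPerTask++) {
    // Ceiling division: tasks needed to cover all partitions at this partitions-per-task value.
    taskCounts.add((partitionCount + partitionsPerTask - 1) / partitionsPerTask);
  }
  return taskCounts.stream().mapToInt(Integer::intValue).toArray();
}
```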
########## indexing-service/src/main/java/org/apache/druid/indexing/seekablestream/supervisor/autoscaler/CostBasedAutoScaler.java: ##########
+    // If idle is already in the ideal range [0.2, 0.6], optimal utilization has been achieved.

Review Comment: We may not always want to skip scaling even if idleness is in the accepted range. For example, if the current idleness is 0.5 and there is no lag, a cluster admin might prefer to scale down the tasks so that idleness is more like 0.2 or so. They should be allowed to control this via the `idleWeight`. For the initial testing of this auto-scaler, let's remove this guardrail.

--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
