m-trieu commented on code in PR #30695:
URL: https://github.com/apache/beam/pull/30695#discussion_r1539863662


##########
runners/google-cloud-dataflow-java/worker/src/main/java/org/apache/beam/runners/dataflow/worker/streaming/harness/StreamingWorkerStatusReporter.java:
##########
@@ -0,0 +1,370 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.runners.dataflow.worker.streaming.harness;
+
+import static org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.base.Preconditions.checkNotNull;
+
+import com.google.api.services.dataflow.model.CounterUpdate;
+import com.google.api.services.dataflow.model.PerStepNamespaceMetrics;
+import com.google.api.services.dataflow.model.PerWorkerMetrics;
+import com.google.api.services.dataflow.model.Status;
+import com.google.api.services.dataflow.model.StreamingScalingReport;
+import com.google.api.services.dataflow.model.WorkItemStatus;
+import com.google.api.services.dataflow.model.WorkerMessage;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Function;
+import java.util.function.Supplier;
+import javax.annotation.concurrent.ThreadSafe;
+import org.apache.beam.runners.dataflow.worker.DataflowSystemMetrics;
+import org.apache.beam.runners.dataflow.worker.StreamingStepMetricsContainer;
+import org.apache.beam.runners.dataflow.worker.WorkUnitClient;
+import org.apache.beam.runners.dataflow.worker.counters.CounterSet;
+import org.apache.beam.runners.dataflow.worker.counters.DataflowCounterUpdateExtractor;
+import org.apache.beam.runners.dataflow.worker.logging.DataflowWorkerLoggingMDC;
+import org.apache.beam.runners.dataflow.worker.streaming.StageInfo;
+import org.apache.beam.runners.dataflow.worker.util.BoundedQueueExecutor;
+import org.apache.beam.runners.dataflow.worker.util.MemoryMonitor;
+import org.apache.beam.sdk.annotations.Internal;
+import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.annotations.VisibleForTesting;
+import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.collect.ImmutableList;
+import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.collect.ListMultimap;
+import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.collect.MultimapBuilder;
+import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/** Reports the status of the worker to Dataflow Service. */
+@Internal
+@ThreadSafe
+public final class StreamingWorkerStatusReporter {
+  private static final Logger LOG = LoggerFactory.getLogger(StreamingWorkerStatusReporter.class);
+
+  // Reserved ID for counter updates.
+  // Matches kWindmillCounterUpdate in workflow_worker_service_multi_hubs.cc.
+  private static final String WINDMILL_COUNTER_UPDATE_WORK_ID = "3";
+  private static final int COUNTER_UPDATES_SIZE = 128;
+  private static final String WORKER_MESSAGE_REPORTER_THREAD = "ReportWorkerMessage";
+  private static final String GLOBAL_WORKER_UPDATE_REPORTER_THREAD = "GlobalWorkerUpdates";
+
+  private final boolean publishCounters;
+  private final WorkUnitClient dataflowServiceClient;
+  private final Supplier<Long> windmillQuotaThrottleTime;
+  private final Supplier<Collection<StageInfo>> allStageInfo;
+  private final Supplier<ImmutableList<Status>> pendingErrorsToReport;
+  private final StreamingCounters streamingCounters;
+  private final MemoryMonitor memoryMonitor;
+  private final BoundedQueueExecutor workExecutor;
+  private final AtomicLong previousTimeAtMaxThreads;
+  private final Supplier<Integer> maxThreads;
+  private final ScheduledExecutorService globalWorkerUpdateReporter;
+  private final ScheduledExecutorService workerMessageReporter;
+
+  private StreamingWorkerStatusReporter(
+      boolean publishCounters,
+      WorkUnitClient dataflowServiceClient,
+      Supplier<Long> windmillQuotaThrottleTime,
+      Supplier<Collection<StageInfo>> allStageInfo,
+      Supplier<ImmutableList<Status>> pendingErrorsToReport,
+      StreamingCounters streamingCounters,
+      MemoryMonitor memoryMonitor,
+      BoundedQueueExecutor workExecutor,
+      AtomicLong previousTimeAtMaxThreads,
+      Supplier<Integer> maxThreads,
+      Function<String, ScheduledExecutorService> executorFactory) {
+    this.publishCounters = publishCounters;
+    this.dataflowServiceClient = dataflowServiceClient;
+    this.windmillQuotaThrottleTime = windmillQuotaThrottleTime;
+    this.allStageInfo = allStageInfo;
+    this.pendingErrorsToReport = pendingErrorsToReport;
+    this.streamingCounters = streamingCounters;
+    this.memoryMonitor = memoryMonitor;
+    this.workExecutor = workExecutor;
+    this.previousTimeAtMaxThreads = previousTimeAtMaxThreads;
+    this.maxThreads = maxThreads;
+    this.globalWorkerUpdateReporter = executorFactory.apply(GLOBAL_WORKER_UPDATE_REPORTER_THREAD);
+    this.workerMessageReporter = executorFactory.apply(WORKER_MESSAGE_REPORTER_THREAD);
+  }
+
+  public static StreamingWorkerStatusReporter create(
+      boolean publishCounters,
+      WorkUnitClient workUnitClient,
+      Supplier<Long> windmillQuotaThrottleTime,
+      Supplier<Collection<StageInfo>> allStageInfo,
+      Supplier<ImmutableList<Status>> pendingErrorsToReport,
+      StreamingCounters streamingCounters,
+      MemoryMonitor memoryMonitor,
+      BoundedQueueExecutor workExecutor,
+      Supplier<Integer> maxThreads) {
+    return new StreamingWorkerStatusReporter(
+        publishCounters,
+        workUnitClient,
+        windmillQuotaThrottleTime,
+        allStageInfo,
+        pendingErrorsToReport,
+        streamingCounters,
+        memoryMonitor,
+        workExecutor,
+        new AtomicLong(),
+        maxThreads,
+        threadName ->
+            Executors.newSingleThreadScheduledExecutor(
+                new ThreadFactoryBuilder().setNameFormat(threadName).build()));
+  }
+
+  @VisibleForTesting
+  public static StreamingWorkerStatusReporter forTesting(
+      boolean publishCounters,
+      WorkUnitClient workUnitClient,
+      Supplier<Long> windmillQuotaThrottleTime,
+      Supplier<Collection<StageInfo>> allStageInfo,
+      Supplier<ImmutableList<Status>> pendingErrorsToReport,
+      StreamingCounters streamingCounters,
+      MemoryMonitor memoryMonitor,
+      BoundedQueueExecutor workExecutor,
+      Supplier<Integer> maxThreads,
+      Function<String, ScheduledExecutorService> executorFactory) {
+    return new StreamingWorkerStatusReporter(
+        publishCounters,
+        workUnitClient,
+        windmillQuotaThrottleTime,
+        allStageInfo,
+        pendingErrorsToReport,
+        streamingCounters,
+        memoryMonitor,
+        workExecutor,
+        new AtomicLong(),
+        maxThreads,
+        executorFactory);
+  }
+
+  /**
+   * Returns the key for a counter update. The key is a String for a legacy counter and a
+   * CounterStructuredName for a structured counter.
+   */
+  private static Object getCounterUpdateKey(CounterUpdate counterUpdate) {
+    Object key = null;
+    if (counterUpdate.getNameAndKind() != null) {
+      key = counterUpdate.getNameAndKind().getName();
+    } else if (counterUpdate.getStructuredNameAndMetadata() != null) {
+      key = counterUpdate.getStructuredNameAndMetadata().getName();
+    }
+    return checkNotNull(key, "Could not find name for CounterUpdate: %s", counterUpdate);
+  }
+
+  /**
+   * Clears counterUpdates and enqueues unique counters from counterMultimap. If a counter appears
+   * more than once, one occurrence is extracted and the remaining occurrences are left in the map.
+   */
+  private static void extractUniqueCounters(
+      List<CounterUpdate> counterUpdates, ListMultimap<Object, CounterUpdate> counterMultimap) {
+    counterUpdates.clear();
+    for (Iterator<Object> iter = counterMultimap.keySet().iterator(); iter.hasNext(); ) {
+      List<CounterUpdate> counters = counterMultimap.get(iter.next());
+      counterUpdates.add(counters.get(0));
+      if (counters.size() == 1) {
+        // There is a single value. Remove the entry through the iterator.
+        iter.remove();
+      } else {
+        // Otherwise remove the first value.
+        counters.remove(0);
+      }
+    }
+  }
+
+  private static void shutdownExecutor(ScheduledExecutorService executor) {
+    executor.shutdown();
+    try {
+      executor.awaitTermination(10, TimeUnit.SECONDS);
+    } catch (InterruptedException e) {
+      LOG.warn("Error occurred trying to gracefully shutdown executor={}", executor, e);
+      executor.shutdownNow();
+    }
+  }
+
+  @SuppressWarnings("FutureReturnValueIgnored")
+  public void start(long windmillHarnessUpdateReportingPeriod) {
+    reportHarnessStartup();
+    // Periodically report workers counters and other updates.
+    globalWorkerUpdateReporter.scheduleWithFixedDelay(
+        this::reportPeriodicWorkerUpdates,
+        0,
+        windmillHarnessUpdateReportingPeriod,
+        TimeUnit.MILLISECONDS);
+
+    if (windmillHarnessUpdateReportingPeriod > 0) {
+      workerMessageReporter.scheduleWithFixedDelay(
+          this::reportPeriodicWorkerMessage,
+          0,
+          windmillHarnessUpdateReportingPeriod,
+          TimeUnit.MILLISECONDS);
+    }
+  }
+
+  public void stop() {
+    shutdownExecutor(globalWorkerUpdateReporter);
+    shutdownExecutor(workerMessageReporter);
+    // one last send
+    reportPeriodicWorkerUpdates();
+    reportPeriodicWorkerMessage();
+  }
+
+  private void reportHarnessStartup() {
+    DataflowWorkerLoggingMDC.setStageName("startup");
+    CounterSet restartCounter = new CounterSet();
+    restartCounter
+        .longSum(
+            DataflowSystemMetrics.StreamingSystemCounterNames.JAVA_HARNESS_RESTARTS.counterName())
+        .addValue(1L);
+    try {
+      // Sending a one-time update. Use an empty counter set for cumulativeCounters (2nd arg).
+      sendWorkerUpdatesToDataflowService(restartCounter, new CounterSet());

Review Comment:
   I agree.

   To be fair, I think we should utilize more async processing paired with the concurrency techniques.

   i.e., I think we currently use blocking gRPC, etc.

   Filed this issue: https://github.com/apache/beam/issues/30755
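
   As a rough illustration of that direction (a minimal sketch with hypothetical names, not code from this PR), the idea is that a blocking report call gets handed off to a separate executor via a `CompletableFuture`, so the scheduled reporter threads only enqueue work instead of waiting on the service:

   ```java
   import java.util.concurrent.CompletableFuture;
   import java.util.concurrent.ExecutorService;
   import java.util.concurrent.Executors;

   /** Hypothetical sketch: hand blocking report calls off to an executor. */
   final class AsyncReportingSketch {

     /** Stand-in for a blocking client call, e.g. reporting a worker message. */
     interface BlockingReporter {
       void report(String payload) throws Exception;
     }

     private final BlockingReporter blockingReporter;
     private final ExecutorService reportExecutor = Executors.newFixedThreadPool(4);

     AsyncReportingSketch(BlockingReporter blockingReporter) {
       this.blockingReporter = blockingReporter;
     }

     /**
      * Submits the report on the executor and surfaces failures through the returned
      * future, so the periodic scheduler thread never blocks on the RPC.
      */
     CompletableFuture<Void> reportAsync(String payload) {
       return CompletableFuture.runAsync(
           () -> {
             try {
               blockingReporter.report(payload);
             } catch (Exception e) {
               throw new RuntimeException("Failed to report worker message", e);
             }
           },
           reportExecutor);
     }
   }
   ```

   With a truly non-blocking client (e.g. async gRPC stubs) the hand-off would happen inside the client itself, but the shape is the same: the periodic schedulers only enqueue reports and observe completion or failure asynchronously.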



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
