jon-wei commented on a change in pull request #8107: Add CliIndexer process 
type and initial task runner implementation
URL: https://github.com/apache/incubator-druid/pull/8107#discussion_r306088857
 
 

 ##########
 File path: 
indexing-service/src/main/java/org/apache/druid/indexing/overlord/ThreadingTaskRunner.java
 ##########
 @@ -0,0 +1,710 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.indexing.overlord;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Joiner;
+import com.google.common.base.Optional;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import com.google.common.io.ByteSource;
+import com.google.common.io.Files;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.errorprone.annotations.concurrent.GuardedBy;
+import com.google.inject.Inject;
+import org.apache.commons.io.FileUtils;
+import org.apache.druid.guice.annotations.Self;
+import org.apache.druid.indexer.RunnerTaskState;
+import org.apache.druid.indexer.TaskLocation;
+import org.apache.druid.indexer.TaskStatus;
+import org.apache.druid.indexing.common.TaskReportFileWriter;
+import org.apache.druid.indexing.common.TaskToolbox;
+import org.apache.druid.indexing.common.TaskToolboxFactory;
+import org.apache.druid.indexing.common.config.TaskConfig;
+import org.apache.druid.indexing.common.task.Task;
+import org.apache.druid.indexing.overlord.autoscaling.ScalingStats;
+import org.apache.druid.indexing.worker.config.WorkerConfig;
+import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.IOE;
+import org.apache.druid.java.util.common.ISE;
+import org.apache.druid.java.util.common.Pair;
+import org.apache.druid.java.util.common.StringUtils;
+import org.apache.druid.java.util.common.concurrent.Execs;
+import org.apache.druid.java.util.common.io.Closer;
+import org.apache.druid.java.util.emitter.EmittingLogger;
+import org.apache.druid.query.Query;
+import org.apache.druid.query.QueryRunner;
+import org.apache.druid.query.QuerySegmentWalker;
+import org.apache.druid.query.SegmentDescriptor;
+import org.apache.druid.segment.realtime.appenderator.AppenderatorsManager;
+import org.apache.druid.server.DruidNode;
+import org.apache.druid.tasklogs.TaskLogPusher;
+import org.apache.druid.tasklogs.TaskLogStreamer;
+import org.joda.time.DateTime;
+import org.joda.time.Interval;
+
+import javax.annotation.Nullable;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Executor;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * TaskRunner implementation for the Indexer task execution service, which runs all tasks in a single process.
+ *
+ * Two thread pools are used:
+ * - A task execution pool, sized to number of worker slots. This is used to 
execute the Task run() methods.
+ * - A control thread pool, sized to worker slots * 2. The control threads 
setup and submit work to the
+ *   task execution pool, and are also responsible for running graceful 
shutdown on the Task objects.
+ *   Only one shutdown per-task can be running at a given time, and there is 
one control thread per task,
+ *   thus the pool has 2 * worker slots.
+ *
+ * Note that separate task logs are not supported, all task log entries will 
be written to the Indexer process log
+ * instead.
+ */
+public class ThreadingTaskRunner implements TaskRunner, TaskLogStreamer, 
QuerySegmentWalker
+{
+  private static final EmittingLogger log = new EmittingLogger(ThreadingTaskRunner.class);
+
+  // Name of the file holding the restorable-task state read back by restore().
+  private static final String TASK_RESTORE_FILENAME = "restore.json";
+  private final TaskToolboxFactory toolboxFactory;
+  private final TaskConfig taskConfig;
+  private final TaskLogPusher taskLogPusher;
+  private final DruidNode node;
+  private final ObjectMapper jsonMapper;
+  // Registered listeners paired with the Executor each one should be notified on.
+  private final CopyOnWriteArrayList<Pair<TaskRunnerListener, Executor>> listeners = new CopyOnWriteArrayList<>();
+  private final AppenderatorsManager appenderatorsManager;
+  private final TaskReportFileWriter taskReportFileWriter;
+  // Runs Task.run(); sized to the worker capacity (see constructor).
+  private final ListeningExecutorService taskExecutor;
+  // Control threads that set up/submit tasks and run graceful shutdowns; sized to capacity * 2.
+  private final ListeningExecutorService controlThreadExecutor;
+
+  // Set once the runner begins stopping; volatile for visibility only, not atomicity.
+  private volatile boolean stopping = false;
+
+  /** Writes must be synchronized. This is only a ConcurrentMap so "informational" reads can occur without waiting. */
+  private final ConcurrentHashMap<String, ThreadingTaskRunnerWorkItem> tasks = new ConcurrentHashMap<>();
+
+  @Inject
+  public ThreadingTaskRunner(
+      TaskToolboxFactory toolboxFactory,
+      TaskConfig taskConfig,
+      WorkerConfig workerConfig,
+      TaskLogPusher taskLogPusher,
+      ObjectMapper jsonMapper,
+      AppenderatorsManager appenderatorsManager,
+      TaskReportFileWriter taskReportFileWriter,
+      @Self DruidNode node
+  )
+  {
+    this.toolboxFactory = toolboxFactory;
+    this.taskConfig = taskConfig;
+    this.taskLogPusher = taskLogPusher;
+    this.jsonMapper = jsonMapper;
+    this.node = node;
+    this.appenderatorsManager = appenderatorsManager;
+    this.taskReportFileWriter = taskReportFileWriter;
+    this.taskExecutor = MoreExecutors.listeningDecorator(
+        Execs.multiThreaded(workerConfig.getCapacity(), 
"threading-task-runner-executor-%d")
+    );
+    this.controlThreadExecutor = MoreExecutors.listeningDecorator(
+        Execs.multiThreaded(workerConfig.getCapacity() * 2, 
"threading-task-runner-control-%d")
+    );
+  }
+
+  @Override
+  public Optional<ByteSource> streamTaskLog(String taskid, long offset) throws 
IOException
+  {
+    // task logs will appear in the main indexer log, streaming individual 
task logs is not supported
+    return Optional.absent();
+  }
+
+  @Override
+  public List<Pair<Task, ListenableFuture<TaskStatus>>> restore()
+  {
+    final File restoreFile = getRestoreFile();
+    final TaskRestoreInfo taskRestoreInfo;
+    if (restoreFile.exists()) {
+      try {
+        taskRestoreInfo = jsonMapper.readValue(restoreFile, 
TaskRestoreInfo.class);
+      }
+      catch (Exception e) {
+        log.error(e, "Failed to read restorable tasks from file[%s]. Skipping 
restore.", restoreFile);
+        return ImmutableList.of();
+      }
+    } else {
+      return ImmutableList.of();
+    }
+
+    final List<Pair<Task, ListenableFuture<TaskStatus>>> retVal = new 
ArrayList<>();
+    for (final String taskId : taskRestoreInfo.getRunningTasks()) {
+      try {
+        final File taskFile = new File(taskConfig.getTaskDir(taskId), 
"task.json");
+        final Task task = jsonMapper.readValue(taskFile, Task.class);
+
+        if (!task.getId().equals(taskId)) {
+          throw new ISE("WTF?! Task[%s] restore file had wrong id[%s].", 
taskId, task.getId());
+        }
+
+        if (taskConfig.isRestoreTasksOnRestart() && task.canRestore()) {
+          log.info("Restoring task[%s].", task.getId());
+          retVal.add(Pair.of(task, run(task)));
+        }
+      }
+      catch (Exception e) {
+        log.warn(e, "Failed to restore task[%s]. Trying to restore other 
tasks.", taskId);
+      }
+    }
+
+    log.info("Restored %,d tasks.", retVal.size());
+
+    return retVal;
+  }
+
+  @Override
+  public void start()
+  {
+
 
 Review comment:
   Added comment

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to