This is an automated email from the ASF dual-hosted git repository.
karan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/druid.git
The following commit(s) were added to refs/heads/master by this push:
new 66dd2e35105 Remove MetadataStorageUpdaterJobHandler (#17734)
66dd2e35105 is described below
commit 66dd2e3510505c5586d0e9560276d8ebd69de592
Author: Kashif Faraz <[email protected]>
AuthorDate: Tue Feb 18 20:05:53 2025 +0530
Remove MetadataStorageUpdaterJobHandler (#17734)
MetadataStorageUpdaterJobHandler is used to commit segments to the metadata
store.
It is used only by stand-alone Hadoop indexer tasks i.e. CliHadoopIndexer
(which internally launches CliInternalHadoopIndexer).
Here, IndexerSQLMetadataStorageCoordinator is already bound which can
commit segments.
So, we don't need the extra MetadataStorageUpdaterJobHandler as it
unnecessarily duplicates code.
---
.../druid/indexer/HadoopDruidIndexerJob.java | 14 ++--
.../druid/indexer/MetadataStorageUpdaterJob.java | 16 ++--
.../indexer/path/SegmentMetadataPublisher.java | 24 ++++--
.../druid/indexer/HadoopDruidIndexerJobTest.java | 3 +-
.../indexer/MetadataStorageUpdaterJobTest.java | 35 ++------
.../indexing/common/task/HadoopIndexTask.java | 11 +--
.../druid/guice/SQLMetadataStorageDruidModule.java | 8 --
.../SQLMetadataStorageUpdaterJobHandler.java | 92 ----------------------
.../apache/druid/cli/CliInternalHadoopIndexer.java | 13 +--
9 files changed, 58 insertions(+), 158 deletions(-)
diff --git
a/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopDruidIndexerJob.java
b/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopDruidIndexerJob.java
index 58977ad4840..509054d7050 100644
---
a/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopDruidIndexerJob.java
+++
b/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopDruidIndexerJob.java
@@ -21,6 +21,7 @@ package org.apache.druid.indexer;
import com.google.common.base.Preconditions;
import com.google.inject.Inject;
+import org.apache.druid.indexer.path.SegmentMetadataPublisher;
import org.apache.druid.java.util.common.logger.Logger;
import javax.annotation.Nullable;
@@ -46,7 +47,7 @@ public class HadoopDruidIndexerJob implements Jobby
@Inject
public HadoopDruidIndexerJob(
HadoopDruidIndexerConfig config,
- MetadataStorageUpdaterJobHandler handler
+ SegmentMetadataPublisher handler
)
{
config.verify();
@@ -85,14 +86,9 @@ public class HadoopDruidIndexerJob implements Jobby
}
jobs.add(
- new Jobby()
- {
- @Override
- public boolean run()
- {
- publishedSegmentAndIndexZipFilePaths =
IndexGeneratorJob.getPublishedSegmentAndIndexZipFilePaths(config);
- return true;
- }
+ () -> {
+ publishedSegmentAndIndexZipFilePaths =
IndexGeneratorJob.getPublishedSegmentAndIndexZipFilePaths(config);
+ return true;
}
);
diff --git
a/indexing-hadoop/src/main/java/org/apache/druid/indexer/MetadataStorageUpdaterJob.java
b/indexing-hadoop/src/main/java/org/apache/druid/indexer/MetadataStorageUpdaterJob.java
index bdadc95010e..015b68f23d6 100644
---
a/indexing-hadoop/src/main/java/org/apache/druid/indexer/MetadataStorageUpdaterJob.java
+++
b/indexing-hadoop/src/main/java/org/apache/druid/indexer/MetadataStorageUpdaterJob.java
@@ -19,9 +19,11 @@
package org.apache.druid.indexer;
+import org.apache.druid.indexer.path.SegmentMetadataPublisher;
import org.apache.druid.timeline.DataSegment;
import java.util.List;
+import java.util.Set;
import java.util.stream.Collectors;
/**
@@ -29,11 +31,11 @@ import java.util.stream.Collectors;
public class MetadataStorageUpdaterJob implements Jobby
{
private final HadoopDruidIndexerConfig config;
- private final MetadataStorageUpdaterJobHandler handler;
+ private final SegmentMetadataPublisher handler;
public MetadataStorageUpdaterJob(
HadoopDruidIndexerConfig config,
- MetadataStorageUpdaterJobHandler handler
+ SegmentMetadataPublisher handler
)
{
this.config = config;
@@ -43,10 +45,12 @@ public class MetadataStorageUpdaterJob implements Jobby
@Override
public boolean run()
{
- final List<DataSegmentAndIndexZipFilePath> segmentAndIndexZipFilePaths =
IndexGeneratorJob.getPublishedSegmentAndIndexZipFilePaths(config);
- final List<DataSegment> segments =
segmentAndIndexZipFilePaths.stream().map(s ->
s.getSegment()).collect(Collectors.toList());
- final String segmentTable =
config.getSchema().getIOConfig().getMetadataUpdateSpec().getSegmentTable();
- handler.publishSegments(segmentTable, segments,
HadoopDruidIndexerConfig.JSON_MAPPER);
+ final List<DataSegmentAndIndexZipFilePath> segmentAndIndexZipFilePaths
+ = IndexGeneratorJob.getPublishedSegmentAndIndexZipFilePaths(config);
+ final Set<DataSegment> segments = segmentAndIndexZipFilePaths.stream()
+
.map(DataSegmentAndIndexZipFilePath::getSegment)
+
.collect(Collectors.toSet());
+ handler.publishSegments(segments);
return true;
}
diff --git
a/processing/src/main/java/org/apache/druid/indexer/MetadataStorageUpdaterJobHandler.java
b/indexing-hadoop/src/main/java/org/apache/druid/indexer/path/SegmentMetadataPublisher.java
similarity index 55%
rename from
processing/src/main/java/org/apache/druid/indexer/MetadataStorageUpdaterJobHandler.java
rename to
indexing-hadoop/src/main/java/org/apache/druid/indexer/path/SegmentMetadataPublisher.java
index af657b162b4..f336550c14e 100644
---
a/processing/src/main/java/org/apache/druid/indexer/MetadataStorageUpdaterJobHandler.java
+++
b/indexing-hadoop/src/main/java/org/apache/druid/indexer/path/SegmentMetadataPublisher.java
@@ -17,14 +17,28 @@
* under the License.
*/
-package org.apache.druid.indexer;
+package org.apache.druid.indexer.path;
-import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.druid.indexing.overlord.IndexerMetadataStorageCoordinator;
import org.apache.druid.timeline.DataSegment;
-import java.util.List;
+import java.util.Set;
-public interface MetadataStorageUpdaterJobHandler
+/**
+ * Publishes segments to the metadata store. Used by stand-alone hadoop indexer
+ * tasks to publish segments to the metadata store without going through the
Overlord.
+ */
+public class SegmentMetadataPublisher
{
- void publishSegments(String tableName, List<DataSegment> segments,
ObjectMapper mapper);
+ private final IndexerMetadataStorageCoordinator storageCoordinator;
+
+ public SegmentMetadataPublisher(IndexerMetadataStorageCoordinator
storageCoordinator)
+ {
+ this.storageCoordinator = storageCoordinator;
+ }
+
+ public void publishSegments(Set<DataSegment> segments)
+ {
+ storageCoordinator.commitSegments(segments, null);
+ }
}
diff --git
a/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopDruidIndexerJobTest.java
b/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopDruidIndexerJobTest.java
index 38b3b1accbc..a3b62d3fa5f 100644
---
a/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopDruidIndexerJobTest.java
+++
b/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopDruidIndexerJobTest.java
@@ -19,6 +19,7 @@
package org.apache.druid.indexer;
+import org.apache.druid.indexer.path.SegmentMetadataPublisher;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
@@ -40,7 +41,7 @@ public class HadoopDruidIndexerJobTest
public void test_run()
{
HadoopDruidIndexerConfig config = mock(HadoopDruidIndexerConfig.class);
- MetadataStorageUpdaterJobHandler handler =
mock(MetadataStorageUpdaterJobHandler.class);
+ SegmentMetadataPublisher handler = mock(SegmentMetadataPublisher.class);
try (MockedStatic<JobHelper> jobHelperMock =
Mockito.mockStatic(JobHelper.class)) {
try (final MockedStatic<IndexGeneratorJob> indexGeneratorJobMock =
Mockito.mockStatic(IndexGeneratorJob.class)) {
when(config.isUpdaterJobSpecSet()).thenReturn(false);
diff --git
a/indexing-hadoop/src/test/java/org/apache/druid/indexer/MetadataStorageUpdaterJobTest.java
b/indexing-hadoop/src/test/java/org/apache/druid/indexer/MetadataStorageUpdaterJobTest.java
index 606b93de0ed..e97e2e0c25b 100644
---
a/indexing-hadoop/src/test/java/org/apache/druid/indexer/MetadataStorageUpdaterJobTest.java
+++
b/indexing-hadoop/src/test/java/org/apache/druid/indexer/MetadataStorageUpdaterJobTest.java
@@ -19,8 +19,7 @@
package org.apache.druid.indexer;
-import com.google.common.collect.ImmutableList;
-import org.apache.druid.indexer.updater.MetadataStorageUpdaterJobSpec;
+import org.apache.druid.indexer.path.SegmentMetadataPublisher;
import org.junit.Test;
import org.mockito.MockedStatic;
import org.mockito.Mockito;
@@ -31,57 +30,39 @@ import java.util.stream.Collectors;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
-import static org.mockito.Mockito.when;
public class MetadataStorageUpdaterJobTest
{
- private static final List<DataSegmentAndIndexZipFilePath>
DATA_SEGMENT_AND_INDEX_ZIP_FILE_PATHS = ImmutableList.of(
+ private static final List<DataSegmentAndIndexZipFilePath>
DATA_SEGMENT_AND_INDEX_ZIP_FILE_PATHS = List.of(
new DataSegmentAndIndexZipFilePath(null, null, null)
);
- private static final String SEGMENT_TABLE = "segments";
- private HadoopIngestionSpec spec;
- private HadoopIOConfig ioConfig;
- private MetadataStorageUpdaterJobSpec metadataUpdateSpec;
private HadoopDruidIndexerConfig config;
- private MetadataStorageUpdaterJobHandler handler;
+ private SegmentMetadataPublisher handler;
private MetadataStorageUpdaterJob target;
@Test
public void test_run()
{
- metadataUpdateSpec = mock(MetadataStorageUpdaterJobSpec.class);
- ioConfig = mock(HadoopIOConfig.class);
- spec = mock(HadoopIngestionSpec.class);
config = mock(HadoopDruidIndexerConfig.class);
- handler = mock(MetadataStorageUpdaterJobHandler.class);
+ handler = mock(SegmentMetadataPublisher.class);
try (MockedStatic<IndexGeneratorJob> mockedStatic =
Mockito.mockStatic(IndexGeneratorJob.class)) {
mockedStatic.when(() ->
IndexGeneratorJob.getPublishedSegmentAndIndexZipFilePaths(config))
.thenReturn(DATA_SEGMENT_AND_INDEX_ZIP_FILE_PATHS);
- when(metadataUpdateSpec.getSegmentTable()).thenReturn(SEGMENT_TABLE);
- when(ioConfig.getMetadataUpdateSpec()).thenReturn(metadataUpdateSpec);
- when(spec.getIOConfig()).thenReturn(ioConfig);
- when(config.getSchema()).thenReturn(spec);
-
-
target = new MetadataStorageUpdaterJob(config, handler);
target.run();
verify(handler).publishSegments(
- SEGMENT_TABLE,
- DATA_SEGMENT_AND_INDEX_ZIP_FILE_PATHS.stream().map(s ->
s.getSegment()).collect(
- Collectors.toList()), HadoopDruidIndexerConfig.JSON_MAPPER
+ DATA_SEGMENT_AND_INDEX_ZIP_FILE_PATHS.stream()
+
.map(DataSegmentAndIndexZipFilePath::getSegment)
+ .collect(Collectors.toSet())
);
- verify(metadataUpdateSpec).getSegmentTable();
- verify(ioConfig).getMetadataUpdateSpec();
- verify(spec).getIOConfig();
- verify(config).getSchema();
mockedStatic.verify(() ->
IndexGeneratorJob.getPublishedSegmentAndIndexZipFilePaths(config));
- verifyNoMoreInteractions(handler, metadataUpdateSpec, ioConfig, spec,
config);
+ verifyNoMoreInteractions(handler, config);
}
}
}
diff --git
a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/HadoopIndexTask.java
b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/HadoopIndexTask.java
index 319c1a7ab69..6ae833a83c1 100644
---
a/indexing-service/src/main/java/org/apache/druid/indexing/common/task/HadoopIndexTask.java
+++
b/indexing-service/src/main/java/org/apache/druid/indexing/common/task/HadoopIndexTask.java
@@ -39,10 +39,10 @@ import org.apache.druid.indexer.HadoopDruidIndexerJob;
import org.apache.druid.indexer.HadoopIngestionSpec;
import org.apache.druid.indexer.IngestionState;
import org.apache.druid.indexer.JobHelper;
-import org.apache.druid.indexer.MetadataStorageUpdaterJobHandler;
import org.apache.druid.indexer.TaskMetricsGetter;
import org.apache.druid.indexer.TaskMetricsUtils;
import org.apache.druid.indexer.TaskStatus;
+import org.apache.druid.indexer.path.SegmentMetadataPublisher;
import org.apache.druid.indexer.report.TaskReport;
import org.apache.druid.indexing.common.TaskLock;
import org.apache.druid.indexing.common.TaskLockType;
@@ -52,6 +52,7 @@ import
org.apache.druid.indexing.common.actions.TimeChunkLockAcquireAction;
import org.apache.druid.indexing.common.actions.TimeChunkLockTryAcquireAction;
import org.apache.druid.indexing.common.config.TaskConfig;
import
org.apache.druid.indexing.hadoop.OverlordActionBasedUsedSegmentsRetriever;
+import org.apache.druid.indexing.overlord.IndexerMetadataStorageCoordinator;
import org.apache.druid.java.util.common.JodaUtils;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.common.granularity.Granularity;
@@ -832,12 +833,12 @@ public class HadoopIndexTask extends HadoopTask
implements ChatHandler
.withTuningConfig(theSchema.getTuningConfig().withVersion(version))
);
- // MetadataStorageUpdaterJobHandler is only needed when running
standalone without indexing service
- // In that case the whatever runs the Hadoop Index Task must ensure
MetadataStorageUpdaterJobHandler
+ // SegmentMetadataPublisher is only needed when running standalone
without indexing service
+ // In that case the whatever runs the Hadoop Index Task must ensure
IndexerMetadataStorageCoordinator
// can be injected based on the configuration given in
config.getSchema().getIOConfig().getMetadataUpdateSpec()
- final MetadataStorageUpdaterJobHandler maybeHandler;
+ final SegmentMetadataPublisher maybeHandler;
if (config.isUpdaterJobSpecSet()) {
- maybeHandler =
INJECTOR.getInstance(MetadataStorageUpdaterJobHandler.class);
+ maybeHandler = new
SegmentMetadataPublisher(INJECTOR.getInstance(IndexerMetadataStorageCoordinator.class));
} else {
maybeHandler = null;
}
diff --git
a/server/src/main/java/org/apache/druid/guice/SQLMetadataStorageDruidModule.java
b/server/src/main/java/org/apache/druid/guice/SQLMetadataStorageDruidModule.java
index 65057efd375..e5dabdb8fe4 100644
---
a/server/src/main/java/org/apache/druid/guice/SQLMetadataStorageDruidModule.java
+++
b/server/src/main/java/org/apache/druid/guice/SQLMetadataStorageDruidModule.java
@@ -23,8 +23,6 @@ import com.google.inject.Binder;
import com.google.inject.Key;
import com.google.inject.Module;
import org.apache.druid.audit.AuditManager;
-import org.apache.druid.indexer.MetadataStorageUpdaterJobHandler;
-import org.apache.druid.indexer.SQLMetadataStorageUpdaterJobHandler;
import org.apache.druid.indexing.overlord.IndexerMetadataStorageCoordinator;
import org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator;
import org.apache.druid.metadata.MetadataRuleManager;
@@ -79,7 +77,6 @@ public class SQLMetadataStorageDruidModule implements Module
PolyBind.createChoiceWithDefault(binder, prop,
Key.get(SegmentMetadataTransactionFactory.class), defaultValue);
PolyBind.createChoiceWithDefault(binder, prop,
Key.get(IndexerMetadataStorageCoordinator.class), defaultValue);
PolyBind.createChoiceWithDefault(binder, prop,
Key.get(MetadataStorageActionHandlerFactory.class), defaultValue);
- PolyBind.createChoiceWithDefault(binder, prop,
Key.get(MetadataStorageUpdaterJobHandler.class), defaultValue);
PolyBind.createChoiceWithDefault(binder, prop,
Key.get(MetadataSupervisorManager.class), defaultValue);
configureAuditManager(binder);
@@ -124,11 +121,6 @@ public class SQLMetadataStorageDruidModule implements
Module
.to(IndexerSQLMetadataStorageCoordinator.class)
.in(ManageLifecycle.class);
- PolyBind.optionBinder(binder,
Key.get(MetadataStorageUpdaterJobHandler.class))
- .addBinding(type)
- .to(SQLMetadataStorageUpdaterJobHandler.class)
- .in(LazySingleton.class);
-
PolyBind.optionBinder(binder, Key.get(MetadataSupervisorManager.class))
.addBinding(type)
.to(SQLMetadataSupervisorManager.class)
diff --git
a/server/src/main/java/org/apache/druid/indexer/SQLMetadataStorageUpdaterJobHandler.java
b/server/src/main/java/org/apache/druid/indexer/SQLMetadataStorageUpdaterJobHandler.java
deleted file mode 100644
index e1c9df11754..00000000000
---
a/server/src/main/java/org/apache/druid/indexer/SQLMetadataStorageUpdaterJobHandler.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.druid.indexer;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.collect.ImmutableMap;
-import com.google.inject.Inject;
-import org.apache.druid.java.util.common.DateTimes;
-import org.apache.druid.java.util.common.StringUtils;
-import org.apache.druid.java.util.common.logger.Logger;
-import org.apache.druid.metadata.SQLMetadataConnector;
-import org.apache.druid.timeline.DataSegment;
-import org.apache.druid.timeline.partition.NoneShardSpec;
-import org.skife.jdbi.v2.Handle;
-import org.skife.jdbi.v2.IDBI;
-import org.skife.jdbi.v2.PreparedBatch;
-import org.skife.jdbi.v2.tweak.HandleCallback;
-
-import java.util.List;
-
-public class SQLMetadataStorageUpdaterJobHandler implements
MetadataStorageUpdaterJobHandler
-{
- private static final Logger log = new
Logger(SQLMetadataStorageUpdaterJobHandler.class);
- private final SQLMetadataConnector connector;
- private final IDBI dbi;
-
- @Inject
- public SQLMetadataStorageUpdaterJobHandler(SQLMetadataConnector connector)
- {
- this.connector = connector;
- this.dbi = connector.getDBI();
- }
-
- @Override
- public void publishSegments(final String tableName, final List<DataSegment>
segments, final ObjectMapper mapper)
- {
- dbi.withHandle(
- new HandleCallback<Void>()
- {
- @Override
- public Void withHandle(Handle handle) throws Exception
- {
- final PreparedBatch batch = handle.prepareBatch(
- StringUtils.format(
- "INSERT INTO %1$s (id, dataSource, created_date, start,
%2$send%2$s, partitioned, version, used, payload, used_status_last_updated) "
- + "VALUES (:id, :dataSource, :created_date, :start, :end,
:partitioned, :version, :used, :payload, :used_status_last_updated)",
- tableName, connector.getQuoteString()
- )
- );
- for (final DataSegment segment : segments) {
- String now = DateTimes.nowUtc().toString();
- batch.add(
- new ImmutableMap.Builder<String, Object>()
- .put("id", segment.getId().toString())
- .put("dataSource", segment.getDataSource())
- .put("created_date", now)
- .put("start",
segment.getInterval().getStart().toString())
- .put("end", segment.getInterval().getEnd().toString())
- .put("partitioned", (segment.getShardSpec() instanceof
NoneShardSpec) ? false : true)
- .put("version", segment.getVersion())
- .put("used", true)
- .put("payload", mapper.writeValueAsBytes(segment))
- .put("used_status_last_updated", now)
- .build()
- );
- log.info("Published %s", segment.getId());
- }
- batch.execute();
-
- return null;
- }
- }
- );
- }
-}
diff --git
a/services/src/main/java/org/apache/druid/cli/CliInternalHadoopIndexer.java
b/services/src/main/java/org/apache/druid/cli/CliInternalHadoopIndexer.java
index 3f7be656a25..f621307b371 100644
--- a/services/src/main/java/org/apache/druid/cli/CliInternalHadoopIndexer.java
+++ b/services/src/main/java/org/apache/druid/cli/CliInternalHadoopIndexer.java
@@ -36,8 +36,8 @@ import org.apache.druid.indexer.HadoopDruidIndexerJob;
import org.apache.druid.indexer.HadoopIngestionSpec;
import org.apache.druid.indexer.JobHelper;
import org.apache.druid.indexer.Jobby;
-import org.apache.druid.indexer.MetadataStorageUpdaterJobHandler;
import org.apache.druid.indexer.path.MetadataStoreBasedUsedSegmentsRetriever;
+import org.apache.druid.indexer.path.SegmentMetadataPublisher;
import org.apache.druid.indexer.updater.MetadataStorageUpdaterJobSpec;
import org.apache.druid.indexing.overlord.IndexerMetadataStorageCoordinator;
import org.apache.druid.java.util.common.logger.Logger;
@@ -110,16 +110,19 @@ public class CliInternalHadoopIndexer extends
GuiceRunnable
Preconditions.checkNotNull(metadataSpec.getType(), "type in
metadataUpdateSpec must not be null");
injector.getInstance(Properties.class).setProperty("druid.metadata.storage.type",
metadataSpec.getType());
+ final IndexerMetadataStorageCoordinator storageCoordinator
+ = injector.getInstance(IndexerMetadataStorageCoordinator.class);
HadoopIngestionSpec.updateSegmentListIfDatasourcePathSpecIsUsed(
config.getSchema(),
HadoopDruidIndexerConfig.JSON_MAPPER,
- new MetadataStoreBasedUsedSegmentsRetriever(
- injector.getInstance(IndexerMetadataStorageCoordinator.class)
- )
+ new MetadataStoreBasedUsedSegmentsRetriever(storageCoordinator)
);
List<Jobby> jobs = new ArrayList<>();
- HadoopDruidIndexerJob indexerJob = new HadoopDruidIndexerJob(config,
injector.getInstance(MetadataStorageUpdaterJobHandler.class));
+ HadoopDruidIndexerJob indexerJob = new HadoopDruidIndexerJob(
+ config,
+ new SegmentMetadataPublisher(storageCoordinator)
+ );
jobs.add(new HadoopDruidDetermineConfigurationJob(config));
jobs.add(indexerJob);
boolean jobsSucceeded = JobHelper.runJobs(jobs);
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]