This is an automated email from the ASF dual-hosted git repository.
kfaraz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/druid.git
The following commit(s) were added to refs/heads/master by this push:
new c5a85b48d6e Migrate ITAppendBatchIndexTest, ITCustomCoordinatorDutiesTest to embedded tests (#18867)
c5a85b48d6e is described below
commit c5a85b48d6e3abcba6cc04ac0bba185aacb916a9
Author: Kashif Faraz <[email protected]>
AuthorDate: Sat Dec 27 09:36:59 2025 +0530
Migrate ITAppendBatchIndexTest, ITCustomCoordinatorDutiesTest to embedded tests (#18867)

Changes:
- Replace `ITAppendBatchIndexTest` with a new method in `IndexParallelTaskTest`
- Migrate `ITCustomCoordinatorDutiesTest` to embedded tests as `KillSupervisorsCustomDutyTest` (see the configuration sketch below)
- Remove the following test groups, since all tests in these groups have already been migrated:
  - `input-source` (#18805, #18752)
  - `cds-task-schema-publish-disabled`
  - `cds-coordinator-metadata-query-disabled`
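For reference, a minimal sketch of how the new embedded test enables the custom coordinator duty (the property names and values are taken from the `KillSupervisorsCustomDutyTest` added in this diff; the removed `custom-coordinator-duties` test-group config set the same properties with a 10-second period instead of 0.1 seconds):

```java
// Sketch: coordinator properties that enable the killSupervisors custom duty,
// as set on the EmbeddedCoordinator in KillSupervisorsCustomDutyTest below.
EmbeddedCoordinator coordinator = new EmbeddedCoordinator()
    .addProperty("druid.coordinator.kill.supervisor.on", "false")
    .addProperty("druid.coordinator.dutyGroups", "[\"cleanupMetadata\"]")
    .addProperty("druid.coordinator.cleanupMetadata.duties", "[\"killSupervisors\"]")
    .addProperty("druid.coordinator.cleanupMetadata.duty.killSupervisors.durationToRetain", "PT0M")
    .addProperty("druid.coordinator.cleanupMetadata.period", "PT0.1S");
```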
---
.github/workflows/cron-job-its.yml | 4 +-
.github/workflows/standard-its.yml | 16 +-
docs/development/modules.md | 15 +-
.../embedded/compact/CompactionSupervisorTest.java | 15 +-
.../embedded/indexing/IndexParallelTaskTest.java | 86 +++++++---
.../server/KillSupervisorsCustomDutyTest.java | 124 ++++++++++++++
...ose.cds-coordinator-metadata-query-disabled.yml | 117 -------------
...er-compose.cds-task-schema-publish-disabled.yml | 118 -------------
integration-tests/docker/druid.sh | 7 +-
.../test-groups/custom-coordinator-duties | 31 ----
...dinator-metadata-query-disabled-sample-data.sql | 20 ---
...ds-task-schema-publish-disabled-sample-data.sql | 20 ---
integration-tests/script/docker_compose_args.sh | 8 -
.../java/org/apache/druid/tests/TestNGGroup.java | 8 -
.../tests/indexer/ITAppendBatchIndexTest.java | 189 ---------------------
.../parallelized/ITCustomCoordinatorDuties.java | 54 ------
.../indexer/wikipedia_compaction_task.json | 14 --
...ingestion_non_perfect_rollup_index_queries.json | 143 ----------------
...ble_ingestion_perfect_rollup_index_queries.json | 143 ----------------
.../wikipedia_local_input_source_index_task.json | 89 ----------
20 files changed, 193 insertions(+), 1028 deletions(-)
diff --git a/.github/workflows/cron-job-its.yml b/.github/workflows/cron-job-its.yml
index 1f555778ab9..d26ece294c9 100644
--- a/.github/workflows/cron-job-its.yml
+++ b/.github/workflows/cron-job-its.yml
@@ -60,7 +60,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- testing_group: [kafka-index, kafka-index-slow, kafka-transactional-index, kafka-transactional-index-slow, kafka-data-format, realtime-index, append-ingestion]
+ testing_group: [kafka-index, kafka-index-slow, kafka-transactional-index, kafka-transactional-index-slow, kafka-data-format, realtime-index]
uses: ./.github/workflows/reusable-standard-its.yml
needs: build
with:
@@ -74,7 +74,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- testing_group: [ input-source, kafka-index, kafka-transactional-index, kafka-index-slow, kafka-transactional-index-slow, kafka-data-format, append-ingestion ]
+ testing_group: [ kafka-index, kafka-transactional-index, kafka-index-slow, kafka-transactional-index-slow, kafka-data-format ]
uses: ./.github/workflows/reusable-standard-its.yml
needs: build
with:
diff --git a/.github/workflows/standard-its.yml b/.github/workflows/standard-its.yml
index 3c30c5c9194..e195572b6fb 100644
--- a/.github/workflows/standard-its.yml
+++ b/.github/workflows/standard-its.yml
@@ -47,7 +47,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- testing_group: [kafka-index, kafka-index-slow, kafka-transactional-index, kafka-transactional-index-slow, realtime-index, append-ingestion, cds-task-schema-publish-disabled, cds-coordinator-metadata-query-disabled]
+ testing_group: [kafka-index, kafka-index-slow, kafka-transactional-index, kafka-transactional-index-slow, realtime-index]
uses: ./.github/workflows/reusable-standard-its.yml
if: ${{ needs.changes.outputs.core == 'true' || needs.changes.outputs.common-extensions == 'true' }}
with:
@@ -63,7 +63,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- testing_group: [input-source, kafka-index, append-ingestion]
+ testing_group: [kafka-index]
uses: ./.github/workflows/reusable-standard-its.yml
if: ${{ needs.changes.outputs.core == 'true' || needs.changes.outputs.common-extensions == 'true' }}
with:
@@ -101,15 +101,3 @@ jobs:
mysql_driver: org.mariadb.jdbc.Driver
override_config_path: ./environment-configs/test-groups/prepopulated-data
group: query
-
- integration-custom-coordinator-duties-tests:
- needs: changes
- uses: ./.github/workflows/reusable-standard-its.yml
- if: ${{ needs.changes.outputs.core == 'true' || needs.changes.outputs.common-extensions == 'true' }}
- with:
- build_jdk: 17
- runtime_jdk: 17
- testing_groups: -Dgroups=custom-coordinator-duties
- use_indexer: middleManager
- override_config_path: ./environment-configs/test-groups/custom-coordinator-duties
- group: custom coordinator duties
\ No newline at end of file
diff --git a/docs/development/modules.md b/docs/development/modules.md
index c62a6d4a086..ff0411b150a 100644
--- a/docs/development/modules.md
+++ b/docs/development/modules.md
@@ -326,19 +326,8 @@ The duties will be grouped into multiple groups as per the elements in list `dru
All duties in the same group will have the same run period configured by `druid.coordinator.<GROUP_NAME>.period`.
Currently, there is a single thread running the duties sequentially for each group.
-For example, see `KillSupervisorsCustomDuty` for a custom coordinator duty implementation and the `custom-coordinator-duties`
-integration test group which loads `KillSupervisorsCustomDuty` using the configs set in `integration-tests/docker/environment-configs/test-groups/custom-coordinator-duties`.
-This config file adds the configs below to enable a custom coordinator duty.
-
-```properties
-druid.coordinator.dutyGroups=["cleanupMetadata"]
-druid.coordinator.cleanupMetadata.duties=["killSupervisors"]
-druid.coordinator.cleanupMetadata.duty.killSupervisors.durationToRetain=PT0M
-druid.coordinator.cleanupMetadata.period=PT10S
-```
-
-These configurations create a custom coordinator duty group called `cleanupMetadata` which runs a custom coordinator duty called `killSupervisors` every 10 seconds.
-The custom coordinator duty `killSupervisors` also has a config called `durationToRetain` which is set to 0 minute.
+For example, see `KillSupervisorsCustomDuty` for a custom coordinator duty implementation and the `KillSupervisorsCustomDutyTest`
+for sample properties that may be used to configure the `KillSupervisorsCustomDuty`.
### Routing data through a HTTP proxy for your extension
diff --git a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/compact/CompactionSupervisorTest.java b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/compact/CompactionSupervisorTest.java
index 9019a0cb5ec..c3796ccaa15 100644
--- a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/compact/CompactionSupervisorTest.java
+++ b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/compact/CompactionSupervisorTest.java
@@ -29,11 +29,6 @@ import org.apache.druid.indexing.compact.CompactionSupervisorSpec;
import org.apache.druid.indexing.overlord.Segments;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.java.util.common.granularity.Granularity;
-import org.apache.druid.msq.guice.IndexerMemoryManagementModule;
-import org.apache.druid.msq.guice.MSQDurableStorageModule;
-import org.apache.druid.msq.guice.MSQIndexingModule;
-import org.apache.druid.msq.guice.MSQSqlModule;
-import org.apache.druid.msq.guice.SqlTaskModule;
import org.apache.druid.query.DruidMetrics;
import org.apache.druid.rpc.UpdateResponse;
import org.apache.druid.server.coordinator.ClusterCompactionConfig;
@@ -90,15 +85,7 @@ public class CompactionSupervisorTest extends EmbeddedClusterTestBase
"[\"org.apache.druid.query.policy.NoRestrictionPolicy\"]"
)
.addCommonProperty("druid.policy.enforcer.type", "restrictAllTables")
- .addExtensions(
- CatalogClientModule.class,
- CatalogCoordinatorModule.class,
- IndexerMemoryManagementModule.class,
- MSQDurableStorageModule.class,
- MSQIndexingModule.class,
- MSQSqlModule.class,
- SqlTaskModule.class
- )
+ .addExtensions(CatalogClientModule.class, CatalogCoordinatorModule.class)
.addServer(coordinator)
.addServer(overlord)
.addServer(indexer)
diff --git a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/indexing/IndexParallelTaskTest.java b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/indexing/IndexParallelTaskTest.java
index 23645dea594..19ee1061a86 100644
--- a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/indexing/IndexParallelTaskTest.java
+++ b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/indexing/IndexParallelTaskTest.java
@@ -129,30 +129,7 @@ public class IndexParallelTaskTest extends EmbeddedClusterTestBase
{
final boolean isRollup = partitionsSpec.isForceGuaranteedRollupCompatible();
- final TaskBuilder.IndexParallel indexTask =
- TaskBuilder.ofTypeIndexParallel()
- .dataSource(dataSource)
- .timestampColumn("timestamp")
- .jsonInputFormat()
- .localInputSourceWithFiles(
- Resources.DataFile.tinyWiki1Json(),
- Resources.DataFile.tinyWiki2Json(),
- Resources.DataFile.tinyWiki3Json()
- )
- .segmentGranularity("DAY")
- .dimensions("namespace", "page", "language")
- .metricAggregates(
- new DoubleSumAggregatorFactory("added", "added"),
- new DoubleSumAggregatorFactory("deleted", "deleted"),
- new DoubleSumAggregatorFactory("delta", "delta"),
- new CountAggregatorFactory("count")
- )
- .tuningConfig(
- t -> t.withPartitionsSpec(partitionsSpec)
- .withForceGuaranteedRollup(isRollup)
- .withMaxNumConcurrentSubTasks(10)
- .withSplitHintSpec(new MaxSizeSplitHintSpec(1, null))
- );
+ final TaskBuilder.IndexParallel indexTask = buildIndexParallelTask(partitionsSpec, false);
runTask(indexTask, dataSource);
cluster.callApi().waitForAllSegmentsToBeAvailable(dataSource, coordinator, broker);
@@ -211,6 +188,60 @@ public class IndexParallelTaskTest extends EmbeddedClusterTestBase
runQueries(dataSource3);
}
+ @MethodSource("getTestParamPartitionsSpec")
+ @ParameterizedTest(name = "partitionsSpec={0}")
+ public void test_runIndexTask_andAppendData(PartitionsSpec partitionsSpec)
+ {
+ final TaskBuilder.IndexParallel initialTask = buildIndexParallelTask(partitionsSpec, false);
+ runTask(initialTask, dataSource);
+ cluster.callApi().waitForAllSegmentsToBeAvailable(dataSource, coordinator, broker);
+ cluster.callApi().verifySqlQuery("SELECT COUNT(*) FROM %s", dataSource, "10");
+ runGroupByQuery("Crimson Typhoon,1,905.0,9050.0");
+
+ final TaskBuilder.IndexParallel appendTask
+ = buildIndexParallelTask(new DynamicPartitionsSpec(null, null), true);
+ runTask(appendTask, dataSource);
+ cluster.callApi().waitForAllSegmentsToBeAvailable(dataSource, coordinator, broker);
+ cluster.callApi().verifySqlQuery("SELECT COUNT(*) FROM %s", dataSource, "20");
+ runGroupByQuery("Crimson Typhoon,2,1810.0,18100.0");
+ }
+
+ /**
+ * Creates a builder for an "index_parallel" task to ingest into {@link #dataSource}.
+ */
+ private TaskBuilder.IndexParallel buildIndexParallelTask(
+ PartitionsSpec partitionsSpec,
+ boolean appendToExisting
+ )
+ {
+ final boolean isRollup = partitionsSpec.isForceGuaranteedRollupCompatible();
+
+ return TaskBuilder.ofTypeIndexParallel()
+ .dataSource(dataSource)
+ .timestampColumn("timestamp")
+ .jsonInputFormat()
+ .localInputSourceWithFiles(
+ Resources.DataFile.tinyWiki1Json(),
+ Resources.DataFile.tinyWiki2Json(),
+ Resources.DataFile.tinyWiki3Json()
+ )
+ .segmentGranularity("DAY")
+ .dimensions("namespace", "page", "language")
+ .metricAggregates(
+ new DoubleSumAggregatorFactory("added", "added"),
+ new DoubleSumAggregatorFactory("deleted", "deleted"),
+ new DoubleSumAggregatorFactory("delta", "delta"),
+ new CountAggregatorFactory("count")
+ )
+ .appendToExisting(appendToExisting)
+ .tuningConfig(
+ t -> t.withPartitionsSpec(partitionsSpec)
+ .withForceGuaranteedRollup(isRollup)
+ .withMaxNumConcurrentSubTasks(10)
+ .withSplitHintSpec(new MaxSizeSplitHintSpec(1, null))
+ );
+ }
+
private String runTask(TaskBuilder.IndexParallel taskBuilder, String dataSource)
{
final String taskId = EmbeddedClusterApis.newTaskId(dataSource);
@@ -224,8 +255,13 @@ public class IndexParallelTaskTest extends EmbeddedClusterTestBase
"10,2013-09-01T12:41:27.000Z,2013-08-31T01:02:33.000Z",
cluster.runSql("SELECT COUNT(*), MAX(__time), MIN(__time) FROM %s", dataSource)
);
+ runGroupByQuery("Crimson Typhoon,1,905.0,9050.0");
+ }
+
+ private void runGroupByQuery(String expectedResult)
+ {
Assertions.assertEquals(
- "Crimson Typhoon,1,905.0,9050.0",
+ expectedResult,
cluster.runSql(
"SELECT \"page\", COUNT(*) AS \"rows\", SUM(\"added\"), 10 *
SUM(\"added\") AS added_times_ten"
+ " FROM %s"
diff --git a/embedded-tests/src/test/java/org/apache/druid/testing/embedded/server/KillSupervisorsCustomDutyTest.java b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/server/KillSupervisorsCustomDutyTest.java
new file mode 100644
index 00000000000..f29d4b305b5
--- /dev/null
+++ b/embedded-tests/src/test/java/org/apache/druid/testing/embedded/server/KillSupervisorsCustomDutyTest.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.testing.embedded.server;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import org.apache.druid.error.ExceptionMatcher;
+import org.apache.druid.indexing.compact.CompactionSupervisorSpec;
+import org.apache.druid.indexing.overlord.supervisor.NoopSupervisorSpec;
+import org.apache.druid.indexing.overlord.supervisor.SupervisorSpec;
+import org.apache.druid.indexing.overlord.supervisor.VersionedSupervisorSpec;
+import org.apache.druid.java.util.common.StringUtils;
+import org.apache.druid.rpc.HttpResponseException;
+import org.apache.druid.rpc.RequestBuilder;
+import org.apache.druid.server.coordinator.InlineSchemaDataSourceCompactionConfig;
+import org.apache.druid.testing.embedded.EmbeddedBroker;
+import org.apache.druid.testing.embedded.EmbeddedCoordinator;
+import org.apache.druid.testing.embedded.EmbeddedDruidCluster;
+import org.apache.druid.testing.embedded.EmbeddedOverlord;
+import org.apache.druid.testing.embedded.junit5.EmbeddedClusterTestBase;
+import org.hamcrest.MatcherAssert;
+import org.hamcrest.Matchers;
+import org.jboss.netty.handler.codec.http.HttpMethod;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+import java.util.List;
+
+public class KillSupervisorsCustomDutyTest extends EmbeddedClusterTestBase
+{
+ private final EmbeddedCoordinator coordinator = new EmbeddedCoordinator()
+ .addProperty("druid.coordinator.kill.supervisor.on", "false")
+ .addProperty("druid.coordinator.dutyGroups", "[\"cleanupMetadata\"]")
+ .addProperty("druid.coordinator.cleanupMetadata.duties",
"[\"killSupervisors\"]")
+
.addProperty("druid.coordinator.cleanupMetadata.duty.killSupervisors.durationToRetain",
"PT0M")
+ .addProperty("druid.coordinator.cleanupMetadata.period", "PT0.1S");
+
+ @Override
+ protected EmbeddedDruidCluster createCluster()
+ {
+ return EmbeddedDruidCluster
+ .withEmbeddedDerbyAndZookeeper()
+ .useLatchableEmitter()
+ .addServer(coordinator)
+ .addServer(new EmbeddedOverlord())
+ .addServer(new EmbeddedBroker());
+ }
+
+ @Test
+ public void test_customDuty_removesHistoryOfTerminatedSupervisor()
+ {
+ // Create a compaction supervisor
+ final CompactionSupervisorSpec supervisor = new CompactionSupervisorSpec(
+ InlineSchemaDataSourceCompactionConfig.builder().forDataSource(dataSource).build(),
+ false,
+ null
+ );
+ cluster.callApi().postSupervisor(supervisor);
+
+ // Verify that the history of the supervisor has 1 entry
+ final List<VersionedSupervisorSpec> history = getSupervisorHistory(supervisor.getId());
+ Assertions.assertEquals(1, history.size());
+
+ final SupervisorSpec supervisorEntry = history.get(0).getSpec();
+ Assertions.assertNotNull(supervisorEntry);
+ Assertions.assertEquals(List.of(dataSource), supervisorEntry.getDataSources());
+ Assertions.assertEquals(supervisor.getId(), supervisorEntry.getId());
+
+ // Terminate the supervisor
+ cluster.callApi().onLeaderOverlord(o -> o.terminateSupervisor(supervisor.getId()));
+
+ // Verify that the history now has 2 entries and the latest entry is a tombstone
+ final List<VersionedSupervisorSpec> historyAfterTermination = getSupervisorHistory(supervisor.getId());
+ Assertions.assertEquals(2, historyAfterTermination.size());
+ Assertions.assertInstanceOf(NoopSupervisorSpec.class, historyAfterTermination.get(0).getSpec());
+
+ // Wait until the cleanup metric has been emitted
+ coordinator.latchableEmitter().waitForEvent(
+ event -> event.hasMetricName("metadata/kill/supervisor/count")
+ .hasValueMatching(Matchers.greaterThanOrEqualTo(1L))
+ );
+
+ // Verify that the history now returns 404 Not Found
+ MatcherAssert.assertThat(
+ Assertions.assertThrows(
+ RuntimeException.class,
+ () -> getSupervisorHistory(supervisor.getId())
+ ),
+ ExceptionMatcher.of(RuntimeException.class).expectRootCause(
+ ExceptionMatcher.of(HttpResponseException.class)
+ .expectMessageContains("404 Not Found")
+ .expectMessageContains(StringUtils.format("No
history for [%s]", supervisor.getId()))
+ )
+ );
+ }
+
+ private List<VersionedSupervisorSpec> getSupervisorHistory(String supervisorId)
+ {
+ final String url = StringUtils.format(
+ "/druid/indexer/v1/supervisor/%s/history",
+ StringUtils.urlEncode(supervisorId)
+ );
+ return cluster.callApi().serviceClient().onLeaderOverlord(
+ mapper -> new RequestBuilder(HttpMethod.GET, url),
+ new TypeReference<>() {}
+ );
+ }
+}
diff --git a/integration-tests/docker/docker-compose.cds-coordinator-metadata-query-disabled.yml b/integration-tests/docker/docker-compose.cds-coordinator-metadata-query-disabled.yml
deleted file mode 100644
index 569cef1d06d..00000000000
--- a/integration-tests/docker/docker-compose.cds-coordinator-metadata-query-disabled.yml
+++ /dev/null
@@ -1,117 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "2.2"
-services:
- druid-zookeeper-kafka:
- extends:
- file: docker-compose.base.yml
- service: druid-zookeeper-kafka
-
- druid-metadata-storage:
- extends:
- file: docker-compose.base.yml
- service: druid-metadata-storage
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - druid-zookeeper-kafka
-
- druid-coordinator:
- extends:
- file: docker-compose.base.yml
- service: druid-coordinator
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_centralizedDatasourceSchema_enabled=true
- - druid_centralizedDatasourceSchema_backFillEnabled=true
- - druid_centralizedDatasourceSchema_backFillPeriod=15000
- - druid_coordinator_segmentMetadata_metadataRefreshPeriod=PT15S
- - druid_coordinator_segmentMetadata_disableSegmentMetadataQueries=true
- - druid_manager_segments_useIncrementalCache=always
- depends_on:
- druid-overlord:
- condition: service_started
- druid-metadata-storage:
- condition: service_healthy
- druid-zookeeper-kafka:
- condition: service_started
-
- druid-overlord:
- extends:
- file: docker-compose.base.yml
- service: druid-overlord
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_centralizedDatasourceSchema_enabled=true
- - druid_manager_segments_useIncrementalCache=always
- depends_on:
- druid-metadata-storage:
- condition: service_healthy
- druid-zookeeper-kafka:
- condition: service_started
-
- druid-historical:
- extends:
- file: docker-compose.base.yml
- service: druid-historical
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - druid-zookeeper-kafka
-
- druid-middlemanager:
- extends:
- file: docker-compose.base.yml
- service: druid-middlemanager
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- -
druid_indexer_fork_property_druid_centralizedDatasourceSchema_enabled=true
- depends_on:
- - druid-zookeeper-kafka
- - druid-overlord
-
- druid-broker:
- extends:
- file: docker-compose.base.yml
- service: druid-broker
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_sql_planner_metadataRefreshPeriod=PT20S
- - druid_sql_planner_disableSegmentMetadataQueries=true
- depends_on:
- - druid-coordinator
- - druid-zookeeper-kafka
- - druid-middlemanager
- - druid-historical
-
- druid-router:
- extends:
- file: docker-compose.base.yml
- service: druid-router
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - druid-zookeeper-kafka
- - druid-coordinator
- - druid-broker
- - druid-overlord
-
-networks:
- druid-it-net:
- name: druid-it-net
- ipam:
- config:
- - subnet: 172.172.172.0/24
diff --git a/integration-tests/docker/docker-compose.cds-task-schema-publish-disabled.yml b/integration-tests/docker/docker-compose.cds-task-schema-publish-disabled.yml
deleted file mode 100644
index 65bbef6b238..00000000000
--- a/integration-tests/docker/docker-compose.cds-task-schema-publish-disabled.yml
+++ /dev/null
@@ -1,118 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-version: "2.2"
-services:
- druid-zookeeper-kafka:
- extends:
- file: docker-compose.base.yml
- service: druid-zookeeper-kafka
-
- druid-metadata-storage:
- extends:
- file: docker-compose.base.yml
- service: druid-metadata-storage
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - druid-zookeeper-kafka
-
- druid-coordinator:
- extends:
- file: docker-compose.base.yml
- service: druid-coordinator
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_centralizedDatasourceSchema_enabled=true
- - druid_centralizedDatasourceSchema_taskSchemaPublishDisabled=true
- - druid_centralizedDatasourceSchema_backFillEnabled=true
- - druid_centralizedDatasourceSchema_backFillPeriod=15000
- - druid_coordinator_segmentMetadata_metadataRefreshPeriod=PT15S
- - druid_manager_segments_useIncrementalCache=always
- depends_on:
- druid-overlord:
- condition: service_started
- druid-metadata-storage:
- condition: service_healthy
- druid-zookeeper-kafka:
- condition: service_started
-
- druid-overlord:
- extends:
- file: docker-compose.base.yml
- service: druid-overlord
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_centralizedDatasourceSchema_enabled=true
- - druid_centralizedDatasourceSchema_taskSchemaPublishDisabled=true
- - druid_manager_segments_useIncrementalCache=always
- depends_on:
- druid-metadata-storage:
- condition: service_healthy
- druid-zookeeper-kafka:
- condition: service_started
-
- druid-historical:
- extends:
- file: docker-compose.base.yml
- service: druid-historical
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - druid-zookeeper-kafka
-
- druid-middlemanager:
- extends:
- file: docker-compose.base.yml
- service: druid-middlemanager
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- -
druid_indexer_fork_property_druid_centralizedDatasourceSchema_enabled=true
- depends_on:
- - druid-zookeeper-kafka
- - druid-overlord
-
- druid-broker:
- extends:
- file: docker-compose.base.yml
- service: druid-broker
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- - druid_sql_planner_metadataRefreshPeriod=PT20S
- - druid_sql_planner_disableSegmentMetadataQueries=true
- depends_on:
- - druid-coordinator
- - druid-zookeeper-kafka
- - druid-middlemanager
- - druid-historical
-
- druid-router:
- extends:
- file: docker-compose.base.yml
- service: druid-router
- environment:
- - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
- depends_on:
- - druid-zookeeper-kafka
- - druid-coordinator
- - druid-broker
- - druid-overlord
-
-networks:
- druid-it-net:
- name: druid-it-net
- ipam:
- config:
- - subnet: 172.172.172.0/24
diff --git a/integration-tests/docker/druid.sh b/integration-tests/docker/druid.sh
index 6d666af1a17..9478a9e9ef4 100755
--- a/integration-tests/docker/druid.sh
+++ b/integration-tests/docker/druid.sh
@@ -103,14 +103,9 @@ setupData()
# The "query" and "security" test groups require data to be setup before
running the tests.
# In particular, they requires segments to be download from a pre-existing
s3 bucket.
# This is done by using the loadSpec put into metadatastore and s3
credientials set below.
- if [ "$DRUID_INTEGRATION_TEST_GROUP" = "query" ] || [
"$DRUID_INTEGRATION_TEST_GROUP" = "security" ] || [
"$DRUID_INTEGRATION_TEST_GROUP" = "upgrade" ] || [
"$DRUID_INTEGRATION_TEST_GROUP" = "centralized-datasource-schema" ] || [
"$DRUID_INTEGRATION_TEST_GROUP" = "cds-task-schema-publish-disabled" ] || [
"$DRUID_INTEGRATION_TEST_GROUP" = "cds-coordinator-metadata-query-disabled" ];
then
+ if [ "$DRUID_INTEGRATION_TEST_GROUP" = "query" ] || [
"$DRUID_INTEGRATION_TEST_GROUP" = "security" ] || [
"$DRUID_INTEGRATION_TEST_GROUP" = "upgrade" ] || [
"$DRUID_INTEGRATION_TEST_GROUP" = "centralized-datasource-schema" ]; then
cat /test-data/${DRUID_INTEGRATION_TEST_GROUP}-sample-data.sql | mysql -u
root druid
fi
- # The SqlInputSource tests in the "input-source" test group require data to
be setup in MySQL before running the tests.
- if [ "$DRUID_INTEGRATION_TEST_GROUP" = "input-source" ] ; then
- echo "GRANT ALL ON sqlinputsource.* TO 'druid'@'%'; CREATE database
sqlinputsource DEFAULT CHARACTER SET utf8mb4;" | mysql -u root druid
- cat /test-data/sql-input-source-sample-data.sql | mysql -u root druid
- fi
service mariadb stop
}
diff --git a/integration-tests/docker/environment-configs/test-groups/custom-coordinator-duties b/integration-tests/docker/environment-configs/test-groups/custom-coordinator-duties
deleted file mode 100644
index cea6370c291..00000000000
--- a/integration-tests/docker/environment-configs/test-groups/custom-coordinator-duties
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# If you are making a change in load list below, make the necessary changes in github actions too
-druid_extensions_loadList=["druid-kafka-indexing-service","mysql-metadata-storage","druid-s3-extensions","druid-basic-security","simple-client-sslcontext","druid-testing-tools","druid-lookups-cached-global","druid-histogram","druid-datasketches"]
-
-druid_coordinator_period_metadataStoreManagementPeriod=PT5S
-druid_sql_planner_authorizeSystemTablesDirectly=false
-
-#Testing kill supervisor custom coordinator duty
-druid_coordinator_kill_supervisor_on=false
-druid_coordinator_dutyGroups=["cleanupMetadata"]
-druid_coordinator_cleanupMetadata_duties=["killSupervisors"]
-druid_coordinator_cleanupMetadata_duty_killSupervisors_durationToRetain=PT0M
-druid_coordinator_cleanupMetadata_period=PT10S
diff --git a/integration-tests/docker/test-data/cds-coordinator-metadata-query-disabled-sample-data.sql b/integration-tests/docker/test-data/cds-coordinator-metadata-query-disabled-sample-data.sql
deleted file mode 100644
index abe0f115189..00000000000
--- a/integration-tests/docker/test-data/cds-coordinator-metadata-query-disabled-sample-data.sql
+++ /dev/null
@@ -1,20 +0,0 @@
--- Licensed to the Apache Software Foundation (ASF) under one or more
--- contributor license agreements. See the NOTICE file distributed with
--- this work for additional information regarding copyright ownership.
--- The ASF licenses this file to You under the Apache License, Version 2.0
--- (the "License"); you may not use this file except in compliance with
--- the License. You may obtain a copy of the License at
---
--- http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
-INSERT INTO druid_segments
(id,dataSource,created_date,start,end,partitioned,version,used,payload,used_status_last_updated)
VALUES
('twitterstream_2013-01-01T00:00:00.000Z_2013-01-02T00:00:00.000Z_2013-01-02T04:13:41.980Z_v9','twitterstream','2013-05-13T01:08:18.192Z','2013-01-01T00:00:00.000Z','2013-01-02T00:00:00.000Z',0,'2013-01-02T04:13:41.980Z_v9',1,'{\"dataSource\":\"twitterstream\",\"interval\":\"2013-01-01T00:00:00.000Z/2013-01-02T00:00:00.000Z\",\"version\":\"2013-01-02T04:13:41
[...]
-INSERT INTO druid_segments
(id,dataSource,created_date,start,end,partitioned,version,used,payload,used_status_last_updated)
VALUES
('twitterstream_2013-01-02T00:00:00.000Z_2013-01-03T00:00:00.000Z_2013-01-03T03:44:58.791Z_v9','twitterstream','2013-05-13T00:03:28.640Z','2013-01-02T00:00:00.000Z','2013-01-03T00:00:00.000Z',0,'2013-01-03T03:44:58.791Z_v9',1,'{\"dataSource\":\"twitterstream\",\"interval\":\"2013-01-02T00:00:00.000Z/2013-01-03T00:00:00.000Z\",\"version\":\"2013-01-03T03:44:58
[...]
-INSERT INTO druid_segments
(id,dataSource,created_date,start,end,partitioned,version,used,payload,used_status_last_updated)
VALUES
('twitterstream_2013-01-03T00:00:00.000Z_2013-01-04T00:00:00.000Z_2013-01-04T04:09:13.590Z_v9','twitterstream','2013-05-13T00:03:48.807Z','2013-01-03T00:00:00.000Z','2013-01-04T00:00:00.000Z',0,'2013-01-04T04:09:13.590Z_v9',1,'{\"dataSource\":\"twitterstream\",\"interval\":\"2013-01-03T00:00:00.000Z/2013-01-04T00:00:00.000Z\",\"version\":\"2013-01-04T04:09:13
[...]
-INSERT INTO druid_segments
(id,dataSource,created_date,start,end,partitioned,version,used,payload,used_status_last_updated)
VALUES
('wikipedia_editstream_2012-12-29T00:00:00.000Z_2013-01-10T08:00:00.000Z_2013-01-10T08:13:47.830Z_v9','wikipedia_editstream','2013-03-15T20:49:52.348Z','2012-12-29T00:00:00.000Z','2013-01-10T08:00:00.000Z',0,'2013-01-10T08:13:47.830Z_v9',1,'{\"dataSource\":\"wikipedia_editstream\",\"interval\":\"2012-12-29T00:00:00.000Z/2013-01-10T08:00:00.000Z\",\"version\":
[...]
-INSERT INTO druid_segments (id, dataSource, created_date, start, end,
partitioned, version, used, payload,used_status_last_updated) VALUES
('wikipedia_2013-08-01T00:00:00.000Z_2013-08-02T00:00:00.000Z_2013-08-08T21:22:48.989Z',
'wikipedia', '2013-08-08T21:26:23.799Z', '2013-08-01T00:00:00.000Z',
'2013-08-02T00:00:00.000Z', '0', '2013-08-08T21:22:48.989Z', '1',
'{\"dataSource\":\"wikipedia\",\"interval\":\"2013-08-01T00:00:00.000Z/2013-08-02T00:00:00.000Z\",\"version\":\"2013-08-08T21:22:
[...]
diff --git a/integration-tests/docker/test-data/cds-task-schema-publish-disabled-sample-data.sql b/integration-tests/docker/test-data/cds-task-schema-publish-disabled-sample-data.sql
deleted file mode 100644
index abe0f115189..00000000000
--- a/integration-tests/docker/test-data/cds-task-schema-publish-disabled-sample-data.sql
+++ /dev/null
@@ -1,20 +0,0 @@
--- Licensed to the Apache Software Foundation (ASF) under one or more
--- contributor license agreements. See the NOTICE file distributed with
--- this work for additional information regarding copyright ownership.
--- The ASF licenses this file to You under the Apache License, Version 2.0
--- (the "License"); you may not use this file except in compliance with
--- the License. You may obtain a copy of the License at
---
--- http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
-
-INSERT INTO druid_segments
(id,dataSource,created_date,start,end,partitioned,version,used,payload,used_status_last_updated)
VALUES
('twitterstream_2013-01-01T00:00:00.000Z_2013-01-02T00:00:00.000Z_2013-01-02T04:13:41.980Z_v9','twitterstream','2013-05-13T01:08:18.192Z','2013-01-01T00:00:00.000Z','2013-01-02T00:00:00.000Z',0,'2013-01-02T04:13:41.980Z_v9',1,'{\"dataSource\":\"twitterstream\",\"interval\":\"2013-01-01T00:00:00.000Z/2013-01-02T00:00:00.000Z\",\"version\":\"2013-01-02T04:13:41
[...]
-INSERT INTO druid_segments
(id,dataSource,created_date,start,end,partitioned,version,used,payload,used_status_last_updated)
VALUES
('twitterstream_2013-01-02T00:00:00.000Z_2013-01-03T00:00:00.000Z_2013-01-03T03:44:58.791Z_v9','twitterstream','2013-05-13T00:03:28.640Z','2013-01-02T00:00:00.000Z','2013-01-03T00:00:00.000Z',0,'2013-01-03T03:44:58.791Z_v9',1,'{\"dataSource\":\"twitterstream\",\"interval\":\"2013-01-02T00:00:00.000Z/2013-01-03T00:00:00.000Z\",\"version\":\"2013-01-03T03:44:58
[...]
-INSERT INTO druid_segments
(id,dataSource,created_date,start,end,partitioned,version,used,payload,used_status_last_updated)
VALUES
('twitterstream_2013-01-03T00:00:00.000Z_2013-01-04T00:00:00.000Z_2013-01-04T04:09:13.590Z_v9','twitterstream','2013-05-13T00:03:48.807Z','2013-01-03T00:00:00.000Z','2013-01-04T00:00:00.000Z',0,'2013-01-04T04:09:13.590Z_v9',1,'{\"dataSource\":\"twitterstream\",\"interval\":\"2013-01-03T00:00:00.000Z/2013-01-04T00:00:00.000Z\",\"version\":\"2013-01-04T04:09:13
[...]
-INSERT INTO druid_segments
(id,dataSource,created_date,start,end,partitioned,version,used,payload,used_status_last_updated)
VALUES
('wikipedia_editstream_2012-12-29T00:00:00.000Z_2013-01-10T08:00:00.000Z_2013-01-10T08:13:47.830Z_v9','wikipedia_editstream','2013-03-15T20:49:52.348Z','2012-12-29T00:00:00.000Z','2013-01-10T08:00:00.000Z',0,'2013-01-10T08:13:47.830Z_v9',1,'{\"dataSource\":\"wikipedia_editstream\",\"interval\":\"2012-12-29T00:00:00.000Z/2013-01-10T08:00:00.000Z\",\"version\":
[...]
-INSERT INTO druid_segments (id, dataSource, created_date, start, end,
partitioned, version, used, payload,used_status_last_updated) VALUES
('wikipedia_2013-08-01T00:00:00.000Z_2013-08-02T00:00:00.000Z_2013-08-08T21:22:48.989Z',
'wikipedia', '2013-08-08T21:26:23.799Z', '2013-08-01T00:00:00.000Z',
'2013-08-02T00:00:00.000Z', '0', '2013-08-08T21:22:48.989Z', '1',
'{\"dataSource\":\"wikipedia\",\"interval\":\"2013-08-01T00:00:00.000Z/2013-08-02T00:00:00.000Z\",\"version\":\"2013-08-08T21:22:
[...]
diff --git a/integration-tests/script/docker_compose_args.sh b/integration-tests/script/docker_compose_args.sh
index 87475239e62..fcf5bdafa7c 100644
--- a/integration-tests/script/docker_compose_args.sh
+++ b/integration-tests/script/docker_compose_args.sh
@@ -49,14 +49,6 @@ getComposeArgs()
then
# cluster with overriden properties for broker and coordinator
echo "-f ${DOCKERDIR}/docker-compose.centralized-datasource-schema.yml"
- elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "cds-task-schema-publish-disabled" ]
- then
- # cluster with overriden properties for broker and coordinator
- echo "-f
${DOCKERDIR}/docker-compose.cds-task-schema-publish-disabled.yml"
- elif [ "$DRUID_INTEGRATION_TEST_GROUP" =
"cds-coordinator-metadata-query-disabled" ]
- then
- # cluster with overriden properties for broker and coordinator
- echo "-f
${DOCKERDIR}/docker-compose.cds-coordinator-metadata-query-disabled.yml"
else
# default
echo "-f ${DOCKERDIR}/docker-compose.yml"
diff --git a/integration-tests/src/test/java/org/apache/druid/tests/TestNGGroup.java b/integration-tests/src/test/java/org/apache/druid/tests/TestNGGroup.java
index efdd26de20a..60600d633d2 100644
--- a/integration-tests/src/test/java/org/apache/druid/tests/TestNGGroup.java
+++ b/integration-tests/src/test/java/org/apache/druid/tests/TestNGGroup.java
@@ -33,8 +33,6 @@ public class TestNGGroup
public static final String TRANSACTIONAL_KAFKA_INDEX_SLOW = "kafka-transactional-index-slow";
- public static final String APPEND_INGESTION = "append-ingestion";
-
/**
* This group can only be run individually using -Dgroups=query since it requires specific test data setup.
*/
@@ -115,11 +113,5 @@ public class TestNGGroup
*/
public static final String KINESIS_DATA_FORMAT = "kinesis-data-format";
- public static final String CUSTOM_COORDINATOR_DUTIES = "custom-coordinator-duties";
-
public static final String CENTRALIZED_DATASOURCE_SCHEMA = "centralized-datasource-schema";
-
- public static final String CDS_TASK_SCHEMA_PUBLISH_DISABLED = "cds-task-schema-publish-disabled";
-
- public static final String CDS_COORDINATOR_METADATA_QUERY_DISABLED = "cds-coordinator-metadata-query-disabled";
}
diff --git a/integration-tests/src/test/java/org/apache/druid/tests/indexer/ITAppendBatchIndexTest.java b/integration-tests/src/test/java/org/apache/druid/tests/indexer/ITAppendBatchIndexTest.java
deleted file mode 100644
index ced35949626..00000000000
--- a/integration-tests/src/test/java/org/apache/druid/tests/indexer/ITAppendBatchIndexTest.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.druid.tests.indexer;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import org.apache.druid.indexer.partitions.DynamicPartitionsSpec;
-import org.apache.druid.indexer.partitions.HashedPartitionsSpec;
-import org.apache.druid.indexer.partitions.PartitionsSpec;
-import org.apache.druid.indexer.partitions.SingleDimensionPartitionsSpec;
-import org.apache.druid.java.util.common.Pair;
-import org.apache.druid.java.util.common.StringUtils;
-import org.apache.druid.java.util.common.logger.Logger;
-import org.apache.druid.testing.guice.DruidTestModuleFactory;
-import org.apache.druid.tests.TestNGGroup;
-import org.testng.annotations.DataProvider;
-import org.testng.annotations.Guice;
-import org.testng.annotations.Test;
-
-import java.io.Closeable;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.function.Function;
-
-@Test(groups = {TestNGGroup.APPEND_INGESTION,
TestNGGroup.CDS_TASK_SCHEMA_PUBLISH_DISABLED,
TestNGGroup.CDS_COORDINATOR_METADATA_QUERY_DISABLED})
-@Guice(moduleFactory = DruidTestModuleFactory.class)
-public class ITAppendBatchIndexTest extends AbstractITBatchIndexTest
-{
- private static final Logger LOG = new Logger(ITAppendBatchIndexTest.class);
- private static final String INDEX_TASK =
"/indexer/wikipedia_local_input_source_index_task.json";
- // This query file is for the initial ingestion which is one complete
dataset with roll up
- private static final String INDEX_QUERIES_INITIAL_INGESTION_RESOURCE =
"/indexer/wikipedia_index_queries.json";
- // This query file is for the initial ingestion plus the append ingestion
which are two complete dataset with roll
- // up within each dataset (roll up within the initial ingestion and roll up
within the append ingestion but not
- // roll up across both dataset).
- private static final String INDEX_QUERIES_POST_APPEND_PRE_COMPACT_RESOURCE =
"/indexer/wikipedia_double_ingestion_non_perfect_rollup_index_queries.json";
- // This query file is for the initial ingestion plus the append ingestion
plus a compaction task after the two ingestions.
- // This is two complete dataset with perfect roll up across both dataset.
- private static final String INDEX_QUERIES_POST_APPEND_POST_COMPACT_RESOURCE
= "/indexer/wikipedia_double_ingestion_perfect_rollup_index_queries.json";
-
- private static final String COMPACTION_TASK =
"/indexer/wikipedia_compaction_task.json";
-
- @DataProvider
- public static Object[][] resources()
- {
- return new Object[][]{
- // First index with dynamically-partitioned then append
dynamically-partitioned
- {
- ImmutableList.of(
- new DynamicPartitionsSpec(null, null),
- new DynamicPartitionsSpec(null, null)
- ),
- ImmutableList.of(4, 8, 2)
- },
- // First index with hash-partitioned then append
dynamically-partitioned
- {
- ImmutableList.of(
- new HashedPartitionsSpec(null, 3, ImmutableList.of("page",
"user")),
- new DynamicPartitionsSpec(null, null)
- ),
- ImmutableList.of(6, 10, 2)
- },
- // First index with range-partitioned then append
dynamically-partitioned
- {
- ImmutableList.of(
- new SingleDimensionPartitionsSpec(1000, null, "page", false),
- new DynamicPartitionsSpec(null, null)
- ),
- ImmutableList.of(2, 6, 2)
- }
- };
- }
-
- @Test(dataProvider = "resources")
- public void doIndexTest(List<PartitionsSpec> partitionsSpecList,
List<Integer> expectedSegmentCountList) throws Exception
- {
- final String indexDatasource = "wikipedia_index_test_" + UUID.randomUUID();
- try (
- final Closeable ignored1 = unloader(indexDatasource +
config.getExtraDatasourceNameSuffix());
- ) {
- // Submit initial ingestion task
- submitIngestionTaskAndVerify(indexDatasource, partitionsSpecList.get(0),
false, new Pair<>(false, false));
- verifySegmentsCountAndLoaded(indexDatasource,
expectedSegmentCountList.get(0));
- doTestQuery(indexDatasource, INDEX_QUERIES_INITIAL_INGESTION_RESOURCE);
- // Submit append ingestion task
- submitIngestionTaskAndVerify(indexDatasource, partitionsSpecList.get(1),
true, new Pair<>(false, false));
- verifySegmentsCountAndLoaded(indexDatasource,
expectedSegmentCountList.get(1));
- doTestQuery(indexDatasource,
INDEX_QUERIES_POST_APPEND_PRE_COMPACT_RESOURCE);
- // Submit compaction task
- compactData(indexDatasource, COMPACTION_TASK);
- // Verification post compaction
- verifySegmentsCountAndLoaded(indexDatasource,
expectedSegmentCountList.get(2));
- verifySegmentsCompacted(indexDatasource,
expectedSegmentCountList.get(2));
- doTestQuery(indexDatasource,
INDEX_QUERIES_POST_APPEND_POST_COMPACT_RESOURCE);
- }
- }
-
- private void submitIngestionTaskAndVerify(
- String indexDatasource,
- PartitionsSpec partitionsSpec,
- boolean appendToExisting,
- Pair<Boolean, Boolean> segmentAvailabilityConfirmationPair
- ) throws Exception
- {
- InputFormatDetails inputFormatDetails = InputFormatDetails.JSON;
- Map inputFormatMap = new ImmutableMap.Builder<String,
Object>().put("type", inputFormatDetails.getInputFormatType())
- .build();
- final Function<String, String> sqlInputSourcePropsTransform = spec -> {
- try {
- spec = StringUtils.replace(
- spec,
- "%%PARTITIONS_SPEC%%",
- jsonMapper.writeValueAsString(partitionsSpec)
- );
- spec = StringUtils.replace(
- spec,
- "%%INPUT_SOURCE_FILTER%%",
- "*" + inputFormatDetails.getFileExtension()
- );
- spec = StringUtils.replace(
- spec,
- "%%INPUT_SOURCE_BASE_DIR%%",
- "/resources/data/batch_index" +
inputFormatDetails.getFolderSuffix()
- );
- spec = StringUtils.replace(
- spec,
- "%%INPUT_FORMAT%%",
- jsonMapper.writeValueAsString(inputFormatMap)
- );
- spec = StringUtils.replace(
- spec,
- "%%APPEND_TO_EXISTING%%",
- jsonMapper.writeValueAsString(appendToExisting)
- );
- spec = StringUtils.replace(
- spec,
- "%%DROP_EXISTING%%",
- jsonMapper.writeValueAsString(false)
- );
- if (partitionsSpec instanceof DynamicPartitionsSpec) {
- spec = StringUtils.replace(
- spec,
- "%%FORCE_GUARANTEED_ROLLUP%%",
- jsonMapper.writeValueAsString(false)
- );
- } else if (partitionsSpec instanceof HashedPartitionsSpec ||
partitionsSpec instanceof SingleDimensionPartitionsSpec) {
- spec = StringUtils.replace(
- spec,
- "%%FORCE_GUARANTEED_ROLLUP%%",
- jsonMapper.writeValueAsString(true)
- );
- }
- return spec;
- }
- catch (Exception e) {
- throw new RuntimeException(e);
- }
- };
-
- doIndexTest(
- indexDatasource,
- INDEX_TASK,
- sqlInputSourcePropsTransform,
- null,
- false,
- false,
- true,
- segmentAvailabilityConfirmationPair
- );
- }
-}
diff --git a/integration-tests/src/test/java/org/apache/druid/tests/parallelized/ITCustomCoordinatorDuties.java b/integration-tests/src/test/java/org/apache/druid/tests/parallelized/ITCustomCoordinatorDuties.java
deleted file mode 100644
index 61635e5c758..00000000000
--- a/integration-tests/src/test/java/org/apache/druid/tests/parallelized/ITCustomCoordinatorDuties.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.druid.tests.parallelized;
-
-import org.apache.druid.testing.guice.DruidTestModuleFactory;
-import org.apache.druid.tests.TestNGGroup;
-import org.apache.druid.tests.indexer.AbstractKafkaIndexingServiceTest;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.Guice;
-import org.testng.annotations.Test;
-
-@Test(groups = TestNGGroup.CUSTOM_COORDINATOR_DUTIES)
-@Guice(moduleFactory = DruidTestModuleFactory.class)
-public class ITCustomCoordinatorDuties extends AbstractKafkaIndexingServiceTest
-{
- @Override
- public String getTestNamePrefix()
- {
- return "kafka_non_transactional_parallelized";
- }
-
- @BeforeClass
- public void beforeClass() throws Exception
- {
- doBeforeClass();
- }
-
- /**
- * This test verify the kill supervisor custom coordinator duties which is
enabled using the
- * custom coordinator pluggable configurations.
- */
- @Test
- public void testKafkaTerminatedSupervisorAutoCleanup() throws Exception
- {
- doTestTerminatedSupervisorAutoCleanup(false);
- }
-}
diff --git a/integration-tests/src/test/resources/indexer/wikipedia_compaction_task.json b/integration-tests/src/test/resources/indexer/wikipedia_compaction_task.json
deleted file mode 100644
index fb620c11aa2..00000000000
--- a/integration-tests/src/test/resources/indexer/wikipedia_compaction_task.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
- "type" : "compact",
- "dataSource" : "%%DATASOURCE%%",
- "ioConfig" : {
- "type": "compact",
- "inputSpec": {
- "type": "interval",
- "interval": "2013-08-31/2013-09-02"
- }
- },
- "context" : {
- "storeCompactionState" : true
- }
-}
\ No newline at end of file
diff --git a/integration-tests/src/test/resources/indexer/wikipedia_double_ingestion_non_perfect_rollup_index_queries.json b/integration-tests/src/test/resources/indexer/wikipedia_double_ingestion_non_perfect_rollup_index_queries.json
deleted file mode 100644
index 586da63e3db..00000000000
--- a/integration-tests/src/test/resources/indexer/wikipedia_double_ingestion_non_perfect_rollup_index_queries.json
+++ /dev/null
@@ -1,143 +0,0 @@
-[
- {
- "description": "timeseries, 1 agg, all",
- "query":{
- "queryType" : "timeBoundary",
- "dataSource": "%%DATASOURCE%%"
- },
- "expectedResults":[
- {
- "timestamp" : "2013-08-31T01:02:33.000Z",
- "result" : {
- "minTime" : "2013-08-31T01:02:33.000Z",
- "maxTime" : "2013-09-01T12:41:27.000Z"
- }
- }
- ]
- },
- {
- "description": "timeseries, datasketch aggs, all",
- "query":{
- "queryType" : "timeseries",
- "dataSource": "%%DATASOURCE%%",
- "granularity":"day",
- "intervals":[
- "2013-08-31T00:00/2013-09-01T00:00"
- ],
- "filter":null,
- "aggregations":[
- {
- "type": "HLLSketchMerge",
- "name": "approxCountHLL",
- "fieldName": "HLLSketchBuild",
- "lgK": 12,
- "tgtHllType": "HLL_4",
- "round": true
- },
- {
- "type":"thetaSketch",
- "name":"approxCountTheta",
- "fieldName":"thetaSketch",
- "size":16384,
- "shouldFinalize":true,
- "isInputThetaSketch":false,
- "errorBoundsStdDev":null
- },
- {
- "type":"quantilesDoublesSketch",
- "name":"quantilesSketch",
- "fieldName":"quantilesDoublesSketch",
- "k":128
- }
- ]
- },
- "expectedResults":[
- {
- "timestamp" : "2013-08-31T00:00:00.000Z",
- "result" : {
- "quantilesSketch":10,
- "approxCountTheta":5.0,
- "approxCountHLL":5
- }
- }
- ]
- },
- {
- "description":"having spec on post aggregation",
- "query":{
- "queryType":"groupBy",
- "dataSource":"%%DATASOURCE%%",
- "granularity":"day",
- "dimensions":[
- "page"
- ],
- "filter":{
- "type":"selector",
- "dimension":"language",
- "value":"zh"
- },
- "aggregations":[
- {
- "type":"count",
- "name":"rows"
- },
- {
- "type":"longSum",
- "fieldName":"added",
- "name":"added_count"
- }
- ],
- "postAggregations": [
- {
- "type":"arithmetic",
- "name":"added_count_times_ten",
- "fn":"*",
- "fields":[
- {"type":"fieldAccess", "name":"added_count",
"fieldName":"added_count"},
- {"type":"constant", "name":"const", "value":10}
- ]
- }
- ],
- "having":{"type":"greaterThan", "aggregation":"added_count_times_ten",
"value":9000},
- "intervals":[
- "2013-08-31T00:00/2013-09-01T00:00"
- ]
- },
- "expectedResults":[ {
- "version" : "v1",
- "timestamp" : "2013-08-31T00:00:00.000Z",
- "event" : {
- "added_count_times_ten" : 18100.0,
- "page" : "Crimson Typhoon",
- "added_count" : 1810,
- "rows" : 2
- }
- } ]
- },
- {
- "description": "timeseries, count aggs, all",
- "query":{
- "queryType" : "timeseries",
- "dataSource": "%%DATASOURCE%%",
- "granularity":"day",
- "intervals":[
- "2013-08-31T00:00/2013-09-01T00:00"
- ],
- "filter":null,
- "aggregations":[
- {
- "type": "count",
- "name": "rows"
- }
- ]
- },
- "expectedResults":[
- {
- "timestamp" : "2013-08-31T00:00:00.000Z",
- "result" : {
- "rows":10
- }
- }
- ]
- }
-]
\ No newline at end of file
diff --git a/integration-tests/src/test/resources/indexer/wikipedia_double_ingestion_perfect_rollup_index_queries.json b/integration-tests/src/test/resources/indexer/wikipedia_double_ingestion_perfect_rollup_index_queries.json
deleted file mode 100644
index eaa9592ca26..00000000000
--- a/integration-tests/src/test/resources/indexer/wikipedia_double_ingestion_perfect_rollup_index_queries.json
+++ /dev/null
@@ -1,143 +0,0 @@
-[
- {
- "description": "timeseries, 1 agg, all",
- "query":{
- "queryType" : "timeBoundary",
- "dataSource": "%%DATASOURCE%%"
- },
- "expectedResults":[
- {
- "timestamp" : "2013-08-31T01:02:33.000Z",
- "result" : {
- "minTime" : "2013-08-31T01:02:33.000Z",
- "maxTime" : "2013-09-01T12:41:27.000Z"
- }
- }
- ]
- },
- {
- "description": "timeseries, datasketch aggs, all",
- "query":{
- "queryType" : "timeseries",
- "dataSource": "%%DATASOURCE%%",
- "granularity":"day",
- "intervals":[
- "2013-08-31T00:00/2013-09-01T00:00"
- ],
- "filter":null,
- "aggregations":[
- {
- "type": "HLLSketchMerge",
- "name": "approxCountHLL",
- "fieldName": "HLLSketchBuild",
- "lgK": 12,
- "tgtHllType": "HLL_4",
- "round": true
- },
- {
- "type":"thetaSketch",
- "name":"approxCountTheta",
- "fieldName":"thetaSketch",
- "size":16384,
- "shouldFinalize":true,
- "isInputThetaSketch":false,
- "errorBoundsStdDev":null
- },
- {
- "type":"quantilesDoublesSketch",
- "name":"quantilesSketch",
- "fieldName":"quantilesDoublesSketch",
- "k":128
- }
- ]
- },
- "expectedResults":[
- {
- "timestamp" : "2013-08-31T00:00:00.000Z",
- "result" : {
- "quantilesSketch":10,
- "approxCountTheta":5.0,
- "approxCountHLL":5
- }
- }
- ]
- },
- {
- "description":"having spec on post aggregation",
- "query":{
- "queryType":"groupBy",
- "dataSource":"%%DATASOURCE%%",
- "granularity":"day",
- "dimensions":[
- "page"
- ],
- "filter":{
- "type":"selector",
- "dimension":"language",
- "value":"zh"
- },
- "aggregations":[
- {
- "type":"count",
- "name":"rows"
- },
- {
- "type":"longSum",
- "fieldName":"added",
- "name":"added_count"
- }
- ],
- "postAggregations": [
- {
- "type":"arithmetic",
- "name":"added_count_times_ten",
- "fn":"*",
- "fields":[
- {"type":"fieldAccess", "name":"added_count",
"fieldName":"added_count"},
- {"type":"constant", "name":"const", "value":10}
- ]
- }
- ],
- "having":{"type":"greaterThan", "aggregation":"added_count_times_ten",
"value":9000},
- "intervals":[
- "2013-08-31T00:00/2013-09-01T00:00"
- ]
- },
- "expectedResults":[ {
- "version" : "v1",
- "timestamp" : "2013-08-31T00:00:00.000Z",
- "event" : {
- "added_count_times_ten" : 18100.0,
- "page" : "Crimson Typhoon",
- "added_count" : 1810,
- "rows" : 1
- }
- } ]
- },
- {
- "description": "timeseries, count aggs, all",
- "query":{
- "queryType" : "timeseries",
- "dataSource": "%%DATASOURCE%%",
- "granularity":"day",
- "intervals":[
- "2013-08-31T00:00/2013-09-01T00:00"
- ],
- "filter":null,
- "aggregations":[
- {
- "type": "count",
- "name": "rows"
- }
- ]
- },
- "expectedResults":[
- {
- "timestamp" : "2013-08-31T00:00:00.000Z",
- "result" : {
- "rows":5
- }
- }
- ]
- }
-]
\ No newline at end of file
diff --git a/integration-tests/src/test/resources/indexer/wikipedia_local_input_source_index_task.json b/integration-tests/src/test/resources/indexer/wikipedia_local_input_source_index_task.json
deleted file mode 100644
index ee0fd73021a..00000000000
--- a/integration-tests/src/test/resources/indexer/wikipedia_local_input_source_index_task.json
+++ /dev/null
@@ -1,89 +0,0 @@
-{
- "type": "index_parallel",
- "spec": {
- "dataSchema": {
- "dataSource": "%%DATASOURCE%%",
- "timestampSpec": {
- "column": "timestamp"
- },
- "dimensionsSpec": {
- "dimensions": [
- "page",
- {"type": "string", "name": "language", "createBitmapIndex": false},
- "user",
- "unpatrolled",
- "newPage",
- "robot",
- "anonymous",
- "namespace",
- "continent",
- "country",
- "region",
- "city"
- ]
- },
- "metricsSpec": [
- {
- "type": "count",
- "name": "count"
- },
- {
- "type": "doubleSum",
- "name": "added",
- "fieldName": "added"
- },
- {
- "type": "doubleSum",
- "name": "deleted",
- "fieldName": "deleted"
- },
- {
- "type": "doubleSum",
- "name": "delta",
- "fieldName": "delta"
- },
- {
- "name": "thetaSketch",
- "type": "thetaSketch",
- "fieldName": "user"
- },
- {
- "name": "quantilesDoublesSketch",
- "type": "quantilesDoublesSketch",
- "fieldName": "delta"
- },
- {
- "name": "HLLSketchBuild",
- "type": "HLLSketchBuild",
- "fieldName": "user"
- }
- ],
- "granularitySpec": {
- "segmentGranularity": "DAY",
- "queryGranularity": "second",
- "intervals" : [ "2013-08-31/2013-09-02" ]
- }
- },
- "ioConfig": {
- "type": "index_parallel",
- "inputSource": {
- "type": "local",
- "filter" : "%%INPUT_SOURCE_FILTER%%",
- "baseDir": "%%INPUT_SOURCE_BASE_DIR%%"
- },
- "appendToExisting": %%APPEND_TO_EXISTING%%,
- "dropExisting": %%DROP_EXISTING%%,
- "inputFormat": %%INPUT_FORMAT%%
- },
- "tuningConfig": {
- "type": "index_parallel",
- "maxNumConcurrentSubTasks": 4,
- "splitHintSpec": {
- "type": "maxSize",
- "maxNumFiles": 1
- },
- "forceGuaranteedRollup": %%FORCE_GUARANTEED_ROLLUP%%,
- "partitionsSpec": %%PARTITIONS_SPEC%%
- }
- }
-}
\ No newline at end of file
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]