This is an automated email from the ASF dual-hosted git repository.

capistrant pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/druid.git


The following commit(s) were added to refs/heads/master by this push:
     new e9fb5493f98 Improve revised integration tests for local use by devs 
(#17838)
e9fb5493f98 is described below

commit e9fb5493f98c9110b88c0608f9a76cd01329d35f
Author: Lucas Capistrant <[email protected]>
AuthorDate: Fri Apr 4 08:30:11 2025 -0500

    Improve revised integration tests for local use by devs (#17838)
    
    * Improve revised integration tests for local use by devs
    
    The goal of this work is to make running the revised ITs locally easier for 
devs while not impacting the robustness of ITs running in Github actions CI 
pipeline
    
    * Internalize dependency startup conditional within template.py
    
    Having dependency-service startup conditional setup use a centralized 
lookup with a sane default simplifies individual IT cases and will drive 
consistency across all existing and future IT cases
    
    * Remove the db dir from shared dir and stop incorrectly mounting a mysql 
docker entrypoint script
    
    The mysql docker container allows you to provide startup scripts for the 
database in this entrypoint directory. We were mounting a directory named like 
a file, and it confused the docker startup for mysql, forcing a restart on 
startup. This restart is now problematic since we are telling docker-compose to 
wait for mysql health before starting services that depend on it. Since we 
aren't using the startup script mechanism at all, simply removing it seems like 
the cleanest strategy
    
    * Clean up defunct comment
    
    This comment has lost meaning after the previous change removed the mysql 
init db directory, as it is no longer needed
---
 integration-tests-ex/cases/cluster.sh              |   4 +-
 .../cluster/AzureDeepStorage/docker-compose.py     |   4 +
 .../docker-compose.py                              |  27 +++--
 .../BackwardCompatibilityMain/docker-compose.yaml  | 111 ---------------------
 .../cases/cluster/BatchIndex/docker-compose.py     |   8 +-
 .../cases/cluster/Common/dependencies.yaml         |   9 +-
 .../cases/cluster/GcsDeepStorage/docker-compose.py |   4 +
 .../cluster/HighAvailability/docker-compose.py     |  10 +-
 .../cluster/MultiStageQuery/docker-compose.py      |   4 +
 .../docker-compose.py                              |  27 +++--
 .../MultiStageQueryWithMM/docker-compose.yaml      |  98 ------------------
 .../{MultiStageQuery => Query}/docker-compose.py   |  17 +++-
 .../cases/cluster/Query/docker-compose.yaml        | 105 -------------------
 .../cases/cluster/S3DeepStorage/docker-compose.py  |   8 +-
 .../cases/cluster/Security/docker-compose.py       |   4 +
 integration-tests-ex/cases/cluster/template.py     |  38 ++++++-
 .../druid/testsEx/auth/ITSecurityBasicQuery.java   |  24 +++--
 17 files changed, 138 insertions(+), 364 deletions(-)

diff --git a/integration-tests-ex/cases/cluster.sh 
b/integration-tests-ex/cases/cluster.sh
index bc626a559c4..559392cb14f 100755
--- a/integration-tests-ex/cases/cluster.sh
+++ b/integration-tests-ex/cases/cluster.sh
@@ -128,13 +128,11 @@ function show_status {
 
 function build_shared_dir {
   mkdir -p $SHARED_DIR
-  # Must start with an empty DB to keep MySQL happy
-  sudo rm -rf $SHARED_DIR/db
   mkdir -p $SHARED_DIR/logs
   mkdir -p $SHARED_DIR/tasklogs
-  mkdir -p $SHARED_DIR/db
   mkdir -p $SHARED_DIR/kafka
   mkdir -p $SHARED_DIR/resources
+  mkdir -p $SHARED_DIR/export
   cp $BASE_MODULE_DIR/assets/log4j2.xml $SHARED_DIR/resources
   # Permissions in some build setups are screwed up. See above. The user
   # which runs Docker does not have permission to write into the /shared
diff --git 
a/integration-tests-ex/cases/cluster/AzureDeepStorage/docker-compose.py 
b/integration-tests-ex/cases/cluster/AzureDeepStorage/docker-compose.py
index 3c893832a3a..db255d53db6 100644
--- a/integration-tests-ex/cases/cluster/AzureDeepStorage/docker-compose.py
+++ b/integration-tests-ex/cases/cluster/AzureDeepStorage/docker-compose.py
@@ -40,4 +40,8 @@ class Template(BaseTemplate):
     def define_data_dir(self, service):
         self.add_volume(service, '../data', '/resources')
 
+    # No kafka dependency in this cluster
+    def define_kafka(self):
+        pass
+
 generate(__file__, Template())
diff --git a/integration-tests-ex/cases/cluster/Security/docker-compose.py 
b/integration-tests-ex/cases/cluster/BackwardCompatibilityMain/docker-compose.py
similarity index 50%
copy from integration-tests-ex/cases/cluster/Security/docker-compose.py
copy to 
integration-tests-ex/cases/cluster/BackwardCompatibilityMain/docker-compose.py
index 452026750bc..37ab70e7538 100644
--- a/integration-tests-ex/cases/cluster/Security/docker-compose.py
+++ 
b/integration-tests-ex/cases/cluster/BackwardCompatibilityMain/docker-compose.py
@@ -14,19 +14,26 @@
 # limitations under the License.
 
 from template import BaseTemplate, generate
+from template import COORDINATOR, MIDDLE_MANAGER
 
 class Template(BaseTemplate):
 
-    def define_indexer(self):
-        service = super().define_indexer()
-        self.add_property(service, 'druid.msq.intermediate.storage.enable', 
'true')
-        self.add_property(service, 'druid.msq.intermediate.storage.type', 
'local')
-        self.add_property(service, 'druid.msq.intermediate.storage.basePath', 
'/shared/durablestorage/')
-        self.add_property(service, 'druid.export.storage.baseDir', '/')
 
-    def extend_druid_service(self, service):
-        self.add_env_file(service, '../Common/environment-configs/auth.env')
-        self.add_env(service, 'druid_test_loadList', 'druid-basic-security')
+    def define_coordinator(self):
+        service_name = COORDINATOR
+        service = self.define_master_service(service_name, COORDINATOR)
+        self.add_env(service, 'druid_host', service_name)
+        self.add_env(service, 'druid_manager_segments_pollDuration', 'PT5S')
+        self.add_env(service, 'druid_coordinator_period', 'PT10S')
 
+    def define_indexer(self):
+        '''
+        Override the indexer to MIDDLE_MANAGER
+        '''
+        service = self.define_std_indexer(MIDDLE_MANAGER)
+        self.add_env(service, 'druid_msq_intermediate_storage_enable', 'true')
+        self.add_env(service, 'druid_msq_intermediate_storage_type', 'local')
+        self.add_env(service, 'druid_msq_intermediate_storage_basePath', 
'/shared/durablestorage/')
+        self.add_env(service, 'druid_export_storage_baseDir', '/')
 
-generate(__file__, Template())
+generate(__file__, Template())
\ No newline at end of file
diff --git 
a/integration-tests-ex/cases/cluster/BackwardCompatibilityMain/docker-compose.yaml
 
b/integration-tests-ex/cases/cluster/BackwardCompatibilityMain/docker-compose.yaml
deleted file mode 100644
index 4fbf0f71197..00000000000
--- 
a/integration-tests-ex/cases/cluster/BackwardCompatibilityMain/docker-compose.yaml
+++ /dev/null
@@ -1,111 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-networks:
-  druid-it-net:
-    name: druid-it-net
-    ipam:
-      config:
-        - subnet: 172.172.172.0/24
-
-services:
-  zookeeper:
-    extends:
-      file: ../Common/dependencies.yaml
-      service: zookeeper
-
-  metadata:
-    extends:
-      file: ../Common/dependencies.yaml
-      service: metadata
-
-  coordinator:
-    extends:
-      file: ../Common/druid.yaml
-      service: coordinator
-    image: ${DRUID_PREVIOUS_IT_IMAGE_NAME}
-    container_name: coordinator
-    environment:
-      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
-      # The frequency with which the coordinator polls the database
-      # for changes. The DB population code has to wait at least this
-      # long for the coordinator to notice changes.
-      - druid_manager_segments_pollDuration=PT5S
-      - druid_coordinator_period=PT10S
-    depends_on:
-      - zookeeper
-      - metadata
-
-  overlord:
-    extends:
-      file: ../Common/druid.yaml
-      service: overlord
-    image: ${DRUID_PREVIOUS_IT_IMAGE_NAME}
-    container_name: overlord
-    environment:
-      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
-    depends_on:
-      - zookeeper
-      - metadata
-
-  broker:
-    extends:
-      file: ../Common/druid.yaml
-      service: broker
-    environment:
-      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
-    depends_on:
-      - zookeeper
-
-  router:
-    extends:
-      file: ../Common/druid.yaml
-      service: router
-    environment:
-      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
-    depends_on:
-      - zookeeper
-
-  historical:
-    extends:
-      file: ../Common/druid.yaml
-      service: historical
-    environment:
-      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
-    depends_on:
-      - zookeeper
-
-  middlemanager:
-    extends:
-      file: ../Common/druid.yaml
-      service: middlemanager
-    environment:
-      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
-      - druid_msq_intermediate_storage_enable=true
-      - druid_msq_intermediate_storage_type=local
-      - druid_msq_intermediate_storage_basePath=/shared/durablestorage/
-      - druid_export_storage_baseDir=/
-    volumes:
-      # Test data
-      - ../../resources:/resources
-    depends_on:
-      - zookeeper
-
-  kafka:
-    extends:
-      file: ../Common/dependencies.yaml
-      service: kafka
-    depends_on:
-      - zookeeper
diff --git a/integration-tests-ex/cases/cluster/BatchIndex/docker-compose.py 
b/integration-tests-ex/cases/cluster/BatchIndex/docker-compose.py
index 27f8acdbffb..8129e0fb76a 100644
--- a/integration-tests-ex/cases/cluster/BatchIndex/docker-compose.py
+++ b/integration-tests-ex/cases/cluster/BatchIndex/docker-compose.py
@@ -15,4 +15,10 @@
 
 from template import BaseTemplate, generate
 
-generate(__file__, BaseTemplate())
+class Template(BaseTemplate):
+
+    # No kafka dependency in this cluster
+    def define_kafka(self):
+        pass
+
+generate(__file__, Template())
diff --git a/integration-tests-ex/cases/cluster/Common/dependencies.yaml 
b/integration-tests-ex/cases/cluster/Common/dependencies.yaml
index 0409c30bf53..745da3b407c 100644
--- a/integration-tests-ex/cases/cluster/Common/dependencies.yaml
+++ b/integration-tests-ex/cases/cluster/Common/dependencies.yaml
@@ -86,13 +86,18 @@ services:
         ipv4_address: 172.172.172.3
     ports:
       - 3306:3306
-    volumes:
-      - ${SHARED_DIR}/db/init.sql:/docker-entrypoint-initdb.d/init.sql
     environment:
       MYSQL_ROOT_PASSWORD: driud
       MYSQL_DATABASE: druid
       MYSQL_USER: druid
       MYSQL_PASSWORD: diurd
+    healthcheck:
+      test: "mysql -uroot -pdriud druid -e 'select 1'"
+      interval: 5s
+      timeout: 3s
+      retries: 60
+      start_period: 3m
+      start_interval: 15s
 
   minio:
     container_name: minio
diff --git 
a/integration-tests-ex/cases/cluster/GcsDeepStorage/docker-compose.py 
b/integration-tests-ex/cases/cluster/GcsDeepStorage/docker-compose.py
index 7f91ce021e7..588866b1258 100644
--- a/integration-tests-ex/cases/cluster/GcsDeepStorage/docker-compose.py
+++ b/integration-tests-ex/cases/cluster/GcsDeepStorage/docker-compose.py
@@ -42,4 +42,8 @@ class Template(BaseTemplate):
     def define_data_dir(self, service):
         self.add_volume(service, '../data', '/resources')
 
+    # No kafka dependency in this cluster
+    def define_kafka(self):
+        pass
+
 generate(__file__, Template())
diff --git 
a/integration-tests-ex/cases/cluster/HighAvailability/docker-compose.py 
b/integration-tests-ex/cases/cluster/HighAvailability/docker-compose.py
index b00fc0fa1f2..c958cbcaf15 100644
--- a/integration-tests-ex/cases/cluster/HighAvailability/docker-compose.py
+++ b/integration-tests-ex/cases/cluster/HighAvailability/docker-compose.py
@@ -39,7 +39,7 @@ class Template(BaseTemplate):
         self.add_env(service, 'DRUID_INSTANCE', 'two')
         self.add_env(service, 'druid_host', service_name)
         service['ports'] = [ '18081:8081', '18281:8281', '15006:8000' ]
-        self.add_depends(service, [ ZOO_KEEPER, METADATA ] )
+        self.add_depends(service, [ZOO_KEEPER, METADATA])
 
     def define_overlord(self):
         self.define_overlord_one()
@@ -59,7 +59,7 @@ class Template(BaseTemplate):
         self.add_env(service, 'DRUID_INSTANCE', 'two')
         self.add_env(service, 'druid_host', service_name)
         service['ports'] = [ '18090:8090', '18290:8290', '15009:8000' ]
-        self.add_depends(service, [ ZOO_KEEPER, METADATA ] )
+        self.add_depends(service, [ZOO_KEEPER, METADATA])
 
     # No indexer in this cluster
     def define_indexer(self):
@@ -69,6 +69,10 @@ class Template(BaseTemplate):
     def define_historical(self):
         pass
 
+    # No kafka dependency in this cluster
+    def define_kafka(self):
+      pass
+
     # The custom node role has no base definition. Also, there is
     # no environment file: the needed environment settings are
     # given here.
@@ -80,6 +84,6 @@ class Template(BaseTemplate):
         self.add_env(service, 'SERVICE_DRUID_JAVA_OPTS', '-Xmx64m -Xms64m')
         self.add_env(service, 'druid_host', service_name)
         service['ports'] = [ '50011:50011', '9301:9301', '9501:9501', 
'5010:8000' ]
-        self.add_depends(service, [ ZOO_KEEPER ] )
+        self.add_depends(service, [ZOO_KEEPER])
 
 generate(__file__, Template())
diff --git 
a/integration-tests-ex/cases/cluster/MultiStageQuery/docker-compose.py 
b/integration-tests-ex/cases/cluster/MultiStageQuery/docker-compose.py
index 159a0638dd1..a172c33aee3 100644
--- a/integration-tests-ex/cases/cluster/MultiStageQuery/docker-compose.py
+++ b/integration-tests-ex/cases/cluster/MultiStageQuery/docker-compose.py
@@ -24,4 +24,8 @@ class Template(BaseTemplate):
         self.add_property(service, 'druid.msq.intermediate.storage.basePath', 
'/shared/durablestorage/')
         self.add_property(service, 'druid.export.storage.baseDir', '/')
 
+    # No kafka dependency in this cluster
+    def define_kafka(self):
+        pass
+
 generate(__file__, Template())
diff --git a/integration-tests-ex/cases/cluster/Security/docker-compose.py 
b/integration-tests-ex/cases/cluster/MultiStageQueryWithMM/docker-compose.py
similarity index 51%
copy from integration-tests-ex/cases/cluster/Security/docker-compose.py
copy to 
integration-tests-ex/cases/cluster/MultiStageQueryWithMM/docker-compose.py
index 452026750bc..139d6acc1e1 100644
--- a/integration-tests-ex/cases/cluster/Security/docker-compose.py
+++ b/integration-tests-ex/cases/cluster/MultiStageQueryWithMM/docker-compose.py
@@ -14,19 +14,28 @@
 # limitations under the License.
 
 from template import BaseTemplate, generate
+from template import COORDINATOR, MIDDLE_MANAGER
 
 class Template(BaseTemplate):
 
-    def define_indexer(self):
-        service = super().define_indexer()
-        self.add_property(service, 'druid.msq.intermediate.storage.enable', 
'true')
-        self.add_property(service, 'druid.msq.intermediate.storage.type', 
'local')
-        self.add_property(service, 'druid.msq.intermediate.storage.basePath', 
'/shared/durablestorage/')
-        self.add_property(service, 'druid.export.storage.baseDir', '/')
+    def define_coordinator(self):
+        service_name = COORDINATOR
+        service = self.define_master_service(service_name, COORDINATOR)
+        self.add_env(service, 'druid_host', service_name)
+        self.add_env(service, 'druid_manager_segments_pollDuration', 'PT5S')
+        self.add_env(service, 'druid_coordinator_period', 'PT10S')
 
-    def extend_druid_service(self, service):
-        self.add_env_file(service, '../Common/environment-configs/auth.env')
-        self.add_env(service, 'druid_test_loadList', 'druid-basic-security')
+    def define_indexer(self):
+        '''
+        Override the indexer to MIDDLE_MANAGER
+        '''
+        service = self.define_std_indexer(MIDDLE_MANAGER)
+        self.add_env(service, 'druid_msq_intermediate_storage_enable', 'true')
+        self.add_env(service, 'druid_msq_intermediate_storage_type', 'local')
+        self.add_env(service, 'druid_msq_intermediate_storage_basePath', 
'/shared/durablestorage/')
 
+    # No kafka dependency in this cluster
+    def define_kafka(self):
+        pass
 
 generate(__file__, Template())
diff --git 
a/integration-tests-ex/cases/cluster/MultiStageQueryWithMM/docker-compose.yaml 
b/integration-tests-ex/cases/cluster/MultiStageQueryWithMM/docker-compose.yaml
deleted file mode 100644
index 2ce345063ac..00000000000
--- 
a/integration-tests-ex/cases/cluster/MultiStageQueryWithMM/docker-compose.yaml
+++ /dev/null
@@ -1,98 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-networks:
-  druid-it-net:
-    name: druid-it-net
-    ipam:
-      config:
-        - subnet: 172.172.172.0/24
-
-services:
-  zookeeper:
-    extends:
-      file: ../Common/dependencies.yaml
-      service: zookeeper
-
-  metadata:
-    extends:
-      file: ../Common/dependencies.yaml
-      service: metadata
-
-  coordinator:
-    extends:
-      file: ../Common/druid.yaml
-      service: coordinator
-    container_name: coordinator
-    environment:
-      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
-      - druid_manager_segments_pollDuration=PT5S
-      - druid_coordinator_period=PT10S
-    depends_on:
-      - zookeeper
-      - metadata
-
-  overlord:
-    extends:
-      file: ../Common/druid.yaml
-      service: overlord
-    container_name: overlord
-    environment:
-      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
-    depends_on:
-      - zookeeper
-      - metadata
-
-  broker:
-    extends:
-      file: ../Common/druid.yaml
-      service: broker
-    environment:
-      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
-    depends_on:
-      - zookeeper
-
-  router:
-    extends:
-      file: ../Common/druid.yaml
-      service: router
-    environment:
-      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
-    depends_on:
-      - zookeeper
-
-  historical:
-    extends:
-      file: ../Common/druid.yaml
-      service: historical
-    environment:
-      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
-    depends_on:
-      - zookeeper
-
-  middlemanager:
-    extends:
-      file: ../Common/druid.yaml
-      service: middlemanager
-    environment:
-      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
-      - druid_msq_intermediate_storage_enable=true
-      - druid_msq_intermediate_storage_type=local
-      - druid_msq_intermediate_storage_basePath=/shared/durablestorage/
-    volumes:
-      # Test data
-      - ../../resources:/resources
-    depends_on:
-      - zookeeper
diff --git 
a/integration-tests-ex/cases/cluster/MultiStageQuery/docker-compose.py 
b/integration-tests-ex/cases/cluster/Query/docker-compose.py
similarity index 64%
copy from integration-tests-ex/cases/cluster/MultiStageQuery/docker-compose.py
copy to integration-tests-ex/cases/cluster/Query/docker-compose.py
index 159a0638dd1..0e6eb42a28f 100644
--- a/integration-tests-ex/cases/cluster/MultiStageQuery/docker-compose.py
+++ b/integration-tests-ex/cases/cluster/Query/docker-compose.py
@@ -14,14 +14,21 @@
 # limitations under the License.
 
 from template import BaseTemplate, generate
+from template import COORDINATOR, MIDDLE_MANAGER
 
 class Template(BaseTemplate):
 
+    def define_coordinator(self):
+        service_name = COORDINATOR
+        service = self.define_master_service(service_name, COORDINATOR)
+        self.add_env(service, 'druid_host', service_name)
+        self.add_env(service, 'druid_manager_segments_pollDuration', 'PT5S')
+        self.add_env(service, 'druid_coordinator_period', 'PT10S')
+
     def define_indexer(self):
-        service = super().define_indexer()
-        self.add_property(service, 'druid.msq.intermediate.storage.enable', 
'true')
-        self.add_property(service, 'druid.msq.intermediate.storage.type', 
'local')
-        self.add_property(service, 'druid.msq.intermediate.storage.basePath', 
'/shared/durablestorage/')
-        self.add_property(service, 'druid.export.storage.baseDir', '/')
+      '''
+      Override the indexer to MIDDLE_MANAGER
+      '''
+      return self.define_std_indexer(MIDDLE_MANAGER)
 
 generate(__file__, Template())
diff --git a/integration-tests-ex/cases/cluster/Query/docker-compose.yaml 
b/integration-tests-ex/cases/cluster/Query/docker-compose.yaml
deleted file mode 100644
index 0b7c461e126..00000000000
--- a/integration-tests-ex/cases/cluster/Query/docker-compose.yaml
+++ /dev/null
@@ -1,105 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-networks:
-  druid-it-net:
-    name: druid-it-net
-    ipam:
-      config:
-        - subnet: 172.172.172.0/24
-
-services:
-  zookeeper:
-    extends:
-      file: ../Common/dependencies.yaml
-      service: zookeeper
-
-  metadata:
-    extends:
-      file: ../Common/dependencies.yaml
-      service: metadata
-
-  coordinator:
-    extends:
-      file: ../Common/druid.yaml
-      service: coordinator
-    container_name: coordinator
-    environment:
-      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
-      # The frequency with which the coordinator polls the database
-      # for changes. The DB population code has to wait at least this
-      # long for the coordinator to notice changes.
-      - druid_manager_segments_pollDuration=PT5S
-      - druid_coordinator_period=PT10S
-    depends_on:
-      - zookeeper
-      - metadata
-
-  overlord:
-    extends:
-      file: ../Common/druid.yaml
-      service: overlord
-    container_name: overlord
-    environment:
-      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
-    depends_on:
-      - zookeeper
-      - metadata
-
-  broker:
-    extends:
-      file: ../Common/druid.yaml
-      service: broker
-    environment:
-      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
-    depends_on:
-      - zookeeper
-
-  router:
-    extends:
-      file: ../Common/druid.yaml
-      service: router
-    environment:
-      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
-    depends_on:
-      - zookeeper
-
-  historical:
-    extends:
-      file: ../Common/druid.yaml
-      service: historical
-    environment:
-      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
-    depends_on:
-      - zookeeper
-
-  middlemanager:
-    extends:
-      file: ../Common/druid.yaml
-      service: middlemanager
-    environment:
-      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
-    volumes:
-      # Test data
-      - ../../resources:/resources
-    depends_on:
-      - zookeeper
-
-  kafka:
-    extends:
-      file: ../Common/dependencies.yaml
-      service: kafka
-    depends_on:
-      - zookeeper
diff --git a/integration-tests-ex/cases/cluster/S3DeepStorage/docker-compose.py 
b/integration-tests-ex/cases/cluster/S3DeepStorage/docker-compose.py
index 772ce333d1b..005ff8d143f 100644
--- a/integration-tests-ex/cases/cluster/S3DeepStorage/docker-compose.py
+++ b/integration-tests-ex/cases/cluster/S3DeepStorage/docker-compose.py
@@ -48,7 +48,7 @@ class Template(BaseTemplate):
 
     def define_overlord(self):
         service = self.define_druid_service(OVERLORD, OVERLORD)
-        self.add_depends(service, [ZOO_KEEPER, METADATA, 
"create_minio_buckets"])
+        self.add_depends(service, [ZOO_KEEPER, METADATA, 
'create_minio_buckets'])
         return service
 
     # This test uses different data than the default.
@@ -60,9 +60,13 @@ class Template(BaseTemplate):
 
     def create_minio_bucket(self):
         service = self.define_external_service("create_minio_buckets")
-        self.add_depends(service, ["minio"])
+        self.add_depends(service, ['minio'])
         return service
 
+    # No kafka dependency in this cluster
+    def define_kafka(self):
+        pass
+
     def define_custom_services(self):
         self.create_minio_container()
         self.create_minio_bucket()
diff --git a/integration-tests-ex/cases/cluster/Security/docker-compose.py 
b/integration-tests-ex/cases/cluster/Security/docker-compose.py
index 452026750bc..9ecb40de19e 100644
--- a/integration-tests-ex/cases/cluster/Security/docker-compose.py
+++ b/integration-tests-ex/cases/cluster/Security/docker-compose.py
@@ -24,6 +24,10 @@ class Template(BaseTemplate):
         self.add_property(service, 'druid.msq.intermediate.storage.basePath', 
'/shared/durablestorage/')
         self.add_property(service, 'druid.export.storage.baseDir', '/')
 
+    # No kafka dependency in this cluster
+    def define_kafka(self):
+        pass
+
     def extend_druid_service(self, service):
         self.add_env_file(service, '../Common/environment-configs/auth.env')
         self.add_env(service, 'druid_test_loadList', 'druid-basic-security')
diff --git a/integration-tests-ex/cases/cluster/template.py 
b/integration-tests-ex/cases/cluster/template.py
index 015b5126e22..374f902dbf5 100644
--- a/integration-tests-ex/cases/cluster/template.py
+++ b/integration-tests-ex/cases/cluster/template.py
@@ -23,7 +23,7 @@ internally as a Python data structure made up of maps, arrays 
and scalars.
 PyYaml does the grunt work of converting the data structure to the YAML file.
 '''
 
-import yaml, os
+import yaml, os, platform
 from pathlib import Path
 
 # Constants used frequently in the template.
@@ -32,6 +32,7 @@ DRUID_NETWORK = 'druid-it-net'
 DRUID_SUBNET = '172.172.172'
 ZOO_KEEPER = 'zookeeper'
 METADATA = 'metadata'
+KAFKA = 'kafka'
 COORDINATOR = 'coordinator'
 OVERLORD = 'overlord'
 ROUTER = 'router'
@@ -68,6 +69,12 @@ def generate(template_path, template):
 
 class BaseTemplate:
 
+    # Lookup map to determine what depends_on condition needs to be specified 
for a docker-compose dependency.
+    SERVICE_DEPENDENCY_CONDITION_LOOKUP = {
+      METADATA: 'service_healthy',
+      ZOO_KEEPER: 'service_started'
+    }
+
     def __init__(self):
         # Cluster is the object tree for the docker-compose.yaml file for our 
test cluster.
         # The tree is a map of objects, each of which is a map of values. The 
values are
@@ -102,6 +109,7 @@ class BaseTemplate:
         '''
         self.define_zk()
         self.define_metadata()
+        self.define_kafka()
 
     def define_druid_services(self):
         '''
@@ -242,6 +250,11 @@ class BaseTemplate:
             'file': '../Common/dependencies.yaml',
             'service': name
             }}
+
+        # Apple Silicon compatibility helper.
+        if platform.processor() == 'arm':
+            service['platform'] = 'linux/x86_64'
+
         self.add_service(name, service)
         return service
 
@@ -257,6 +270,14 @@ class BaseTemplate:
         '''
         return self.define_external_service(METADATA)
 
+    def define_kafka(self) -> dict:
+      '''
+      Defines the kafka service. Returns the service
+      '''
+      service = self.define_external_service(KAFKA)
+      self.add_depends(service, [ZOO_KEEPER])
+      return service
+
     def define_druid_service(self, name, base) -> dict:
         '''
         Defines a Druid service as a reference to the base definition in
@@ -283,11 +304,18 @@ class BaseTemplate:
 
     def add_depends(self, service, items):
         '''
-        Adds a service dependency to a service.
+        Adds 1 or more service dependencies to a service.
+
+        Args:
+            service: The service definition dictionary that is recieving new 
dependencies
+            items: A list of strings specifying the dependency service name(s)
         '''
         if items is not None and len(items) > 0:
-            depends = service.setdefault('depends_on', [])
-            depends += items
+            depends = service.setdefault('depends_on', {})
+            for dependency_service in items:
+                depends[dependency_service] = {
+                    'condition': 
BaseTemplate.SERVICE_DEPENDENCY_CONDITION_LOOKUP.get(dependency_service, 
'service_started')
+                }
 
     def define_master_service(self, name, base) -> dict:
         '''
@@ -323,7 +351,7 @@ class BaseTemplate:
         Defines a Druid "worker" service: one that depends only on ZooKeeper.
         '''
         service = self.define_druid_service(name, base)
-        self.add_depends(service, [ZOO_KEEPER])
+        self.add_depends(service, [ZOO_KEEPER, METADATA])
         return service
 
     def define_std_worker_service(self, name) -> dict:
diff --git 
a/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/auth/ITSecurityBasicQuery.java
 
b/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/auth/ITSecurityBasicQuery.java
index 181e8e92d1f..399a5e3b798 100644
--- 
a/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/auth/ITSecurityBasicQuery.java
+++ 
b/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/auth/ITSecurityBasicQuery.java
@@ -74,6 +74,10 @@ public class ITSecurityBasicQuery
   public static final String USER_1_PASSWORD = "password1";
   private static final String EXPORT_TASK = "/indexer/export_task.json";
 
+  // Time in ms to sleep after updating role permissions in each test. This 
intends to give the
+  // underlying test cluster enough time to sync permissions and be ready when 
test execution starts.
+  private static final int SYNC_SLEEP = 10000;
+
   @Before
   public void setUp() throws IOException
   {
@@ -161,8 +165,8 @@ public class ITSecurityBasicQuery
     );
     securityClient.setPermissionsToRole(ROLE_1, permissions);
 
-    // Wait for a second so that the auth is synced, to avoid flakiness
-    Thread.sleep(1000);
+    // Allow permissions sync across cluster to avoid flakes
+    Thread.sleep(SYNC_SLEEP);
 
     String queryLocal =
         StringUtils.format(
@@ -226,8 +230,8 @@ public class ITSecurityBasicQuery
     );
     securityClient.setPermissionsToRole(ROLE_1, permissions);
 
-    // Wait for a second so that the auth is synced, to avoid flakiness
-    Thread.sleep(4000);
+    // Allow permissions sync across cluster to avoid flakes
+    Thread.sleep(SYNC_SLEEP);
 
     String exportQuery =
         StringUtils.format(
@@ -266,8 +270,8 @@ public class ITSecurityBasicQuery
     );
     securityClient.setPermissionsToRole(ROLE_1, permissions);
 
-    // Wait for a second so that the auth is synced, to avoid flakyness
-    Thread.sleep(1000);
+    // Allow permissions sync across cluster to avoid flakes
+    Thread.sleep(SYNC_SLEEP);
 
     String exportQuery =
         StringUtils.format(
@@ -306,8 +310,8 @@ public class ITSecurityBasicQuery
     );
     securityClient.setPermissionsToRole(ROLE_1, permissions);
 
-    // Wait for a second so that the auth is synced, to avoid flakiness
-    Thread.sleep(1000);
+    // Allow permissions sync across cluster to avoid flakes
+    Thread.sleep(SYNC_SLEEP);
 
     String task = createTaskString();
     StatusResponseHolder statusResponseHolder = 
overlordResourceTestClient.submitTaskAndReturnStatusWithAuth(task, USER_1, 
USER_1_PASSWORD);
@@ -327,8 +331,8 @@ public class ITSecurityBasicQuery
     );
     securityClient.setPermissionsToRole(ROLE_1, permissions);
 
-    // Wait for a second so that the auth is synced, to avoid flakiness
-    Thread.sleep(1000);
+    // Allow permissions sync across cluster to avoid flakes
+    Thread.sleep(SYNC_SLEEP);
 
     String task = createTaskString();
     StatusResponseHolder statusResponseHolder = 
overlordResourceTestClient.submitTaskAndReturnStatusWithAuth(task, USER_1, 
USER_1_PASSWORD);


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to