maytasm commented on a change in pull request #9854: URL: https://github.com/apache/druid/pull/9854#discussion_r425512951
########## File path: integration-tests/script/copy_hadoop_resources.sh ########## @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# wait for hadoop namenode to be up +echo "Waiting for hadoop namenode to be up" +docker exec -t druid-it-hadoop sh -c "./usr/local/hadoop/bin/hdfs dfs -mkdir -p /druid" +while [ $? 
-ne 0 ] +do + sleep 2 + docker exec -t druid-it-hadoop sh -c "./usr/local/hadoop/bin/hdfs dfs -mkdir -p /druid" +done +echo "Finished waiting for Hadoop namenode" + +# Setup hadoop druid dirs +echo "Setting up druid hadoop dirs" +docker exec -t druid-it-hadoop sh -c "./usr/local/hadoop/bin/hdfs dfs -mkdir -p /druid" +docker exec -t druid-it-hadoop sh -c "./usr/local/hadoop/bin/hdfs dfs -mkdir -p /druid/segments" +docker exec -t druid-it-hadoop sh -c "./usr/local/hadoop/bin/hdfs dfs -mkdir -p /quickstart" +docker exec -t druid-it-hadoop sh -c "./usr/local/hadoop/bin/hdfs dfs -chmod 777 /druid" +docker exec -t druid-it-hadoop sh -c "./usr/local/hadoop/bin/hdfs dfs -chmod 777 /druid/segments" +docker exec -t druid-it-hadoop sh -c "./usr/local/hadoop/bin/hdfs dfs -chmod 777 /quickstart" +docker exec -t druid-it-hadoop sh -c "./usr/local/hadoop/bin/hdfs dfs -chmod -R 777 /tmp" +docker exec -t druid-it-hadoop sh -c "./usr/local/hadoop/bin/hdfs dfs -chmod -R 777 /user" +# Copy data files to Hadoop container +docker exec -t druid-it-hadoop sh -c "./usr/local/hadoop/bin/hdfs dfs -put /shared/wikiticker-it/wikiticker-2015-09-12-sampled.json.gz /quickstart/wikiticker-2015-09-12-sampled.json.gz" +docker exec -t druid-it-hadoop sh -c "./usr/local/hadoop/bin/hdfs dfs -put /resources/data/batch_index /batch_index" +echo "Finished setting up druid hadoop dirs" + +echo "Copying Hadoop XML files to shared" +docker exec -t druid-it-hadoop sh -c "cp /usr/local/hadoop/etc/hadoop/*.xml /shared/hadoop_xml" +echo "Copied Hadoop XML files to shared" Review comment: nit:new line ########## File path: integration-tests/pom.xml ########## @@ -374,21 +375,23 @@ <artifactId>exec-maven-plugin</artifactId> <executions> <execution> - <id>build-and-start-druid-cluster</id> + <id>docker-package</id> <goals> <goal>exec</goal> </goals> <phase>pre-integration-test</phase> <configuration> <environmentVariables> - 
<DRUID_INTEGRATION_TEST_START_HADOOP_DOCKER>${start.hadoop.docker}</DRUID_INTEGRATION_TEST_START_HADOOP_DOCKER> - <DRUID_INTEGRATION_TEST_SKIP_START_DOCKER>${skip.start.docker}</DRUID_INTEGRATION_TEST_SKIP_START_DOCKER> - <DRUID_INTEGRATION_TEST_JVM_RUNTIME>${jvm.runtime}</DRUID_INTEGRATION_TEST_JVM_RUNTIME> - <DRUID_INTEGRATION_TEST_GROUP>${groups}</DRUID_INTEGRATION_TEST_GROUP> - <DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH>${override.config.path}</DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH> - <DRUID_INTEGRATION_TEST_RESOURCE_FILE_DIR_PATH>${resource.file.dir.path}</DRUID_INTEGRATION_TEST_RESOURCE_FILE_DIR_PATH>> + <DRUID_INTEGRATION_TEST_START_HADOOP_DOCKER>${start.hadoop.docker}</DRUID_INTEGRATION_TEST_START_HADOOP_DOCKER> + <DRUID_INTEGRATION_TEST_JVM_RUNTIME>${jvm.runtime}</DRUID_INTEGRATION_TEST_JVM_RUNTIME> + <DRUID_INTEGRATION_TEST_GROUP>${groups}</DRUID_INTEGRATION_TEST_GROUP> + <DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH>${override.config.path}</DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH> + <DRUID_INTEGRATION_TEST_RESOURCE_FILE_DIR_PATH>${resource.file.dir.path}</DRUID_INTEGRATION_TEST_RESOURCE_FILE_DIR_PATH> + <DRUID_INTEGRATION_TEST_SKIP_BUILD_DOCKER>${skip.build.docker}</DRUID_INTEGRATION_TEST_SKIP_BUILD_DOCKER> + <DRUID_INTEGRATION_TEST_SKIP_RUN_DOCKER>${skip.start.docker}</DRUID_INTEGRATION_TEST_SKIP_RUN_DOCKER> Review comment: nit: change skip.start.docker to skip.run.docker to be consistent ########## File path: integration-tests/pom.xml ########## @@ -446,61 +450,7 @@ </suiteXmlFiles> </configuration> </plugin> - <plugin> - <groupId>de.thetaphi</groupId> - <artifactId>forbiddenapis</artifactId> - <configuration> - <signaturesFiles> - <!-- Needed because of https://github.com/policeman-tools/forbidden-apis/issues/126 --> - <signaturesFile>../codestyle/joda-time-forbidden-apis.txt</signaturesFile> - <signaturesFile>../codestyle/druid-forbidden-apis.txt</signaturesFile> - </signaturesFiles> - </configuration> - </plugin> - </plugins> - </build> - 
</profile> - <profile> - <id>int-tests-config-file</id> - <build> - <plugins> - <plugin> - <groupId>org.apache.maven.plugins</groupId> - <artifactId>maven-failsafe-plugin</artifactId> - <executions> - <execution> - <id>integration-tests</id> - <phase>integration-test</phase> - <goals> - <goal>integration-test</goal> - <goal>verify</goal> - </goals> - </execution> - </executions> - <configuration> - <properties> - <property> - <name>testrunfactory</name> - <value>org.testng.DruidTestRunnerFactory</value> - </property> - </properties> - <argLine> - -Duser.timezone=UTC - -Dfile.encoding=UTF-8 - -Ddruid.test.config.type=configFile - -Ddruid.test.config.configFile=${env.CONFIG_FILE} - -Ddruid.client.https.trustStorePath=client_tls/truststore.jks - -Ddruid.client.https.trustStorePassword=druid123 - -Ddruid.client.https.keyStorePath=client_tls/client.jks - -Ddruid.client.https.certAlias=druid - -Ddruid.client.https.keyManagerPassword=druid123 - -Ddruid.client.https.keyStorePassword=druid123 - </argLine> - <suiteXmlFiles> - <suiteXmlFile>src/test/resources/testng.xml</suiteXmlFile> - </suiteXmlFiles> - </configuration> - </plugin> + Review comment: Where is the below deleted? Why is the int-tests-config-file removed? ########## File path: integration-tests/build_run_cluster.sh ########## @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +echo $DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH + +export DIR=$(cd $(dirname $0) && pwd) +export HADOOP_DOCKER_DIR=$DIR/../examples/quickstart/tutorial/hadoop/docker +export DOCKERDIR=$DIR/docker +export SERVICE_SUPERVISORDS_DIR=$DOCKERDIR/service-supervisords +export ENVIRONMENT_CONFIGS_DIR=$DOCKERDIR/environment-configs +export SHARED_DIR=${HOME}/shared +export SUPERVISORDIR=/usr/lib/druid/conf +export RESOURCEDIR=$DIR/src/test/resources + +# so docker IP addr will be known during docker build +echo ${DOCKER_IP:=127.0.0.1} > $DOCKERDIR/docker_ip + +if !($DRUID_INTEGRATION_TEST_SKIP_BUILD_DOCKER); then + sh ./script/copy_resources.sh + sh ./script/docker_build_containers.sh +fi + +if !($DRUID_INTEGRATION_TEST_SKIP_RUN_DOCKER); then + sh ./stop_cluster.sh + sh ./script/docker_run_cluster.sh +fi + +if ($DRUID_INTEGRATION_TEST_START_HADOOP_DOCKER); then + sh ./script/copy_hadoop_resources.sh Review comment: copy_hadoop_resources.sh has to be run after the hadoop cluster has started BUT BEFORE the other druid containers start. ########## File path: integration-tests/build_run_cluster.sh ########## @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +echo $DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH + +export DIR=$(cd $(dirname $0) && pwd) +export HADOOP_DOCKER_DIR=$DIR/../examples/quickstart/tutorial/hadoop/docker +export DOCKERDIR=$DIR/docker +export SERVICE_SUPERVISORDS_DIR=$DOCKERDIR/service-supervisords +export ENVIRONMENT_CONFIGS_DIR=$DOCKERDIR/environment-configs +export SHARED_DIR=${HOME}/shared +export SUPERVISORDIR=/usr/lib/druid/conf +export RESOURCEDIR=$DIR/src/test/resources + +# so docker IP addr will be known during docker build +echo ${DOCKER_IP:=127.0.0.1} > $DOCKERDIR/docker_ip + +if !($DRUID_INTEGRATION_TEST_SKIP_BUILD_DOCKER); then + sh ./script/copy_resources.sh + sh ./script/docker_build_containers.sh +fi + +if !($DRUID_INTEGRATION_TEST_SKIP_RUN_DOCKER); then + sh ./stop_cluster.sh Review comment: stop_cluster.sh has a check for DRUID_INTEGRATION_TEST_SKIP_STOP_DOCKER The check for DRUID_INTEGRATION_TEST_SKIP_STOP_DOCKER is to skip teardown of the cluster after the test finishes running (for debugging). In this case, where we are stopping the cluster so that we can start them fresh to run integration test, we should skip checking DRUID_INTEGRATION_TEST_SKIP_STOP_DOCKER here. ########## File path: integration-tests/docker/docker-compose.base.yml ########## @@ -0,0 +1,271 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more Review comment: Is it possible to use the environment variables we define in the scripts for the path to mount volumes and env files here? (i.e. COMMON_ENV, SERVICE_SUPERVISORDS_DIR, SUPERVISORDIR, RESOURCEDIR, etc.) 
########## File path: integration-tests/build_run_cluster.sh ########## @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +echo $DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH + +export DIR=$(cd $(dirname $0) && pwd) +export HADOOP_DOCKER_DIR=$DIR/../examples/quickstart/tutorial/hadoop/docker +export DOCKERDIR=$DIR/docker +export SERVICE_SUPERVISORDS_DIR=$DOCKERDIR/service-supervisords +export ENVIRONMENT_CONFIGS_DIR=$DOCKERDIR/environment-configs +export SHARED_DIR=${HOME}/shared +export SUPERVISORDIR=/usr/lib/druid/conf +export RESOURCEDIR=$DIR/src/test/resources + +# so docker IP addr will be known during docker build +echo ${DOCKER_IP:=127.0.0.1} > $DOCKERDIR/docker_ip + +if !($DRUID_INTEGRATION_TEST_SKIP_BUILD_DOCKER); then Review comment: Why the split DRUID_INTEGRATION_TEST_SKIP_BUILD_DOCKER and DRUID_INTEGRATION_TEST_SKIP_RUN_DOCKER? I don't think we should ever want to BUILD but not RUN. I am also not really sure if having not BUILD and run is that useful. If your cluster is not running then you are not sure what state it is in, might as well be better to build and start fresh. 
########## File path: integration-tests/docker/docker-compose.s3.yml ########## @@ -0,0 +1,182 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more Review comment: What is the purpose of this since we already have integration-tests/docker/docker-compose.override-env.yml Note that the env file in environment-configs/override-examples/ is never meant to be used as-is or modified directly. Users should copy this and write their own override env file and use integration-tests/docker/docker-compose.override-env.yml ########## File path: integration-tests/docker/docker-compose.gcs.yml ########## @@ -0,0 +1,182 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more Review comment: What is the purpose of this since we already have integration-tests/docker/docker-compose.override-env.yml Note that the env file in environment-configs/override-examples/ is never meant to be used as-is or modified directly. Users should copy this and write their own override env file and use integration-tests/docker/docker-compose.override-env.yml ########## File path: integration-tests/script/copy_resources.sh ########## @@ -0,0 +1,80 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# setup client keystore +./docker/tls/generate-client-certs-and-keystores.sh +rm -rf docker/client_tls +cp -r client_tls docker/client_tls + +# Make directories if they dont exist +mkdir -p $SHARED_DIR/hadoop_xml +mkdir -p $SHARED_DIR/hadoop-dependencies +mkdir -p $SHARED_DIR/logs +mkdir -p $SHARED_DIR/tasklogs +mkdir -p $SHARED_DIR/docker/extensions +mkdir -p $SHARED_DIR/docker/credentials + +# install druid jars +rm -rf $SHARED_DIR/docker +cp -R docker $SHARED_DIR/docker +mvn -B dependency:copy-dependencies -DoutputDirectory=$SHARED_DIR/docker/lib + +# move extensions into a seperate extension folder +# For druid-s3-extensions +mkdir -p $SHARED_DIR/docker/extensions/druid-s3-extensions +mv $SHARED_DIR/docker/lib/druid-s3-extensions-* $SHARED_DIR/docker/extensions/druid-s3-extensions +# For druid-azure-extensions +mkdir -p $SHARED_DIR/docker/extensions/druid-azure-extensions +mv $SHARED_DIR/docker/lib/druid-azure-extensions-* $SHARED_DIR/docker/extensions/druid-azure-extensions +# For druid-google-extensions +mkdir -p $SHARED_DIR/docker/extensions/druid-google-extensions +mv $SHARED_DIR/docker/lib/druid-google-extensions-* $SHARED_DIR/docker/extensions/druid-google-extensions +# For druid-hdfs-storage +mkdir -p $SHARED_DIR/docker/extensions/druid-hdfs-storage +mv $SHARED_DIR/docker/lib/druid-hdfs-storage-* $SHARED_DIR/docker/extensions/druid-hdfs-storage +# For druid-kinesis-indexing-service +mkdir -p $SHARED_DIR/docker/extensions/druid-kinesis-indexing-service +mv $SHARED_DIR/docker/lib/druid-kinesis-indexing-service-* $SHARED_DIR/docker/extensions/druid-kinesis-indexing-service +# For druid-parquet-extensions +mkdir -p $SHARED_DIR/docker/extensions/druid-parquet-extensions +mv $SHARED_DIR/docker/lib/druid-parquet-extensions-* $SHARED_DIR/docker/extensions/druid-parquet-extensions +# For druid-orc-extensions +mkdir -p $SHARED_DIR/docker/extensions/druid-orc-extensions +mv $SHARED_DIR/docker/lib/druid-orc-extensions-* 
$SHARED_DIR/docker/extensions/druid-orc-extensions + +# Pull Hadoop dependency if needed +if [ -n "$DRUID_INTEGRATION_TEST_START_HADOOP_DOCKER" ] && [ "$DRUID_INTEGRATION_TEST_START_HADOOP_DOCKER" == true ] +then + java -cp "$SHARED_DIR/docker/lib/*" -Ddruid.extensions.hadoopDependenciesDir="$SHARED_DIR/hadoop-dependencies" org.apache.druid.cli.Main tools pull-deps -h org.apache.hadoop:hadoop-client:2.8.5 -h org.apache.hadoop:hadoop-aws:2.8.5 -h org.apache.hadoop:hadoop-azure:2.8.5 + curl https://storage.googleapis.com/hadoop-lib/gcs/gcs-connector-hadoop2-latest.jar --output $SHARED_DIR/docker/lib/gcs-connector-hadoop2-latest.jar +fi + +# install logging config +cp src/main/resources/log4j2.xml $SHARED_DIR/docker/lib/log4j2.xml + +# copy the integration test jar, it provides test-only extension implementations +cp target/druid-integration-tests*.jar $SHARED_DIR/docker/lib + +# one of the integration tests needs the wikiticker sample data +mkdir -p $SHARED_DIR/wikiticker-it +cp ../examples/quickstart/tutorial/wikiticker-2015-09-12-sampled.json.gz $SHARED_DIR/wikiticker-it/wikiticker-2015-09-12-sampled.json.gz +cp docker/wiki-simple-lookup.json $SHARED_DIR/wikiticker-it/wiki-simple-lookup.json + +# copy other files if needed +if [ -n "$DRUID_INTEGRATION_TEST_RESOURCE_FILE_DIR_PATH" ] +then + cp -a $DRUID_INTEGRATION_TEST_RESOURCE_FILE_DIR_PATH/. $SHARED_DIR/docker/credentials/ +fi Review comment: nit:new line ########## File path: integration-tests/docker/docker-compose.azure.yml ########## @@ -0,0 +1,182 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more Review comment: What is the purpose of this since we already have integration-tests/docker/docker-compose.override-env.yml Note that the env file in environment-configs/override-examples/ is never meant to be use as-is or modified directly. 
User should copy this and write their own override env file and use integration-tests/docker/docker-compose.override-env.yml ########## File path: integration-tests/script/docker_run_cluster.sh ########## @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Create docker network +{ + docker network create --subnet=172.172.172.0/24 druid-it-net +} + +# setup all enviornment variables to be pass to the containers +COMMON_ENV="--env-file=$ENVIRONMENT_CONFIGS_DIR/common -e DRUID_INTEGRATION_TEST_GROUP" +BROKER_ENV="--env-file=$ENVIRONMENT_CONFIGS_DIR/broker" +COORDINATOR_ENV="--env-file=$ENVIRONMENT_CONFIGS_DIR/coordinator" +HISTORICAL_ENV="--env-file=$ENVIRONMENT_CONFIGS_DIR/historical" +MIDDLEMANAGER_ENV="--env-file=$ENVIRONMENT_CONFIGS_DIR/middlemanager" +OVERLORD_ENV="--env-file=$ENVIRONMENT_CONFIGS_DIR/overlord" +ROUTER_ENV="--env-file=$ENVIRONMENT_CONFIGS_DIR/router" +ROUTER_CUSTOM_CHECK_TLS_ENV="--env-file=$ENVIRONMENT_CONFIGS_DIR/router-custom-check-tls" +ROUTER_NO_CLIENT_AUTH_TLS_ENV="--env-file=$ENVIRONMENT_CONFIGS_DIR/router-no-client-auth-tls" +ROUTER_PERMISSIVE_TLS_ENV="--env-file=$ENVIRONMENT_CONFIGS_DIR/router-permissive-tls" + +if [ -z "$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH" ] +then + echo "\$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH is not set. No override config file provided" + if [ "$DRUID_INTEGRATION_TEST_GROUP" = "s3-deep-storage" ] || \ + [ "$DRUID_INTEGRATION_TEST_GROUP" = "gcs-deep-storage" ] || \ + [ "$DRUID_INTEGRATION_TEST_GROUP" = "azure-deep-storage" ]; then Review comment: Can you add "hdfs-deep-storage", "s3-ingestion", "kinesis-index", and "kinesis-data-format" to this list too. ########## File path: integration-tests/docker/docker-compose.test-env.yml ########## @@ -0,0 +1,173 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more Review comment: Do we need both integration-tests/docker/docker-compose.test-env.yml and integration-tests/docker/docker-compose.yml? ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. 
For queries about this service, please contact Infrastructure at: [email protected] --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
