hunyadi-dev commented on a change in pull request #1053:
URL: https://github.com/apache/nifi-minifi-cpp/pull/1053#discussion_r617223981



##########
File path: docker/test/integration/MiNiFi_integration_test_driver.py
##########
@@ -90,35 +102,56 @@ def acquire_cluster(self, name):
         return self.clusters.setdefault(name, DockerTestCluster())
 
     def set_up_cluster_network(self):
-        self.docker_network = SingleNodeDockerCluster.create_docker_network()
-        for cluster in self.clusters.values():
-            cluster.set_network(self.docker_network)
+        if self.docker_network is None:
+            logging.info("Setting up new network.")
+            self.docker_network = 
SingleNodeDockerCluster.create_docker_network()
+            for cluster in self.clusters.values():
+                cluster.set_network(self.docker_network)
+        else:
+            logging.info("Network is already set.")
+
+    def wait_for_cluster_startup_finish(self, cluster):
+        startup_success = True
+        logging.info("Engine: %s", cluster.get_engine())
+        if cluster.get_engine() == "minifi-cpp":
+            startup_success = cluster.wait_for_app_logs("Starting Flow 
Controller", 120)
+        elif cluster.get_engine() == "nifi":
+            startup_success = cluster.wait_for_app_logs("Starting Flow 
Controller...", 120)
+        elif cluster.get_engine() == "kafka-broker":
+            startup_success = cluster.wait_for_app_logs("Startup complete.", 
120)
+        elif cluster.get_engine() == "http-proxy":
+            startup_success = cluster.wait_for_app_logs("Accepting HTTP Socket 
connections at", 120)
+        elif cluster.get_engine() == "s3-server":
+            startup_success = cluster.wait_for_app_logs("Started 
S3MockApplication", 120)
+        elif cluster.get_engine() == "azure-storage-server":
+            startup_success = cluster.wait_for_app_logs("Azurite Queue service 
is successfully listening at", 120)
+        if not startup_success:
+            cluster.log_nifi_output()
+        return startup_success
+
+    def start_single_cluster(self, cluster_name):
+        self.set_up_cluster_network()
+        cluster = self.clusters[cluster_name]
+        cluster.deploy_flow()
+        assert self.wait_for_cluster_startup_finish(cluster)
+        time.sleep(10)
 
     def start(self):
         logging.info("MiNiFi_integration_test start")
         self.set_up_cluster_network()
         for cluster in self.clusters.values():
-            logging.info("Starting cluster %s with an engine of %s", 
cluster.get_name(), cluster.get_engine())
-            
cluster.set_directory_bindings(self.docker_directory_bindings.get_directory_bindings(self.test_id))
-            cluster.deploy_flow()
-        for cluster_name, cluster in self.clusters.items():
-            startup_success = True
-            logging.info("Engine: %s", cluster.get_engine())
-            if cluster.get_engine() == "minifi-cpp":
-                startup_success = cluster.wait_for_app_logs("Starting Flow 
Controller", 120)
-            elif cluster.get_engine() == "nifi":
-                startup_success = cluster.wait_for_app_logs("Starting Flow 
Controller...", 120)
-            elif cluster.get_engine() == "kafka-broker":
-                startup_success = cluster.wait_for_app_logs("Startup 
complete.", 120)
-            elif cluster.get_engine() == "http-proxy":
-                startup_success = cluster.wait_for_app_logs("Accepting HTTP 
Socket connections at", 120)
-            elif cluster.get_engine() == "s3-server":
-                startup_success = cluster.wait_for_app_logs("Started 
S3MockApplication", 120)
-            elif cluster.get_engine() == "azure-storage-server":
-                startup_success = cluster.wait_for_app_logs("Azurite Queue 
service is successfully listening at", 120)
-            if not startup_success:
-                cluster.log_nifi_output()
-            assert startup_success
+            if len(cluster.containers) == 0:
+                logging.info("Starting cluster %s with an engine of %s", 
cluster.get_name(), cluster.get_engine())
+                
cluster.set_directory_bindings(self.docker_directory_bindings.get_directory_bindings(self.test_id))
+                cluster.deploy_flow()
+            else:
+                logging.info("Container %s is already started with an engine 
of %s", cluster.get_name(), cluster.get_engine())
+        for cluster in self.clusters.values():
+            assert self.wait_for_cluster_startup_finish(cluster)
+        # Seems like some extra time needed for consumers to negotiate with 
the broker
+        for cluster in self.clusters.values():
+            if cluster.get_engine() == "kafka-broker":
+                time.sleep(10)

Review comment:
       We already check for `"Startup complete."`; it seems like that check is 
insufficient.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to