This is an automated email from the ASF dual-hosted git repository.

houston pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/solr-operator.git


The following commit(s) were added to refs/heads/main by this push:
     new b2c9510  Fix a few issues with testing, add better cluster information
b2c9510 is described below

commit b2c951052828f88e9fc415f54aec189b805117e9
Author: Houston Putman <[email protected]>
AuthorDate: Mon Apr 8 08:35:39 2024 -0500

    Fix a few issues with testing, add better cluster information
---
 controllers/solr_cluster_ops_util.go        |  3 +--
 controllers/solrcloud_controller.go         |  1 -
 tests/e2e/solrcloud_rolling_upgrade_test.go |  2 +-
 tests/e2e/suite_test.go                     | 31 ++++++++++++++++++++++++++++-
 tests/e2e/test_utils_test.go                |  8 +++++++-
 5 files changed, 39 insertions(+), 6 deletions(-)

diff --git a/controllers/solr_cluster_ops_util.go b/controllers/solr_cluster_ops_util.go
index 07452eb..916446b 100644
--- a/controllers/solr_cluster_ops_util.go
+++ b/controllers/solr_cluster_ops_util.go
@@ -66,6 +66,7 @@ func clearClusterOpLock(statefulSet *appsv1.StatefulSet) {
 }
 
 func setClusterOpLock(statefulSet *appsv1.StatefulSet, op SolrClusterOp) error {
+       op.LastStartTime = metav1.Now()
        bytes, err := json.Marshal(op)
        if err != nil {
                return err
@@ -124,7 +125,6 @@ func retryNextQueuedClusterOp(statefulSet *appsv1.StatefulSet) (hasOp bool, err
        hasOp = len(clusterOpRetryQueue) > 0
        if len(clusterOpRetryQueue) > 0 {
                nextOp := clusterOpRetryQueue[0]
-               nextOp.LastStartTime = metav1.Now()
                err = setClusterOpLock(statefulSet, nextOp)
                if err != nil {
                        return hasOp, err
@@ -141,7 +141,6 @@ func retryNextQueuedClusterOpWithQueue(statefulSet *appsv1.StatefulSet, clusterO
        hasOp = len(clusterOpQueue) > 0
        if len(clusterOpQueue) > 0 {
                nextOp := clusterOpQueue[0]
-               nextOp.LastStartTime = metav1.Now()
                err = setClusterOpLock(statefulSet, nextOp)
                if err != nil {
                        return hasOp, err
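
The change above moves the LastStartTime stamp out of the individual call sites and into setClusterOpLock itself, so the timestamp is recorded in exactly one place. A minimal sketch of the resulting pattern follows; the annotation key and the trimmed-down SolrClusterOp struct are illustrative, not copied from the repo:

    package controllers

    import (
    	"encoding/json"

    	appsv1 "k8s.io/api/apps/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // Illustrative annotation key; the real key is defined elsewhere in the repo.
    const clusterOpLockAnnotation = "solr.apache.org/clusterOpsLock"

    // SolrClusterOp is trimmed down here; the real struct carries more fields.
    type SolrClusterOp struct {
    	Operation     string      `json:"operation"`
    	LastStartTime metav1.Time `json:"lastStartTime,omitempty"`
    }

    func setClusterOpLock(statefulSet *appsv1.StatefulSet, op SolrClusterOp) error {
    	// Stamping the start time here means no caller can forget it, and no
    	// two callers can disagree about when the operation began.
    	op.LastStartTime = metav1.Now()
    	bytes, err := json.Marshal(op)
    	if err != nil {
    		return err
    	}
    	if statefulSet.Annotations == nil {
    		statefulSet.Annotations = map[string]string{}
    	}
    	statefulSet.Annotations[clusterOpLockAnnotation] = string(bytes)
    	return nil
    }
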
diff --git a/controllers/solrcloud_controller.go b/controllers/solrcloud_controller.go
index 4d832cf..9940ff9 100644
--- a/controllers/solrcloud_controller.go
+++ b/controllers/solrcloud_controller.go
@@ -574,7 +574,6 @@ func (r *SolrCloudReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
                        if clusterOp != nil {
                                // Starting a locked cluster operation!
                                originalStatefulSet := statefulSet.DeepCopy()
-                               clusterOp.LastStartTime = metav1.Now()
                                err = setClusterOpLock(statefulSet, *clusterOp)
                                if err == nil {
                                        err = r.Patch(ctx, statefulSet, client.StrategicMergeFrom(originalStatefulSet))
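
The hunk above follows the standard controller-runtime patch flow: snapshot the StatefulSet, mutate it, then patch against the snapshot so only the delta goes to the API server. A hedged sketch of that flow, building on the SolrClusterOp sketch earlier (the helper name lockAndPatch is hypothetical):

    package controllers

    import (
    	"context"

    	appsv1 "k8s.io/api/apps/v1"
    	"sigs.k8s.io/controller-runtime/pkg/client"
    )

    // lockAndPatch is a hypothetical helper showing the snapshot/mutate/patch
    // pattern used in the Reconcile hunk above.
    func lockAndPatch(ctx context.Context, c client.Client, statefulSet *appsv1.StatefulSet, op SolrClusterOp) error {
    	original := statefulSet.DeepCopy()
    	if err := setClusterOpLock(statefulSet, op); err != nil {
    		return err
    	}
    	// StrategicMergeFrom diffs against the snapshot, so only the new lock
    	// annotation (now including LastStartTime) is sent to the API server.
    	return c.Patch(ctx, statefulSet, client.StrategicMergeFrom(original))
    }
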
diff --git a/tests/e2e/solrcloud_rolling_upgrade_test.go b/tests/e2e/solrcloud_rolling_upgrade_test.go
index faf2175..c56951d 100644
--- a/tests/e2e/solrcloud_rolling_upgrade_test.go
+++ b/tests/e2e/solrcloud_rolling_upgrade_test.go
@@ -163,7 +163,7 @@ var _ = FDescribe("E2E - SolrCloud - Rolling Upgrades", func() {
                        }
 
                        By("waiting for the balanceReplicas to finish")
-                       expectStatefulSetWithChecksAndTimeout(ctx, solrCloud, solrCloud.StatefulSetName(), time.Second*30, time.Second, func(g Gomega, found *appsv1.StatefulSet) {
+                       expectStatefulSetWithChecksAndTimeout(ctx, solrCloud, solrCloud.StatefulSetName(), time.Second*70, time.Second, func(g Gomega, found *appsv1.StatefulSet) {
                                clusterOp, err := controllers.GetCurrentClusterOp(found)
                                g.Expect(err).ToNot(HaveOccurred(), "Error occurred while finding clusterLock for SolrCloud")
                                g.Expect(clusterOp).To(BeNil(), "StatefulSet should not have a balanceReplicas lock after balancing is complete.")
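
The widened window (time.Second*30 to time.Second*70) gives the balanceReplicas operation more room to finish on slow CI nodes before the test asserts the lock is gone. Assuming the helper wraps Gomega's Eventually, the equivalent inline polling loop would look roughly like this; the helper's internals are an assumption, not part of this diff:

    // Poll the StatefulSet every second for up to 70s until the cluster-op
    // lock has been released. Assumes the e2e suite's k8sClient, plus imports
    // of appsv1, time, controllers, "k8s.io/apimachinery/pkg/types", and
    // Gomega's dot-import.
    Eventually(func(g Gomega) {
    	found := &appsv1.StatefulSet{}
    	g.Expect(k8sClient.Get(ctx, types.NamespacedName{
    		Name:      solrCloud.StatefulSetName(),
    		Namespace: solrCloud.Namespace,
    	}, found)).To(Succeed())
    	clusterOp, err := controllers.GetCurrentClusterOp(found)
    	g.Expect(err).ToNot(HaveOccurred())
    	g.Expect(clusterOp).To(BeNil())
    }).WithTimeout(time.Second * 70).WithPolling(time.Second).Should(Succeed())
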
diff --git a/tests/e2e/suite_test.go b/tests/e2e/suite_test.go
index 8b38a98..1c2aec5 100644
--- a/tests/e2e/suite_test.go
+++ b/tests/e2e/suite_test.go
@@ -285,8 +285,23 @@ func writeAllSolrInfoToFiles(ctx context.Context, directory string, namespace st
        req, err := labels.NewRequirement("technology", selection.In, []string{solrv1beta1.SolrTechnologyLabel, solrv1beta1.SolrPrometheusExporterTechnologyLabel})
        Expect(err).ToNot(HaveOccurred())
 
-       labelSelector := labels.Everything().Add(*req)
        listOps := &client.ListOptions{
+               Namespace: namespace,
+       }
+
+       foundSolrs := &solrv1beta1.SolrCloudList{}
+       Expect(k8sClient.List(ctx, foundSolrs, listOps)).To(Succeed(), "Could not fetch SolrClouds")
+       Expect(foundSolrs).ToNot(BeNil(), "No SolrClouds could be found")
+       for _, solrCloud := range foundSolrs.Items {
+               writeSolrClusterStatusInfoToFile(
+                       ctx,
+                       directory+solrCloud.Name,
+                       &solrCloud,
+               )
+       }
+
+       labelSelector := labels.Everything().Add(*req)
+       listOps = &client.ListOptions{
                Namespace:     namespace,
                LabelSelector: labelSelector,
        }
@@ -333,6 +348,20 @@ func writeAllSolrInfoToFiles(ctx context.Context, directory string, namespace st
        }
 }
 
+// writeSolrClusterStatusInfoToFile writes the following each to a separate file with the given base name & directory.
+//   - SolrCloud's Cluster Status from the Collections API
+func writeSolrClusterStatusInfoToFile(ctx context.Context, baseFilename string, solrCloud *solrv1beta1.SolrCloud) {
+       clusterStatus := fetchClusterStatusWithErrorHandling(ctx, solrCloud, false)
+       if clusterStatus != "" {
+               // Write cluster status to a file
+               statusFile, err := os.Create(baseFilename + ".cluster-state.json")
+               defer statusFile.Close()
+               Expect(err).ToNot(HaveOccurred(), "Could not open file to save cluster status: %s", baseFilename+".cluster-state.json")
+               _, writeErr := statusFile.Write([]byte(clusterStatus))
+               Expect(writeErr).ToNot(HaveOccurred(), "Could not write cluster status json to file")
+       }
+}
+
 // writeAllStatefulSetInfoToFiles writes the following each to a separate file with the given base name & directory.
 //   - StatefulSet Spec/Status
 //   - StatefulSet Events
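
One aside on the new writeSolrClusterStatusInfoToFile: os.Create returns a nil *os.File together with a non-nil error, and the hunk defers Close before the error is checked. Calling Close on a nil *os.File returns os.ErrInvalid rather than panicking, so the code is safe as written, but the conventional ordering checks the error first. A sketch of that ordering (writeStatusFile is a hypothetical helper, not from this diff):

    package e2e

    import (
    	"os"

    	. "github.com/onsi/gomega"
    )

    // writeStatusFile is a hypothetical helper: assert the Create error before
    // deferring Close, so Close only ever runs on a valid handle.
    func writeStatusFile(baseFilename, clusterStatus string) {
    	statusFile, err := os.Create(baseFilename + ".cluster-state.json")
    	Expect(err).ToNot(HaveOccurred(), "Could not open file to save cluster status: %s", baseFilename+".cluster-state.json")
    	defer statusFile.Close()
    	_, writeErr := statusFile.WriteString(clusterStatus)
    	Expect(writeErr).ToNot(HaveOccurred(), "Could not write cluster status json to file")
    }
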
diff --git a/tests/e2e/test_utils_test.go b/tests/e2e/test_utils_test.go
index c3e693c..486499d 100644
--- a/tests/e2e/test_utils_test.go
+++ b/tests/e2e/test_utils_test.go
@@ -313,6 +313,10 @@ func queryCollectionWithGomega(ctx context.Context, solrCloud *solrv1beta1.SolrC
 }
 
 func fetchClusterStatus(ctx context.Context, solrCloud *solrv1beta1.SolrCloud) string {
+       return fetchClusterStatusWithErrorHandling(ctx, solrCloud, true)
+}
+
+func fetchClusterStatusWithErrorHandling(ctx context.Context, solrCloud *solrv1beta1.SolrCloud, expectNoError bool) string {
        response, err := callSolrApiInPod(
                ctx,
                solrCloud,
@@ -323,7 +327,9 @@ func fetchClusterStatus(ctx context.Context, solrCloud *solrv1beta1.SolrCloud) s
                        "wt":     "json",
                },
        )
-       Expect(err).ToNot(HaveOccurred(), "Could not fetch clusterStatus for cloud")
+       if expectNoError {
+               Expect(err).ToNot(HaveOccurred(), "Could not fetch clusterStatus for cloud")
+       }
 
        return response
 }
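
With this split, fetchClusterStatus keeps the hard assertion for use inside test bodies, while passing expectNoError=false lets the failure-artifact collection in suite_test.go degrade gracefully when a cluster is already unhealthy. Roughly, the two call styles look like this (the assertion and the trailing comment are illustrative):

    // Inside a test: a fetch failure should fail the test immediately.
    status := fetchClusterStatus(ctx, solrCloud)
    Expect(status).ToNot(BeEmpty())

    // While collecting debug artifacts after a failure: best effort only, so
    // a dead Solr pod cannot mask the original test failure.
    status = fetchClusterStatusWithErrorHandling(ctx, solrCloud, false)
    if status != "" {
    	// persist it alongside the other per-cloud debug files
    }
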
