This is an automated email from the ASF dual-hosted git repository.
wmedvedeo pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-kie-tools.git
The following commit(s) were added to refs/heads/main by this push:
new 4f89607c6e2 incubator-kie-tools-3496: [sonataflow-operator] Ensure DB
Migrator Job is executed when upgrading the operator from version N to N+1
(#3497)
4f89607c6e2 is described below
commit 4f89607c6e2dfb57178e1e19eaa27d0a83d86851
Author: Walter Medvedeo <[email protected]>
AuthorDate: Thu Mar 19 11:08:20 2026 +0100
incubator-kie-tools-3496: [sonataflow-operator] Ensure DB Migrator Job is
executed when upgrading the operator from version N to N+1 (#3497)
---
packages/kn-plugin-workflow/Makefile | 13 +++-
.../kie/kogito/migrator/postgresql/DBMigrator.java | 26 +++----
.../config/default/manager_auth_proxy_patch.yaml | 4 +-
.../controller/platform/db_migrator_job.go | 88 ++++++++++++++++------
.../controller/platform/db_migrator_job_test.go | 15 +++-
packages/sonataflow-operator/operator.yaml | 4 +-
.../utils/kubernetes/autoscaling.go | 10 ++-
.../utils/kubernetes/deployment.go | 25 ++++++
.../sonataflow-operator/utils/kubernetes/jobs.go | 66 ++++++++++++++++
9 files changed, 202 insertions(+), 49 deletions(-)
diff --git a/packages/kn-plugin-workflow/Makefile
b/packages/kn-plugin-workflow/Makefile
index c88439b9282..c453713b12b 100644
--- a/packages/kn-plugin-workflow/Makefile
+++ b/packages/kn-plugin-workflow/Makefile
@@ -36,6 +36,9 @@ LDFLAGS := "-X
$(SET_QUARKUS_PLATFORM_GROUP_ID) -X $(SET_Q
KIND_VERSION ?= v0.20.0
OLM_VERSION = v0.31.0
+KIND_CLUSTER ?= kind
+KUBE_RBAC_PROXY_SRC := quay.io/brancz/kube-rbac-proxy:v0.13.1
+KUBE_RBAC_PROXY_DST := gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0
ARCH := $(shell uname -m)
ifeq ($(ARCH),arm64)
@@ -72,6 +75,7 @@ test-e2e:
@$(MAKE) install-kind
@$(MAKE) create-cluster
@$(MAKE) install-operator-framework
+ @$(MAKE) kind-preload-images
@$(MAKE) go-test-e2e
@$(MAKE) go-test-e2e-report
@@ -92,7 +96,7 @@ install-operator-framework:
go-test-e2e:
rm -rf dist-tests-e2e
mkdir dist-tests-e2e
- go test -v ./e2e-tests/... -tags e2e_tests -timeout 20m 2>&1 | tee
./dist-tests-e2e/go-test-output-e2e.txt
+ go test -v ./e2e-tests/... -tags e2e_tests -run TestQuarkusRunCommand
-timeout 20m 2>&1 | tee ./dist-tests-e2e/go-test-output-e2e.txt
.PHONY: go-test-e2e-report
go-test-e2e-report:
@@ -100,3 +104,10 @@ go-test-e2e-report:
-set-exit-code \
-in ./dist-tests-e2e/go-test-output-e2e.txt \
-out ./dist-tests-e2e/junit-report-it.xml
+
+.PHONY: kind-preload-images
+kind-preload-images:
+ @echo "Preloading kube-rbac-proxy image into kind..."
+ docker pull $(KUBE_RBAC_PROXY_SRC)
+ docker tag $(KUBE_RBAC_PROXY_SRC) $(KUBE_RBAC_PROXY_DST)
+ kind load docker-image --name $(KIND_CLUSTER) $(KUBE_RBAC_PROXY_DST)
\ No newline at end of file
diff --git
a/packages/kogito-db-migrator-tool/src/main/java/org/kie/kogito/migrator/postgresql/DBMigrator.java
b/packages/kogito-db-migrator-tool/src/main/java/org/kie/kogito/migrator/postgresql/DBMigrator.java
index 9031a4365f2..903286b86cc 100644
---
a/packages/kogito-db-migrator-tool/src/main/java/org/kie/kogito/migrator/postgresql/DBMigrator.java
+++
b/packages/kogito-db-migrator-tool/src/main/java/org/kie/kogito/migrator/postgresql/DBMigrator.java
@@ -18,7 +18,6 @@
*/
package org.kie.kogito.migrator.postgresql;
-import io.quarkus.runtime.Quarkus;
import io.quarkus.runtime.QuarkusApplication;
import io.quarkus.runtime.annotations.QuarkusMain;
import jakarta.inject.Inject;
@@ -32,11 +31,12 @@ import
org.eclipse.microprofile.config.inject.ConfigProperty;
@QuarkusMain
public class DBMigrator implements QuarkusApplication {
- private int SUCCESS_DB_MIGRATION = 0;
- private int ERR_DATA_INDEX_DB_CONN = -1;
- private int ERR_JOBS_SERVICE_DB_CONN = -2;
- private int ERR_DATA_INDEX_MIGRATION = -3;
- private int ERR_JOBS_SERVICE_MIGRATION = -4;
+ // Exit codes >= 0 please!
+ private static final int SUCCESS_DB_MIGRATION = 0;
+ private static final int ERR_DATA_INDEX_DB_CONN = 1;
+ private static final int ERR_JOBS_SERVICE_DB_CONN = 2;
+ private static final int ERR_DATA_INDEX_MIGRATION = 3;
+ private static final int ERR_JOBS_SERVICE_MIGRATION = 4;
@Inject
MigrationService service;
@@ -56,16 +56,14 @@ public class DBMigrator implements QuarkusApplication {
try {
dbConnectionChecker.checkDataIndexDBConnection();
} catch (SQLException e) {
- Log.error( "Error obtaining data index database connection.
Cannot proceed, exiting.");
- Quarkus.asyncExit(ERR_DATA_INDEX_DB_CONN);
+ Log.error( "Error obtaining data index database connection.
Cannot proceed, exiting", e);
return ERR_DATA_INDEX_DB_CONN;
}
try{
service.migrateDataIndex();
} catch ( FlywayException fe ){
- Log.error( "Error migrating data index database, flyway
service exception occured, please check logs.");
- Quarkus.asyncExit(ERR_DATA_INDEX_MIGRATION);
+ Log.error( "Error migrating data index database, flyway
service exception occurred, please check logs.", fe);
return ERR_DATA_INDEX_MIGRATION;
}
}
@@ -74,21 +72,17 @@ public class DBMigrator implements QuarkusApplication {
try {
dbConnectionChecker.checkJobsServiceDBConnection();
} catch (SQLException e) {
- Log.error( "Error obtaining jobs service database connection.
Cannot proceed, exiting.");
- Quarkus.asyncExit(ERR_JOBS_SERVICE_DB_CONN);
+ Log.error( "Error obtaining jobs service database connection.
Cannot proceed, exiting.", e);
return ERR_JOBS_SERVICE_DB_CONN;
}
try{
service.migrateJobsService();
} catch ( FlywayException fe ){
- Log.error( "Error migrating jobs service database, flyway
service exception occured, please check logs.");
- Quarkus.asyncExit(ERR_JOBS_SERVICE_MIGRATION);
+ Log.error( "Error migrating jobs service database, flyway
service exception occured, please check logs.", fe);
return ERR_JOBS_SERVICE_MIGRATION;
}
}
-
- Quarkus.asyncExit(SUCCESS_DB_MIGRATION);
return SUCCESS_DB_MIGRATION;
}
}
\ No newline at end of file
diff --git
a/packages/sonataflow-operator/config/default/manager_auth_proxy_patch.yaml
b/packages/sonataflow-operator/config/default/manager_auth_proxy_patch.yaml
index 4506f8a6d55..43eab6fef2b 100644
--- a/packages/sonataflow-operator/config/default/manager_auth_proxy_patch.yaml
+++ b/packages/sonataflow-operator/config/default/manager_auth_proxy_patch.yaml
@@ -27,10 +27,10 @@ spec:
spec:
containers:
- name: kube-rbac-proxy
- image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1
+ image: quay.io/brancz/kube-rbac-proxy:v0.13.1
env:
- name: RELATED_IMAGE_KUBE_RBAC_PROXY
- value: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1
+ value: quay.io/brancz/kube-rbac-proxy:v0.13.1
args:
- "--secure-listen-address=0.0.0.0:8443"
- "--upstream=http://127.0.0.1:8080/"
diff --git
a/packages/sonataflow-operator/internal/controller/platform/db_migrator_job.go
b/packages/sonataflow-operator/internal/controller/platform/db_migrator_job.go
index a045197cce5..648ddc77692 100644
---
a/packages/sonataflow-operator/internal/controller/platform/db_migrator_job.go
+++
b/packages/sonataflow-operator/internal/controller/platform/db_migrator_job.go
@@ -23,9 +23,11 @@ import (
"context"
"errors"
"fmt"
+ "hash/fnv"
"strconv"
"github.com/apache/incubator-kie-tools/packages/sonataflow-operator/api/version"
+
"github.com/apache/incubator-kie-tools/packages/sonataflow-operator/utils/kubernetes"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
@@ -52,6 +54,11 @@ type QuarkusDataSource struct {
}
type DBMigratorJob struct {
+ Name string
+ Data DBMigratorJobData
+}
+
+type DBMigratorJobData struct {
MigrateDBDataIndex bool
DataIndexDataSource *QuarkusDataSource
MigrateDBJobsService bool
@@ -101,6 +108,15 @@ func getJdbcUrl(env []corev1.EnvVar) string {
return ""
}
+func getDbMigratorJobName(version string) (string, error) {
+ h := fnv.New32a()
+ _, err := h.Write([]byte(version))
+ if err != nil {
+ return "", fmt.Errorf("failed to calculate
sonataflow-db-migrator job name for version: %s, %v", version, err)
+ }
+ return fmt.Sprintf("%s-%x", dbMigrationJobName, h.Sum32()), nil
+}
+
// getQuarkusDSFromServicePersistence Returns QuarkusDataSource from service
level persistence config
func getQuarkusDSFromServicePersistence(platform
*operatorapi.SonataFlowPlatform, persistenceOptionsSpec
*operatorapi.PersistenceOptionsSpec, defaultSchemaName string)
*QuarkusDataSource {
klog.InfoS("Using service level persistence for PostgreSQL",
"defaultSchemaName", defaultSchemaName)
@@ -153,7 +169,7 @@ func getQuarkusDataSourceFromPersistence(platform
*operatorapi.SonataFlowPlatfor
return nil
}
-func NewDBMigratorJobData(ctx context.Context, client client.Client, platform
*operatorapi.SonataFlowPlatform, pshDI services.PlatformServiceHandler, pshJS
services.PlatformServiceHandler) *DBMigratorJob {
+func NewDBMigratorJobData(ctx context.Context, client client.Client, platform
*operatorapi.SonataFlowPlatform, pshDI services.PlatformServiceHandler, pshJS
services.PlatformServiceHandler) *DBMigratorJobData {
diJobsBasedDBMigration := false
jsJobsBasedDBMigration := false
@@ -177,7 +193,7 @@ func NewDBMigratorJobData(ctx context.Context, client
client.Client, platform *o
quarkusDataSourceJobService =
getQuarkusDataSourceFromPersistence(platform,
platform.Spec.Services.JobService.Persistence, pshJS.GetServiceName())
}
- return &DBMigratorJob{
+ return &DBMigratorJobData{
MigrateDBDataIndex: diJobsBasedDBMigration,
DataIndexDataSource: quarkusDataSourceDataIndex,
MigrateDBJobsService: jsJobsBasedDBMigration,
@@ -203,10 +219,34 @@ func IsJobsBasedDBMigration(platform
*operatorapi.SonataFlowPlatform, pshDI serv
}
func createOrUpdateDBMigrationJob(ctx context.Context, client client.Client,
platform *operatorapi.SonataFlowPlatform, pshDI
services.PlatformServiceHandler, pshJS services.PlatformServiceHandler)
(*DBMigratorJob, error) {
- dbMigratorJob := NewDBMigratorJobData(ctx, client, platform, pshDI,
pshJS)
+ dbMigratorJobData := NewDBMigratorJobData(ctx, client, platform, pshDI,
pshJS)
+ var dbMigratorJob *DBMigratorJob
// Invoke DB Migration only if both or either DI/JS services are
requested, in addition to DBMigrationStrategyJob
- if dbMigratorJob != nil {
+ if dbMigratorJobData != nil {
+ jobName, err :=
getDbMigratorJobName(version.GetImageTagVersion())
+ if err != nil {
+ return nil, err
+ }
+ dbMigratorJob = &DBMigratorJob{
+ Name: jobName,
+ Data: *dbMigratorJobData,
+ }
+ currentJob, err := kubernetes.FindJob(ctx, client,
platform.Namespace, jobName)
+ if err != nil {
+ return nil, fmt.Errorf("failed to detect if the job
%s/%s already exists, %v", platform.Namespace, jobName, err)
+ }
+ if currentJob == nil {
+ // The job about to be created do not exist, we might
be in the middle of a version N to version N+1 upgrade, or the regular creation.
+ // Delete DI and JS deployments if present for safety,
we must avoid serving requests during the DB schema migration. (Unexpected
results, data, etc., might happen)
+ // Both will be recreated in upcoming recon cycle after
the job finishes. (we can keep the respective Services if already created to
keep better response time)
+ if err = kubernetes.SafeDeleteDeployment(ctx, client,
platform.Namespace, pshDI.GetServiceName()); err != nil {
+ return nil, fmt.Errorf("failed to delete DI
deployment: %s/%s, %v", platform.Namespace, pshDI.GetServiceName(), err)
+ }
+ if err = kubernetes.SafeDeleteDeployment(ctx, client,
platform.Namespace, pshJS.GetServiceName()); err != nil {
+ return nil, fmt.Errorf("failed to delete JS
deployment: %s/%s, %v", platform.Namespace, pshJS.GetServiceName(), err)
+ }
+ }
job := createJobDBMigration(platform, dbMigratorJob)
klog.V(log.I).InfoS("Starting DB Migration Job: ", "namespace",
platform.Namespace, "job", job.Name)
if err := controllerutil.SetControllerReference(platform, job,
client.Scheme()); err != nil {
@@ -266,20 +306,20 @@ func createJobDBMigration(platform
*operatorapi.SonataFlowPlatform, dbmj *DBMigr
diQuarkusDataSource := newQuarkusDataSource(nonEmptyValue,
nonEmptyValue, nonEmptyValue, nonEmptyValue, nonEmptyValue)
jsQuarkusDataSource := newQuarkusDataSource(nonEmptyValue,
nonEmptyValue, nonEmptyValue, nonEmptyValue, nonEmptyValue)
- if dbmj.MigrateDBDataIndex && dbmj.DataIndexDataSource != nil {
- diQuarkusDataSource.JdbcUrl = dbmj.DataIndexDataSource.JdbcUrl
- diQuarkusDataSource.SecretRefName =
dbmj.DataIndexDataSource.SecretRefName
- diQuarkusDataSource.SecretUserKey =
dbmj.DataIndexDataSource.SecretUserKey
- diQuarkusDataSource.SecretPasswordKey =
dbmj.DataIndexDataSource.SecretPasswordKey
- diQuarkusDataSource.Schema = dbmj.DataIndexDataSource.Schema
+ if dbmj.Data.MigrateDBDataIndex && dbmj.Data.DataIndexDataSource != nil
{
+ diQuarkusDataSource.JdbcUrl =
dbmj.Data.DataIndexDataSource.JdbcUrl
+ diQuarkusDataSource.SecretRefName =
dbmj.Data.DataIndexDataSource.SecretRefName
+ diQuarkusDataSource.SecretUserKey =
dbmj.Data.DataIndexDataSource.SecretUserKey
+ diQuarkusDataSource.SecretPasswordKey =
dbmj.Data.DataIndexDataSource.SecretPasswordKey
+ diQuarkusDataSource.Schema =
dbmj.Data.DataIndexDataSource.Schema
}
- if dbmj.MigrateDBJobsService && dbmj.JobsServiceDataSource != nil {
- jsQuarkusDataSource.JdbcUrl = dbmj.JobsServiceDataSource.JdbcUrl
- jsQuarkusDataSource.SecretRefName =
dbmj.JobsServiceDataSource.SecretRefName
- jsQuarkusDataSource.SecretUserKey =
dbmj.JobsServiceDataSource.SecretUserKey
- jsQuarkusDataSource.SecretPasswordKey =
dbmj.JobsServiceDataSource.SecretPasswordKey
- jsQuarkusDataSource.Schema = dbmj.JobsServiceDataSource.Schema
+ if dbmj.Data.MigrateDBJobsService && dbmj.Data.JobsServiceDataSource !=
nil {
+ jsQuarkusDataSource.JdbcUrl =
dbmj.Data.JobsServiceDataSource.JdbcUrl
+ jsQuarkusDataSource.SecretRefName =
dbmj.Data.JobsServiceDataSource.SecretRefName
+ jsQuarkusDataSource.SecretUserKey =
dbmj.Data.JobsServiceDataSource.SecretUserKey
+ jsQuarkusDataSource.SecretPasswordKey =
dbmj.Data.JobsServiceDataSource.SecretPasswordKey
+ jsQuarkusDataSource.Schema =
dbmj.Data.JobsServiceDataSource.Schema
}
diDBSecretRef := corev1.LocalObjectReference{
@@ -290,16 +330,16 @@ func createJobDBMigration(platform
*operatorapi.SonataFlowPlatform, dbmj *DBMigr
Name: jsQuarkusDataSource.SecretRefName,
}
- dbMigrationJobCfg := newDBMigrationJobCfg()
+ dbMigrationJobCfg := newDBMigrationJobCfg(dbmj.Name)
lbl, _ := getServicesLabelsMap(platform.Name, platform.Namespace,
fmt.Sprintf("%s-%s", "sonataflow-db-job", dbMigrationJobCfg.JobName),
dbMigrationJobCfg.JobName, fmt.Sprintf("%s-%s", platform.Name,
dbMigrationJobCfg.JobName), platform.Name, "sonataflow-operator")
envVars := make([]corev1.EnvVar, 0)
envVars = append(envVars, corev1.EnvVar{
Name: migrateDBDataIndex,
- Value: strconv.FormatBool(dbmj.MigrateDBDataIndex),
+ Value: strconv.FormatBool(dbmj.Data.MigrateDBDataIndex),
})
- if dbmj.MigrateDBDataIndex {
+ if dbmj.Data.MigrateDBDataIndex {
envVars = append(envVars,
corev1.EnvVar{
Name: quarkusDataSourceDataIndexJdbcURL,
@@ -331,9 +371,9 @@ func createJobDBMigration(platform
*operatorapi.SonataFlowPlatform, dbmj *DBMigr
envVars = append(envVars, corev1.EnvVar{
Name: migrateDBJobsService,
- Value: strconv.FormatBool(dbmj.MigrateDBJobsService),
+ Value: strconv.FormatBool(dbmj.Data.MigrateDBJobsService),
})
- if dbmj.MigrateDBJobsService {
+ if dbmj.Data.MigrateDBJobsService {
envVars = append(envVars,
corev1.EnvVar{
Name: quarkusDataSourceJobsServiceJdbcURL,
@@ -390,7 +430,7 @@ func createJobDBMigration(platform
*operatorapi.SonataFlowPlatform, dbmj *DBMigr
// GetDBMigrationJobStatus Returns db migration job status
func (dbmj DBMigratorJob) GetDBMigrationJobStatus(ctx context.Context, client
client.Client, platform *operatorapi.SonataFlowPlatform) (*DBMigratorJobStatus,
error) {
- job, err := client.BatchV1().Jobs(platform.Namespace).Get(ctx,
dbMigrationJobName, metav1.GetOptions{})
+ job, err := client.BatchV1().Jobs(platform.Namespace).Get(ctx,
dbmj.Name, metav1.GetOptions{})
if err != nil {
klog.V(log.E).InfoS("Error getting DB migrator job while
monitoring completion: ", "error", err, "namespace", platform.Namespace, "job",
job.Name)
return nil, err
@@ -429,9 +469,9 @@ func getKogitoDBMigratorToolImageName() string {
return imgTag
}
-func newDBMigrationJobCfg() *DBMigrationJobCfg {
+func newDBMigrationJobCfg(name string) *DBMigrationJobCfg {
return &DBMigrationJobCfg{
- JobName: dbMigrationJobName,
+ JobName: name,
ContainerName: dbMigrationContainerName,
ToolImageName: getKogitoDBMigratorToolImageName(),
}
diff --git
a/packages/sonataflow-operator/internal/controller/platform/db_migrator_job_test.go
b/packages/sonataflow-operator/internal/controller/platform/db_migrator_job_test.go
index bbf44ab82f7..ff0059e0f75 100644
---
a/packages/sonataflow-operator/internal/controller/platform/db_migrator_job_test.go
+++
b/packages/sonataflow-operator/internal/controller/platform/db_migrator_job_test.go
@@ -20,8 +20,12 @@
package platform
import (
+ "fmt"
+ "hash/fnv"
"testing"
+
"github.com/apache/incubator-kie-tools/packages/sonataflow-operator/api/version"
+
"github.com/stretchr/testify/assert"
"github.com/apache/incubator-kie-tools/packages/sonataflow-operator/api/v1alpha08"
@@ -38,7 +42,6 @@ const (
DBSecretKeyRef = "dbSecretName"
UserNameKey = "postgresUserKey"
PasswordKey = "postgresPasswordKey"
- DbMigrationJobName = "sonataflow-db-migrator-job"
DbMigrationContainerName = "db-migration-container"
)
@@ -68,8 +71,14 @@ func TestDbMigratorJob(t *testing.T) {
})
t.Run("verify new new db migration job config", func(t *testing.T) {
- dbMigrationJobCfg := newDBMigrationJobCfg()
- assert.Equal(t, dbMigrationJobCfg.JobName, DbMigrationJobName)
+ jobName, err :=
getDbMigratorJobName(version.GetImageTagVersion())
+ assert.NoError(t, err)
+ versionHash := fnv.New32a()
+ _, err = versionHash.Write([]byte(version.GetImageTagVersion()))
+ assert.NoError(t, err)
+ assert.Equal(t, fmt.Sprintf("sonataflow-db-migrator-job-%x",
versionHash.Sum32()), jobName)
+ dbMigrationJobCfg := newDBMigrationJobCfg(jobName)
+ assert.Equal(t, dbMigrationJobCfg.JobName, jobName)
assert.Equal(t, dbMigrationJobCfg.ContainerName,
DbMigrationContainerName)
})
}
diff --git a/packages/sonataflow-operator/operator.yaml
b/packages/sonataflow-operator/operator.yaml
index 86f720fea0b..a9e93fa95ee 100644
--- a/packages/sonataflow-operator/operator.yaml
+++ b/packages/sonataflow-operator/operator.yaml
@@ -28495,8 +28495,8 @@ spec:
- --v=0
env:
- name: RELATED_IMAGE_KUBE_RBAC_PROXY
- value: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1
- image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1
+ value: quay.io/brancz/kube-rbac-proxy:v0.13.1
+ image: quay.io/brancz/kube-rbac-proxy:v0.13.1
name: kube-rbac-proxy
ports:
- containerPort: 8443
diff --git a/packages/sonataflow-operator/utils/kubernetes/autoscaling.go
b/packages/sonataflow-operator/utils/kubernetes/autoscaling.go
index d5274b26dca..fc7ce2e8533 100644
--- a/packages/sonataflow-operator/utils/kubernetes/autoscaling.go
+++ b/packages/sonataflow-operator/utils/kubernetes/autoscaling.go
@@ -86,7 +86,15 @@ func HPAIsWorking(hpa
*autoscalingv2.HorizontalPodAutoscaler) bool {
// HPAEqualsBySpec returns true if to HorizontalPodAutoscaler has the same
Spec, false in any other case.
func HPAEqualsBySpec(hpa1, hpa2 *autoscalingv2.HorizontalPodAutoscaler) bool {
- return reflect.DeepEqual(hpa1, hpa2)
+ var hpa1Spec *autoscalingv2.HorizontalPodAutoscalerSpec = nil
+ var hpa2Spec *autoscalingv2.HorizontalPodAutoscalerSpec = nil
+ if hpa1 != nil {
+ hpa1Spec = &hpa1.Spec
+ }
+ if hpa2 != nil {
+ hpa2Spec = &hpa2.Spec
+ }
+ return reflect.DeepEqual(hpa1Spec, hpa2Spec)
}
// IsHPAndTargetsAKind returns (*autoscalingv2.HorizontalPodAutoscaler, true)
if the object is a HorizontalPodAutoscaler
diff --git a/packages/sonataflow-operator/utils/kubernetes/deployment.go
b/packages/sonataflow-operator/utils/kubernetes/deployment.go
index 08c04b9612a..95b05ca30d1 100644
--- a/packages/sonataflow-operator/utils/kubernetes/deployment.go
+++ b/packages/sonataflow-operator/utils/kubernetes/deployment.go
@@ -20,16 +20,22 @@
package kubernetes
import (
+ "context"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"time"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
+
"github.com/apache/incubator-kie-tools/packages/sonataflow-operator/api/metadata"
operatorapi
"github.com/apache/incubator-kie-tools/packages/sonataflow-operator/api/v1alpha08"
"github.com/apache/incubator-kie-tools/packages/sonataflow-operator/log"
@@ -216,3 +222,22 @@ func DeploymentReplicasIsGreaterThan(deployment
*appsv1.Deployment, value int32)
func DeploymentIsScaledToZero(deployment *appsv1.Deployment) bool {
return deployment.Spec.Replicas != nil && *deployment.Spec.Replicas ==
int32(0)
}
+
+// SafeDeleteDeployment deletes a potentially existing Deployment, ignoring
the not existing error.
+func SafeDeleteDeployment(ctx context.Context, c client.Client, namespace,
name string) error {
+ err := c.Delete(ctx, &appsv1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ },
+ })
+ if err != nil {
+ if k8serrors.IsNotFound(err) {
+ klog.V(log.D).Infof("Deployment %s/%s was already
deleted or never existed.", namespace, name)
+ return nil
+ } else {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/packages/sonataflow-operator/utils/kubernetes/jobs.go
b/packages/sonataflow-operator/utils/kubernetes/jobs.go
new file mode 100644
index 00000000000..c86da15115b
--- /dev/null
+++ b/packages/sonataflow-operator/utils/kubernetes/jobs.go
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package kubernetes
+
+import (
+ "context"
+
+ batchv1 "k8s.io/api/batch/v1"
+ corev1 "k8s.io/api/core/v1"
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// FindJob returns a Job given the namespace and name, nil if not exists.
+func FindJob(ctx context.Context, cli client.Client, namespace, name string)
(*batchv1.Job, error) {
+ job := &batchv1.Job{}
+ err := cli.Get(ctx, client.ObjectKey{
+ Namespace: namespace,
+ Name: name,
+ }, job)
+ if err != nil {
+ if k8serrors.IsNotFound(err) {
+ return nil, nil
+ }
+ return nil, err
+ }
+ return job, nil
+}
+
+func FindJobs(ctx context.Context, cli client.Client, namespace string)
(*batchv1.JobList, error) {
+ jobList := &batchv1.JobList{}
+ if err := cli.List(ctx, jobList, client.InNamespace(namespace)); err !=
nil {
+ return nil, err
+ }
+ return jobList, nil
+}
+
+// JobHasFinished returns a pair (bool1, bool2) indicating first if the Job
has finished, and lastly if it has finished
+// successfully.
+// bool1 == true, when finished.
+// boo2 == true, when finished successfully, false in other case.
+func JobHasFinished(job *batchv1.Job) (bool, bool) {
+ for _, c := range job.Status.Conditions {
+ if (c.Type == batchv1.JobComplete || c.Type ==
batchv1.JobFailed) && c.Status == corev1.ConditionTrue {
+ return true, c.Type == batchv1.JobComplete
+ }
+ }
+ return false, false
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]