This is an automated email from the ASF dual-hosted git repository.
jimin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-seata-k8s.git
The following commit(s) were added to refs/heads/master by this push:
new c522f45 optimize: optimize synchronizers.go (#53)
c522f45 is described below
commit c522f455adddba4854ff953ce52c2ea965c2b9bb
Author: jimin <[email protected]>
AuthorDate: Sun Jan 18 19:13:51 2026 +0800
optimize: optimize synchronizers.go (#53)
---
codecov.yml | 5 +
pkg/seata/synchronizers.go | 83 ++++--
pkg/seata/synchronizers_test.go | 564 ++++++++++++++++++----------------------
3 files changed, 321 insertions(+), 331 deletions(-)
diff --git a/codecov.yml b/codecov.yml
index 9c66d2c..e047001 100644
--- a/codecov.yml
+++ b/codecov.yml
@@ -33,6 +33,11 @@ ignore:
- "**/*_test.go"
- "**/zz_generated*.go"
- "hack"
+ - "vendor"
+ - "**/*.yaml"
+ - "**/*.yml"
+ - "**/*.md"
+ - "**/*.json"
fixes:
- "::gocov"
diff --git a/pkg/seata/synchronizers.go b/pkg/seata/synchronizers.go
index 30c165c..ba0defb 100644
--- a/pkg/seata/synchronizers.go
+++ b/pkg/seata/synchronizers.go
@@ -21,11 +21,11 @@ import (
"bytes"
"context"
"encoding/json"
- "errors"
"fmt"
"io"
"net/http"
"net/url"
+ "time"
seatav1alpha1 "github.com/apache/seata-k8s/api/v1alpha1"
"github.com/apache/seata-k8s/pkg/utils"
@@ -35,6 +35,13 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"
)
+const (
+ // HTTP client timeout
+ httpClientTimeout = 30 * time.Second
+ // HTTP request timeout
+ httpRequestTimeout = 10 * time.Second
+)
+
func SyncService(curr *apiv1.Service, next *apiv1.Service) {
curr.Spec.Ports = next.Spec.Ports
}
@@ -52,62 +59,88 @@ type rspData struct {
}
func changeCluster(s *seatav1alpha1.SeataServer, i int32, username string,
password string) error {
- client := http.Client{}
+ // Create HTTP client with timeout
+ client := &http.Client{
+ Timeout: httpClientTimeout,
+ }
host := fmt.Sprintf("%s-%d.%s.%s.svc.cluster.local:%d", s.Name, i,
s.Spec.ServiceName, s.Namespace, s.Spec.Ports.ConsolePort)
+ // Step 1: Login to get token
values := map[string]string{"username": username, "password": password}
- jsonValue, _ := json.Marshal(values)
- loginUrl := fmt.Sprintf("http://%s/api/v1/auth/login", host)
- rsp, err := client.Post(loginUrl, "application/json", bytes.NewBuffer(jsonValue))
+ jsonValue, err := json.Marshal(values)
+ if err != nil {
+ return fmt.Errorf("failed to marshal login credentials: %w", err)
+ }
+
+ loginURL := fmt.Sprintf("http://%s/api/v1/auth/login", host)
+ ctx, cancel := context.WithTimeout(context.Background(), httpRequestTimeout)
+ defer cancel()
+
+ req, err := http.NewRequestWithContext(ctx, "POST", loginURL, bytes.NewBuffer(jsonValue))
+ if err != nil {
+ return fmt.Errorf("failed to create login request: %w", err)
+ }
+ req.Header.Set("Content-Type", "application/json")
+
+ rsp, err := client.Do(req)
if err != nil {
- return err
+ return fmt.Errorf("failed to send login request to %s: %w", host, err)
}
defer rsp.Body.Close()
- d := &rspData{}
- var tokenStr string
if rsp.StatusCode != http.StatusOK {
- return errors.New("login failed")
+ return fmt.Errorf("login failed with status code %d for host
%s", rsp.StatusCode, host)
}
body, err := io.ReadAll(rsp.Body)
if err != nil {
- return err
+ return fmt.Errorf("failed to read login response: %w", err)
}
- if err = json.Unmarshal(body, &d); err != nil {
- return err
+
+ loginData := &rspData{}
+ if err = json.Unmarshal(body, loginData); err != nil {
+ return fmt.Errorf("failed to unmarshal login response: %w", err)
}
- if !d.Success {
- return errors.New(d.Message)
+ if !loginData.Success {
+ return fmt.Errorf("login failed: %s", loginData.Message)
}
- tokenStr = d.Data
+ tokenStr := loginData.Data
- targetUrl := fmt.Sprintf("http://%s/metadata/v1/changeCluster?raftClusterStr=%s",
+ // Step 2: Call changeCluster API
+ targetURL := fmt.Sprintf("http://%s/metadata/v1/changeCluster?raftClusterStr=%s",
host, url.QueryEscape(utils.ConcatRaftServerAddress(s)))
- req, _ := http.NewRequest("POST", targetUrl, nil)
+
+ ctx, cancel = context.WithTimeout(context.Background(), httpRequestTimeout)
+ defer cancel()
+
+ req, err = http.NewRequestWithContext(ctx, "POST", targetURL, nil)
+ if err != nil {
+ return fmt.Errorf("failed to create changeCluster request: %w",
err)
+ }
req.Header.Set("Authorization", tokenStr)
+
rsp, err = client.Do(req)
if err != nil {
- return err
+ return fmt.Errorf("failed to send changeCluster request to %s:
%w", host, err)
}
defer rsp.Body.Close()
- d = &rspData{}
if rsp.StatusCode != http.StatusOK {
- return errors.New("failed to changeCluster")
+ return fmt.Errorf("changeCluster failed with status code %d for
host %s", rsp.StatusCode, host)
}
body, err = io.ReadAll(rsp.Body)
if err != nil {
- return err
+ return fmt.Errorf("failed to read changeCluster response: %w",
err)
}
- if err = json.Unmarshal(body, &d); err != nil {
- return err
+ clusterData := &rspData{}
+ if err = json.Unmarshal(body, clusterData); err != nil {
+ return fmt.Errorf("failed to unmarshal changeCluster response:
%w", err)
}
- if !d.Success {
- return errors.New(d.Message)
+ if !clusterData.Success {
+ return fmt.Errorf("changeCluster failed: %s",
clusterData.Message)
}
return nil
}
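
[The rewritten changeCluster above pairs a client-wide timeout with a per-request context deadline and wraps every failure with fmt.Errorf and %w. For readers outside the diff, here is a minimal, self-contained sketch of that request pattern; the postJSON helper, URL, and payload are illustrative placeholders, not part of this commit or the Seata console API.]

package main

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "time"
)

// postJSON bounds a single POST with its own context deadline while the
// shared client enforces an overall timeout, mirroring the pattern above.
func postJSON(client *http.Client, url string, payload any) ([]byte, int, error) {
    body, err := json.Marshal(payload)
    if err != nil {
        return nil, 0, fmt.Errorf("failed to marshal payload: %w", err)
    }
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewBuffer(body))
    if err != nil {
        return nil, 0, fmt.Errorf("failed to create request: %w", err)
    }
    req.Header.Set("Content-Type", "application/json")

    rsp, err := client.Do(req)
    if err != nil {
        return nil, 0, fmt.Errorf("failed to send request: %w", err)
    }
    defer rsp.Body.Close()

    // Read the body before cancel fires: canceling the request context
    // also aborts in-flight body reads, which is why changeCluster keeps
    // each request's read inside the same scope as its context.
    data, err := io.ReadAll(rsp.Body)
    if err != nil {
        return nil, 0, fmt.Errorf("failed to read response: %w", err)
    }
    return data, rsp.StatusCode, nil
}

func main() {
    client := &http.Client{Timeout: 30 * time.Second}
    body, status, err := postJSON(client, "http://example.invalid/login",
        map[string]string{"username": "u", "password": "p"})
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    fmt.Println(status, string(body))
}
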
diff --git a/pkg/seata/synchronizers_test.go b/pkg/seata/synchronizers_test.go
index 0fe931e..ba028c9 100644
--- a/pkg/seata/synchronizers_test.go
+++ b/pkg/seata/synchronizers_test.go
@@ -28,387 +28,339 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-func TestSyncService(t *testing.T) {
- // Current service
- curr := &apiv1.Service{
+// Helper function: create int32 pointer
+func int32Ptr(i int32) *int32 {
+ return &i
+}
+
+// Helper function: create test Service
+func createTestService(name, namespace string, ports []apiv1.ServicePort) *apiv1.Service {
+ return &apiv1.Service{
ObjectMeta: metav1.ObjectMeta{
- Name: "test-service",
- Namespace: "default",
+ Name: name,
+ Namespace: namespace,
},
Spec: apiv1.ServiceSpec{
- Ports: []apiv1.ServicePort{
- {Name: "old-port", Port: 8080},
- },
+ Ports: ports,
},
}
-
- // Next/desired service
- next := &apiv1.Service{
- Spec: apiv1.ServiceSpec{
- Ports: []apiv1.ServicePort{
- {Name: "service-port", Port: 8091},
- {Name: "console-port", Port: 7091},
- {Name: "raft-port", Port: 9091},
- },
- },
- }
-
- SyncService(curr, next)
-
- // Verify ports are synced
- if len(curr.Spec.Ports) != 3 {
- t.Errorf("Expected 3 ports after sync, got %d",
len(curr.Spec.Ports))
- }
-
- portMap := make(map[string]int32)
- for _, port := range curr.Spec.Ports {
- portMap[port.Name] = port.Port
- }
-
- if portMap["service-port"] != 8091 {
- t.Errorf("Expected service-port 8091, got %d",
portMap["service-port"])
- }
- if portMap["console-port"] != 7091 {
- t.Errorf("Expected console-port 7091, got %d",
portMap["console-port"])
- }
- if portMap["raft-port"] != 9091 {
- t.Errorf("Expected raft-port 9091, got %d",
portMap["raft-port"])
- }
}
-func TestSyncStatefulSet(t *testing.T) {
- replicas1 := int32(1)
- replicas3 := int32(3)
-
- // Current StatefulSet
- curr := &appsv1.StatefulSet{
+// Helper function: create test StatefulSet
+func createTestStatefulSet(name, namespace string, replicas *int32, containers []apiv1.Container, labels map[string]string) *appsv1.StatefulSet {
+ return &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
- Name: "test-sts",
- Namespace: "default",
+ Name: name,
+ Namespace: namespace,
},
Spec: appsv1.StatefulSetSpec{
- Replicas: &replicas1,
- Template: apiv1.PodTemplateSpec{
- Spec: apiv1.PodSpec{
- Containers: []apiv1.Container{
- {
- Name: "old-container",
- Image: "old-image:v1",
- },
- },
- },
- },
- },
- }
-
- // Next/desired StatefulSet
- next := &appsv1.StatefulSet{
- Spec: appsv1.StatefulSetSpec{
- Replicas: &replicas3,
+ Replicas: replicas,
Template: apiv1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- "app": "seata",
- },
+ Labels: labels,
},
Spec: apiv1.PodSpec{
- Containers: []apiv1.Container{
- {
- Name: "seata-server",
- Image: "apache/seata-server:latest",
- Resources: apiv1.ResourceRequirements{
- Requests: apiv1.ResourceList{
- apiv1.ResourceCPU: resource.MustParse("500m"),
- apiv1.ResourceMemory: resource.MustParse("1Gi"),
- },
- },
- },
- },
+ Containers: containers,
},
},
},
}
-
- SyncStatefulSet(curr, next)
-
- // Verify template is synced
- if len(curr.Spec.Template.Spec.Containers) != 1 {
- t.Errorf("Expected 1 container after sync, got %d",
len(curr.Spec.Template.Spec.Containers))
- }
-
- container := curr.Spec.Template.Spec.Containers[0]
- if container.Name != "seata-server" {
- t.Errorf("Expected container name 'seata-server', got '%s'",
container.Name)
- }
-
- if container.Image != "apache/seata-server:latest" {
- t.Errorf("Expected image 'apache/seata-server:latest', got
'%s'", container.Image)
- }
-
- // Verify replicas are synced
- if *curr.Spec.Replicas != 3 {
- t.Errorf("Expected 3 replicas after sync, got %d",
*curr.Spec.Replicas)
- }
-
- // Verify labels are synced
- if curr.Spec.Template.Labels["app"] != "seata" {
- t.Errorf("Expected template label app='seata', got '%s'",
curr.Spec.Template.Labels["app"])
- }
}
-func TestSyncStatefulSet_EmptyReplicas(t *testing.T) {
- replicas1 := int32(1)
-
- // Current StatefulSet
- curr := &appsv1.StatefulSet{
- Spec: appsv1.StatefulSetSpec{
- Replicas: nil,
- Template: apiv1.PodTemplateSpec{
- Spec: apiv1.PodSpec{
- Containers: []apiv1.Container{
- {Name: "old", Image: "old:v1"},
- },
- },
+func TestSyncService(t *testing.T) {
+ tests := []struct {
+ name string
+ currentPorts []apiv1.ServicePort
+ desiredPorts []apiv1.ServicePort
+ expectedPorts map[string]int32
+ }{
+ {
+ name: "sync multiple ports",
+ currentPorts: []apiv1.ServicePort{
+ {Name: "old-port", Port: 8080},
+ },
+ desiredPorts: []apiv1.ServicePort{
+ {Name: "service-port", Port: 8091},
+ {Name: "console-port", Port: 7091},
+ {Name: "raft-port", Port: 9091},
+ },
+ expectedPorts: map[string]int32{
+ "service-port": 8091,
+ "console-port": 7091,
+ "raft-port": 9091,
},
},
- }
-
- // Next StatefulSet
- next := &appsv1.StatefulSet{
- Spec: appsv1.StatefulSetSpec{
- Replicas: &replicas1,
- Template: apiv1.PodTemplateSpec{
- Spec: apiv1.PodSpec{
- Containers: []apiv1.Container{
- {Name: "new", Image: "new:v2"},
- },
- },
+ {
+ name: "sync single port",
+ currentPorts: []apiv1.ServicePort{
+ {Name: "old-port", Port: 8080},
+ {Name: "another-port", Port: 9090},
+ },
+ desiredPorts: []apiv1.ServicePort{
+ {Name: "new-port", Port: 8091},
+ },
+ expectedPorts: map[string]int32{
+ "new-port": 8091,
},
},
- }
-
- SyncStatefulSet(curr, next)
-
- if curr.Spec.Replicas == nil {
- t.Error("Expected replicas to be set")
- } else if *curr.Spec.Replicas != 1 {
- t.Errorf("Expected 1 replica, got %d", *curr.Spec.Replicas)
- }
-}
-
-func TestSyncService_EmptyPorts(t *testing.T) {
- curr := &apiv1.Service{
- Spec: apiv1.ServiceSpec{
- Ports: []apiv1.ServicePort{
+ {
+ name: "sync empty ports",
+ currentPorts: []apiv1.ServicePort{
{Name: "port1", Port: 8080},
{Name: "port2", Port: 9090},
},
+ desiredPorts: []apiv1.ServicePort{},
+ expectedPorts: map[string]int32{},
},
}
- next := &apiv1.Service{
- Spec: apiv1.ServiceSpec{
- Ports: []apiv1.ServicePort{},
- },
- }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ curr := createTestService("test-service", "default",
tt.currentPorts)
+ next := &apiv1.Service{
+ Spec: apiv1.ServiceSpec{
+ Ports: tt.desiredPorts,
+ },
+ }
- SyncService(curr, next)
+ SyncService(curr, next)
- if len(curr.Spec.Ports) != 0 {
- t.Errorf("Expected 0 ports after sync, got %d",
len(curr.Spec.Ports))
- }
-}
+ if len(curr.Spec.Ports) != len(tt.expectedPorts) {
+ t.Errorf("Expected %d ports after sync, got
%d", len(tt.expectedPorts), len(curr.Spec.Ports))
+ }
-func TestSyncStatefulSet_WithMultipleContainers(t *testing.T) {
- replicas2 := int32(2)
+ portMap := make(map[string]int32)
+ for _, port := range curr.Spec.Ports {
+ portMap[port.Name] = port.Port
+ }
- curr := &appsv1.StatefulSet{
- Spec: appsv1.StatefulSetSpec{
- Replicas: &replicas2,
- Template: apiv1.PodTemplateSpec{
- Spec: apiv1.PodSpec{
- Containers: []apiv1.Container{
- {Name: "container1", Image:
"image1:v1"},
- },
- },
- },
- },
+ for name, expectedPort := range tt.expectedPorts {
+ if actualPort, exists := portMap[name]; !exists {
+ t.Errorf("Expected port %s to exist", name)
+ } else if actualPort != expectedPort {
+ t.Errorf("Expected port %s to be %d, got %d", name, expectedPort, actualPort)
+ }
+ }
+ })
}
+}
- next := &appsv1.StatefulSet{
- Spec: appsv1.StatefulSetSpec{
- Replicas: &replicas2,
- Template: apiv1.PodTemplateSpec{
- Spec: apiv1.PodSpec{
- Containers: []apiv1.Container{
- {Name: "container1", Image:
"image1:v2"},
- {Name: "sidecar", Image:
"sidecar:latest"},
+func TestSyncStatefulSet(t *testing.T) {
+ tests := []struct {
+ name string
+ currentReplicas *int32
+ currentContainers []apiv1.Container
+ currentLabels map[string]string
+ desiredReplicas *int32
+ desiredContainers []apiv1.Container
+ desiredLabels map[string]string
+ expectedReplicas int32
+ expectedContainers int
+ expectedLabels map[string]string
+ }{
+ {
+ name: "sync template and replicas",
+ currentReplicas: int32Ptr(1),
+ currentContainers: []apiv1.Container{
+ {Name: "old-container", Image: "old-image:v1"},
+ },
+ currentLabels: nil,
+ desiredReplicas: int32Ptr(3),
+ desiredContainers: []apiv1.Container{
+ {
+ Name: "seata-server",
+ Image: "apache/seata-server:latest",
+ Resources: apiv1.ResourceRequirements{
+ Requests: apiv1.ResourceList{
+ apiv1.ResourceCPU: resource.MustParse("500m"),
+ apiv1.ResourceMemory: resource.MustParse("1Gi"),
+ },
},
},
},
+ desiredLabels: map[string]string{"app": "seata"},
+ expectedReplicas: 3,
+ expectedContainers: 1,
+ expectedLabels: map[string]string{"app": "seata"},
},
- }
-
- SyncStatefulSet(curr, next)
-
- if len(curr.Spec.Template.Spec.Containers) != 2 {
- t.Errorf("Expected 2 containers after sync, got %d",
len(curr.Spec.Template.Spec.Containers))
- }
-
- if curr.Spec.Template.Spec.Containers[0].Image != "image1:v2" {
- t.Errorf("Expected first container image 'image1:v2', got
'%s'", curr.Spec.Template.Spec.Containers[0].Image)
- }
-
- if curr.Spec.Template.Spec.Containers[1].Name != "sidecar" {
- t.Errorf("Expected second container name 'sidecar', got '%s'",
curr.Spec.Template.Spec.Containers[1].Name)
- }
-}
-
-func TestSyncService_WithSinglePort(t *testing.T) {
- curr := &apiv1.Service{
- Spec: apiv1.ServiceSpec{
- Ports: []apiv1.ServicePort{
- {Name: "old-port", Port: 8080},
- {Name: "another-port", Port: 9090},
+ {
+ name: "set replicas when nil",
+ currentReplicas: nil,
+ currentContainers: []apiv1.Container{
+ {Name: "old", Image: "old:v1"},
+ },
+ desiredReplicas: int32Ptr(1),
+ desiredContainers: []apiv1.Container{
+ {Name: "new", Image: "new:v2"},
},
+ expectedReplicas: 1,
+ expectedContainers: 1,
},
- }
-
- next := &apiv1.Service{
- Spec: apiv1.ServiceSpec{
- Ports: []apiv1.ServicePort{
- {Name: "new-port", Port: 8091},
+ {
+ name: "sync multiple containers",
+ currentReplicas: int32Ptr(2),
+ currentContainers: []apiv1.Container{
+ {Name: "container1", Image: "image1:v1"},
+ },
+ desiredReplicas: int32Ptr(2),
+ desiredContainers: []apiv1.Container{
+ {Name: "container1", Image: "image1:v2"},
+ {Name: "sidecar", Image: "sidecar:latest"},
},
+ expectedReplicas: 2,
+ expectedContainers: 2,
},
- }
-
- SyncService(curr, next)
-
- if len(curr.Spec.Ports) != 1 {
- t.Errorf("Expected 1 port after sync, got %d",
len(curr.Spec.Ports))
- }
-
- if curr.Spec.Ports[0].Name != "new-port" {
- t.Errorf("Expected port name 'new-port', got '%s'",
curr.Spec.Ports[0].Name)
- }
-
- if curr.Spec.Ports[0].Port != 8091 {
- t.Errorf("Expected port 8091, got %d", curr.Spec.Ports[0].Port)
- }
-}
-
-func TestSyncStatefulSet_ReplicasScaleUp(t *testing.T) {
- replicas1 := int32(1)
- replicas5 := int32(5)
-
- curr := &appsv1.StatefulSet{
- Spec: appsv1.StatefulSetSpec{
- Replicas: &replicas1,
- Template: apiv1.PodTemplateSpec{
- Spec: apiv1.PodSpec{
- Containers: []apiv1.Container{
- {Name: "app", Image: "app:v1"},
- },
- },
+ {
+ name: "scale up replicas",
+ currentReplicas: int32Ptr(1),
+ currentContainers: []apiv1.Container{
+ {Name: "app", Image: "app:v1"},
},
+ desiredReplicas: int32Ptr(5),
+ desiredContainers: []apiv1.Container{
+ {Name: "app", Image: "app:v1"},
+ },
+ expectedReplicas: 5,
+ expectedContainers: 1,
},
- }
-
- next := &appsv1.StatefulSet{
- Spec: appsv1.StatefulSetSpec{
- Replicas: &replicas5,
- Template: apiv1.PodTemplateSpec{
- Spec: apiv1.PodSpec{
- Containers: []apiv1.Container{
- {Name: "app", Image: "app:v1"},
- },
- },
+ {
+ name: "scale down replicas",
+ currentReplicas: int32Ptr(5),
+ currentContainers: []apiv1.Container{
+ {Name: "app", Image: "app:v1"},
},
+ desiredReplicas: int32Ptr(2),
+ desiredContainers: []apiv1.Container{
+ {Name: "app", Image: "app:v1"},
+ },
+ expectedReplicas: 2,
+ expectedContainers: 1,
},
}
- SyncStatefulSet(curr, next)
-
- if *curr.Spec.Replicas != 5 {
- t.Errorf("Expected 5 replicas after scale up, got %d",
*curr.Spec.Replicas)
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ curr := createTestStatefulSet("test-sts", "default",
tt.currentReplicas, tt.currentContainers, tt.currentLabels)
+ next := createTestStatefulSet("", "",
tt.desiredReplicas, tt.desiredContainers, tt.desiredLabels)
+
+ SyncStatefulSet(curr, next)
+
+ // Verify replicas
+ if curr.Spec.Replicas == nil {
+ t.Fatal("Expected replicas to be set")
+ }
+ if *curr.Spec.Replicas != tt.expectedReplicas {
+ t.Errorf("Expected %d replicas, got %d",
tt.expectedReplicas, *curr.Spec.Replicas)
+ }
+
+ // Verify container count
+ if len(curr.Spec.Template.Spec.Containers) != tt.expectedContainers {
+ t.Errorf("Expected %d containers, got %d", tt.expectedContainers, len(curr.Spec.Template.Spec.Containers))
+ }
+
+ // Verify labels if set
+ if tt.expectedLabels != nil {
+ for key, expectedValue := range tt.expectedLabels {
+ if actualValue := curr.Spec.Template.Labels[key]; actualValue != expectedValue {
+ t.Errorf("Expected label %s='%s', got '%s'", key, expectedValue, actualValue)
+ }
+ }
+ }
+ })
}
}
-func TestSyncStatefulSet_ReplicasScaleDown(t *testing.T) {
- replicas5 := int32(5)
- replicas2 := int32(2)
-
- curr := &appsv1.StatefulSet{
- Spec: appsv1.StatefulSetSpec{
- Replicas: &replicas5,
- Template: apiv1.PodTemplateSpec{
- Spec: apiv1.PodSpec{
- Containers: []apiv1.Container{
- {Name: "app", Image: "app:v1"},
- },
- },
- },
+func TestChangeCluster_ErrorHandling(t *testing.T) {
+ // Test error handling of changeCluster
+ // Without a real Seata server, this will trigger error path
+ tests := []struct {
+ name string
+ username string
+ password string
+ index int32
+ }{
+ {
+ name: "empty credentials",
+ username: "",
+ password: "",
+ index: 0,
+ },
+ {
+ name: "with credentials",
+ username: "admin",
+ password: "admin",
+ index: 0,
},
}
- next := &appsv1.StatefulSet{
- Spec: appsv1.StatefulSetSpec{
- Replicas: &replicas2,
- Template: apiv1.PodTemplateSpec{
- Spec: apiv1.PodSpec{
- Containers: []apiv1.Container{
- {Name: "app", Image: "app:v1"},
- },
- },
- },
- },
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ seataServer := createTestSeataServer("test-seata",
"default", 3)
+ err := changeCluster(seataServer, tt.index,
tt.username, tt.password)
+ if err == nil {
+ t.Log("changeCluster returned no error
(expected as there is no real server)")
+ } else {
+ t.Logf("changeCluster returned expected error:
%v", err)
+ }
+ })
}
+}
- SyncStatefulSet(curr, next)
+func TestSyncRaftCluster_ErrorHandling(t *testing.T) {
+ // Test error handling of SyncRaftCluster
+ // Without a real Seata server, this will test error path
+ ctx := context.Background()
+ seataServer := createTestSeataServer("test-seata", "default", 3)
- if *curr.Spec.Replicas != 2 {
- t.Errorf("Expected 2 replicas after scale down, got %d",
*curr.Spec.Replicas)
+ err := SyncRaftCluster(ctx, seataServer, "admin", "admin")
+ if err != nil {
+ t.Logf("SyncRaftCluster returned expected error (no real
server): %v", err)
}
}
-func TestChangeCluster_LoginSuccess(t *testing.T) {
- // This test demonstrates how to test HTTP-dependent functions
- // In a real scenario, we would need httptest server
- // For now, we test the error handling paths that don't require actual HTTP calls
+// Boundary condition test: nil input
+func TestSyncService_NilInput(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Expected panic captured: %v", r)
+ }
+ }()
- seatav1alpha1 := createTestSeataServer()
+ // Test boundary case of nil input
+ var curr *apiv1.Service
+ next := &apiv1.Service{}
- // Test with invalid username/password to trigger error path
- err := changeCluster(seatav1alpha1, 0, "", "")
- if err == nil {
- t.Log("changeCluster returned no error (expected due to no real
server)")
- } else {
- t.Logf("changeCluster returned expected error: %v", err)
- }
+ // This should panic because curr is nil
+ SyncService(curr, next)
+ t.Error("Expected panic did not occur")
}
-func TestSyncRaftCluster_ErrorHandling(t *testing.T) {
- // Test SyncRaftCluster error handling
- // Without a real Seata server, this will error, which tests the error path
+func TestSyncStatefulSet_NilInput(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Expected panic captured: %v", r)
+ }
+ }()
- ctx := context.Background()
- seatav1alpha1 := createTestSeataServer()
+ // Test boundary case of nil input
+ var curr *appsv1.StatefulSet
+ next := &appsv1.StatefulSet{}
- err := SyncRaftCluster(ctx, seatav1alpha1, "admin", "admin")
- if err != nil {
- t.Logf("SyncRaftCluster returned expected error without real
server: %v", err)
- }
+ // This should panic because curr is nil
+ SyncStatefulSet(curr, next)
+ t.Error("Expected panic did not occur")
}
-func createTestSeataServer() *seatav1alpha1.SeataServer {
+// Helper function: create test SeataServer with custom parameters
+func createTestSeataServer(name, namespace string, replicas int32) *seatav1alpha1.SeataServer {
return &seatav1alpha1.SeataServer{
ObjectMeta: metav1.ObjectMeta{
- Name: "test-seata",
- Namespace: "default",
+ Name: name,
+ Namespace: namespace,
},
Spec: seatav1alpha1.SeataServerSpec{
ServiceName: "seata-server",
- Replicas: 3,
+ Replicas: replicas,
Ports: seatav1alpha1.Ports{
ServicePort: 8091,
ConsolePort: 7091,
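
[The removed TestChangeCluster_LoginSuccess comment above notes that exercising the HTTP paths properly would need an httptest server. A hedged sketch of that direction follows, with a stand-in handler that mimics the rspData fields the code reads (success, message, data); changeCluster derives its host from the SeataServer spec's cluster-local DNS, so this only demonstrates the endpoint shape, not a call through changeCluster itself.]

package seata

import (
    "io"
    "net/http"
    "net/http/httptest"
    "strings"
    "testing"
)

// TestLoginEndpointSketch spins up a local stand-in for the console's
// login endpoint and checks the happy-path response shape.
func TestLoginEndpointSketch(t *testing.T) {
    srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path != "/api/v1/auth/login" || r.Method != http.MethodPost {
            http.NotFound(w, r)
            return
        }
        w.Header().Set("Content-Type", "application/json")
        io.WriteString(w, `{"message":"success","success":true,"data":"fake-token"}`)
    }))
    defer srv.Close()

    rsp, err := http.Post(srv.URL+"/api/v1/auth/login", "application/json",
        strings.NewReader(`{"username":"admin","password":"admin"}`))
    if err != nil {
        t.Fatalf("login request failed: %v", err)
    }
    defer rsp.Body.Close()

    if rsp.StatusCode != http.StatusOK {
        t.Fatalf("expected status 200, got %d", rsp.StatusCode)
    }
    body, err := io.ReadAll(rsp.Body)
    if err != nil {
        t.Fatalf("failed to read body: %v", err)
    }
    if !strings.Contains(string(body), `"success":true`) {
        t.Errorf("unexpected response body: %s", body)
    }
}
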
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]