This is an automated email from the ASF dual-hosted git repository.
miaoliyao pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/shardingsphere-on-cloud.git
The following commit(s) were added to refs/heads/main by this push:
new 8c88293 refactor(storage-node): refactor delete storage node logic
new a26e3e5 Merge pull request #359 from Xu-Wentao/storage-node
8c88293 is described below
commit 8c88293eaca44bc7a4664625bf26e4784a219ef2
Author: xuwentao <[email protected]>
AuthorDate: Wed May 10 19:34:01 2023 +0800
refactor(storage-node): refactor delete storage node logic
---
.../api/v1alpha1/storage_node_types.go | 4 +-
.../cmd/shardingsphere-operator/manager/option.go | 12 +-
.../controllers/storage_ndoe_controller_test.go | 327 ++++++++++++++-------
.../pkg/controllers/storage_node_controller.go | 237 ++++++++-------
.../pkg/reconcile/storagenode/aws/rdsinstance.go | 4 +-
.../test/e2e/storage_node_controller_test.go | 84 ++----
6 files changed, 373 insertions(+), 295 deletions(-)
diff --git a/shardingsphere-operator/api/v1alpha1/storage_node_types.go
b/shardingsphere-operator/api/v1alpha1/storage_node_types.go
index 7aecd86..d193026 100644
--- a/shardingsphere-operator/api/v1alpha1/storage_node_types.go
+++ b/shardingsphere-operator/api/v1alpha1/storage_node_types.go
@@ -140,7 +140,7 @@ type StorageNodeStatus struct {
const (
StorageNodeInstanceStatusAvailable = "available"
- StorageNodeInstanceStatusBackingup = "backingup"
+ StorageNodeInstanceStatusBackingUp = "backing-up"
StorageNodeInstanceStatusCreating = "creating"
StorageNodeInstanceStatusDeleting = "deleting"
StorageNodeInstanceStatusFailed = "failed"
@@ -150,6 +150,8 @@ const (
StorageNodeInstanceStatusStarting = "starting"
StorageNodeInstanceStatusStopped = "stopped"
StorageNodeInstanceStatusStopping = "stopping"
+
+ StorageNodeInstanceStatusReady = "Ready"
)
// AddCondition adds the given condition to the StorageNodeConditions.
diff --git
a/shardingsphere-operator/cmd/shardingsphere-operator/manager/option.go
b/shardingsphere-operator/cmd/shardingsphere-operator/manager/option.go
index ab69d45..d4ba29e 100644
--- a/shardingsphere-operator/cmd/shardingsphere-operator/manager/option.go
+++ b/shardingsphere-operator/cmd/shardingsphere-operator/manager/option.go
@@ -35,12 +35,10 @@ import (
dbmeshv1alpha1
"github.com/database-mesh/golang-sdk/kubernetes/api/v1alpha1"
"go.uber.org/zap/zapcore"
batchV1 "k8s.io/api/batch/v1"
- corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientset "k8s.io/client-go/kubernetes"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
- "k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"
@@ -146,19 +144,11 @@ var featureGatesHandlers = map[string]FeatureGateHandler{
return nil
},
"StorageNode": func(mgr manager.Manager) error {
- eventBroadcaster := record.NewBroadcaster()
- recorder := eventBroadcaster.NewRecorder(
- mgr.GetScheme(),
- corev1.EventSource{
- Component:
controllers.StorageNodeControllerName,
- },
- )
-
reconciler := &controllers.StorageNodeReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: mgr.GetLogger(),
- Recorder: recorder,
+ Recorder:
mgr.GetEventRecorderFor(controllers.StorageNodeControllerName),
}
// init aws client if aws credentials are provided
diff --git
a/shardingsphere-operator/pkg/controllers/storage_ndoe_controller_test.go
b/shardingsphere-operator/pkg/controllers/storage_ndoe_controller_test.go
index e325dc6..3015dde 100644
--- a/shardingsphere-operator/pkg/controllers/storage_ndoe_controller_test.go
+++ b/shardingsphere-operator/pkg/controllers/storage_ndoe_controller_test.go
@@ -19,9 +19,12 @@ package controllers
import (
"context"
+ "time"
+
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/reconcile/storagenode/aws"
mock_aws
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/reconcile/storagenode/aws/mocks"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
"bou.ke/monkey"
dbmesh_aws "github.com/database-mesh/golang-sdk/aws"
@@ -30,7 +33,6 @@ import (
"github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
- corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
@@ -41,49 +43,108 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log/zap"
)
-var ctx = context.Background()
+const (
+ defaultTestNamespace = "test-namespace"
+ defaultTestDBClass = "test-database-class"
+ defaultTestStorageNode = "test-storage-node"
+ defaultTestInstanceIdentifier = "test-database-instance"
+)
+
+var (
+ ctx = context.Background()
+ fakeClient client.Client
+ reconciler *StorageNodeReconciler
+ mockCtrl *gomock.Controller
+ mockAws *mock_aws.MockIRdsClient
+)
+
+func fakeStorageNodeReconciler() {
+ logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
+
+ scheme := runtime.NewScheme()
+ Expect(dbmeshv1alpha1.AddToScheme(scheme)).To(Succeed())
+ Expect(v1alpha1.AddToScheme(scheme)).To(Succeed())
+ fakeClient = fake.NewClientBuilder().WithScheme(scheme).Build()
+
+ sess := dbmesh_aws.NewSessions().SetCredential("AwsRegion",
"AwsAccessKeyID", "AwsSecretAccessKey").Build()
+ reconciler = &StorageNodeReconciler{
+ Client: fakeClient,
+ Log: logf.Log,
+ Recorder: record.NewFakeRecorder(100),
+ AwsRDS: dbmesh_rds.NewService(sess["AwsRegion"]),
+ }
+}
+
+var _ = BeforeEach(func() {
+ fakeStorageNodeReconciler()
+})
var _ = Describe("StorageNode Controller Mock Test", func() {
- var fakeClient client.Client
- var reconciler *StorageNodeReconciler
BeforeEach(func() {
- logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter),
zap.UseDevMode(true)))
-
- scheme := runtime.NewScheme()
- Expect(dbmeshv1alpha1.AddToScheme(scheme)).To(Succeed())
- Expect(v1alpha1.AddToScheme(scheme)).To(Succeed())
- fakeClient = fake.NewClientBuilder().WithScheme(scheme).Build()
-
- eventBroadcaster := record.NewBroadcaster()
- recorder := eventBroadcaster.NewRecorder(
- scheme,
- corev1.EventSource{
- Component: "test-storage-node-controller",
+ // mock aws rds client
+ mockCtrl = gomock.NewController(GinkgoT())
+ mockAws = mock_aws.NewMockIRdsClient(mockCtrl)
+
+ monkey.Patch(aws.NewRdsClient, func(rds dbmesh_rds.RDS)
aws.IRdsClient {
+ return mockAws
+ })
+
+ // create default resource
+ dbClass := &dbmeshv1alpha1.DatabaseClass{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: defaultTestDBClass,
},
- )
+ Spec: dbmeshv1alpha1.DatabaseClassSpec{
+ Provisioner:
dbmeshv1alpha1.ProvisionerAWSRDSInstance,
+ },
+ }
- sess := dbmesh_aws.NewSessions().SetCredential("AwsRegion",
"AwsAccessKeyID", "AwsSecretAccessKey").Build()
- reconciler = &StorageNodeReconciler{
- Client: fakeClient,
- Log: logf.Log,
- Recorder: recorder,
- AwsRDS: dbmesh_rds.NewService(sess["AwsRegion"]),
+ storageNode := &v1alpha1.StorageNode{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: defaultTestStorageNode,
+ Namespace: defaultTestNamespace,
+ Annotations: map[string]string{
+
dbmeshv1alpha1.AnnotationsInstanceIdentifier: defaultTestInstanceIdentifier,
+ },
+ },
+ Spec: v1alpha1.StorageNodeSpec{
+ DatabaseClassName: defaultTestDBClass,
+ },
}
+ Expect(fakeClient.Create(ctx, dbClass)).Should(Succeed())
+ Expect(fakeClient.Create(ctx, storageNode)).Should(Succeed())
+ })
+
+ AfterEach(func() {
+ // delete default resource
+ Expect(fakeClient.Delete(ctx, &v1alpha1.StorageNode{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: defaultTestStorageNode,
+ Namespace: defaultTestNamespace,
+ },
+ })).Should(Succeed())
+ Expect(fakeClient.Delete(ctx, &dbmeshv1alpha1.DatabaseClass{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: defaultTestDBClass,
+ },
+ })).Should(Succeed())
+
+ mockCtrl.Finish()
+ monkey.UnpatchAll()
})
Context("create storage node", func() {
It("should create storage node successfully", func() {
storageNode := &v1alpha1.StorageNode{
ObjectMeta: metav1.ObjectMeta{
- Name: "test-storage-node",
- Namespace: "test-namespace",
+ Name: "test-storage-node-1",
+ Namespace: defaultTestNamespace,
},
Spec: v1alpha1.StorageNodeSpec{
- DatabaseClassName:
"test-database-class",
+ DatabaseClassName: defaultTestDBClass,
},
Status: v1alpha1.StorageNodeStatus{},
}
-
Expect(fakeClient.Create(ctx,
storageNode)).Should(Succeed())
sn := &v1alpha1.StorageNode{}
Expect(fakeClient.Get(ctx, client.ObjectKey{Name:
"test-storage-node", Namespace: "test-namespace"}, sn)).Should(Succeed())
@@ -95,8 +156,8 @@ var _ = Describe("StorageNode Controller Mock Test", func() {
It("should create storage node successfully", func() {
storageNode := &v1alpha1.StorageNode{
ObjectMeta: metav1.ObjectMeta{
- Name: "test-storage-node",
- Namespace: "test-namespace",
+ Name: "test-storage-node-2",
+ Namespace: defaultTestNamespace,
},
Spec: v1alpha1.StorageNodeSpec{
DatabaseClassName: "no-database",
@@ -106,8 +167,8 @@ var _ = Describe("StorageNode Controller Mock Test", func()
{
Expect(fakeClient.Create(ctx,
storageNode)).Should(Succeed())
req := ctrl.Request{
NamespacedName: client.ObjectKey{
- Name: "test-storage-node",
- Namespace: "test-namespace",
+ Name: "test-storage-node-2",
+ Namespace: defaultTestNamespace,
},
}
_, err := reconciler.Reconcile(ctx, req)
@@ -116,60 +177,17 @@ var _ = Describe("StorageNode Controller Mock Test",
func() {
})
})
- Context("reconcile storageNode with exist databaseClass", func() {
- var mockCtrl *gomock.Controller
- var mockAws *mock_aws.MockIRdsClient
- BeforeEach(func() {
- mockCtrl = gomock.NewController(GinkgoT())
- mockAws = mock_aws.NewMockIRdsClient(mockCtrl)
-
- monkey.Patch(aws.NewRdsClient, func(rds dbmesh_rds.RDS)
aws.IRdsClient {
- return mockAws
- })
-
- // create databaseClass
- dbClass := &dbmeshv1alpha1.DatabaseClass{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-database-class",
- },
- Spec: dbmeshv1alpha1.DatabaseClassSpec{
- Provisioner:
dbmeshv1alpha1.ProvisionerAWSRDSInstance,
- },
- }
- Expect(fakeClient.Create(ctx,
dbClass)).Should(Succeed())
-
- // create storageNode
- storageNode := &v1alpha1.StorageNode{
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-storage-node",
- Namespace: "test-namespace",
- Annotations: map[string]string{
-
dbmeshv1alpha1.AnnotationsInstanceIdentifier: "test-instance",
- },
- },
- Spec: v1alpha1.StorageNodeSpec{
- DatabaseClassName:
"test-database-class",
- },
- }
-
- Expect(fakeClient.Create(ctx,
storageNode)).Should(Succeed())
- })
-
- AfterEach(func() {
- mockCtrl.Finish()
- monkey.UnpatchAll()
- })
-
+ Context("reconcile storageNode", func() {
It("should reconcile successfully with Creating Instance",
func() {
req := ctrl.Request{
NamespacedName: client.ObjectKey{
- Name: "test-storage-node",
- Namespace: "test-namespace",
+ Name: defaultTestStorageNode,
+ Namespace: defaultTestNamespace,
},
}
rdsInstance := &dbmesh_rds.DescInstance{
- DBInstanceStatus: "creating",
+ DBInstanceStatus:
v1alpha1.StorageNodeInstanceStatusCreating,
Endpoint: dbmesh_rds.Endpoint{
Address: "127.0.0.1",
Port: 3306,
@@ -186,19 +204,19 @@ var _ = Describe("StorageNode Controller Mock Test",
func() {
Expect(fakeClient.Get(ctx, client.ObjectKey{Name:
"test-storage-node", Namespace: "test-namespace"}, newSN)).Should(Succeed())
Expect(newSN.Status.Phase).To(Equal(v1alpha1.StorageNodePhaseNotReady))
Expect(newSN.Status.Instances).To(HaveLen(1))
-
Expect(newSN.Status.Instances[0].Status).To(Equal("creating"))
+
Expect(newSN.Status.Instances[0].Status).To(Equal(v1alpha1.StorageNodeInstanceStatusCreating))
})
It("should reconcile successfully with Available Instance",
func() {
req := ctrl.Request{
NamespacedName: client.ObjectKey{
- Name: "test-storage-node",
- Namespace: "test-namespace",
+ Name: defaultTestStorageNode,
+ Namespace: defaultTestNamespace,
},
}
rdsInstance := &dbmesh_rds.DescInstance{
- DBInstanceStatus: "available",
+ DBInstanceStatus:
v1alpha1.StorageNodeInstanceStatusAvailable,
Endpoint: dbmesh_rds.Endpoint{
Address: "127.0.0.1",
Port: 3306,
@@ -211,44 +229,155 @@ var _ = Describe("StorageNode Controller Mock Test",
func() {
Expect(err).To(BeNil())
newSN := &v1alpha1.StorageNode{}
- Expect(fakeClient.Get(ctx, client.ObjectKey{Name:
"test-storage-node", Namespace: "test-namespace"}, newSN)).Should(Succeed())
+ Expect(fakeClient.Get(ctx, client.ObjectKey{Name:
defaultTestStorageNode, Namespace: defaultTestNamespace},
newSN)).Should(Succeed())
Expect(newSN.Status.Phase).To(Equal(v1alpha1.StorageNodePhaseReady))
Expect(newSN.Status.Instances).To(HaveLen(1))
-
Expect(newSN.Status.Instances[0].Status).To(Equal("Ready"))
+
Expect(newSN.Status.Instances[0].Status).To(Equal(v1alpha1.StorageNodeInstanceStatusReady))
})
+ })
- It("should reconcile successfully when storage node be
deleted", func() {
- req := ctrl.Request{
- NamespacedName: client.ObjectKey{
- Name: "test-storage-node",
- Namespace: "test-namespace",
+ Context("reconcile storage node in Ready status when it's been
deleted", func() {
+ var (
+ rdsInstanceAvailable = dbmesh_rds.DescInstance{
+ DBInstanceIdentifier:
defaultTestInstanceIdentifier,
+ DBInstanceStatus:
v1alpha1.StorageNodeInstanceStatusAvailable,
+ Endpoint: dbmesh_rds.Endpoint{
+ Address: "127.0.0.1",
+ Port: 3306,
},
}
-
- rdsInstance := &dbmesh_rds.DescInstance{
- DBInstanceStatus: "available",
+ instanceInDeleting = dbmesh_rds.DescInstance{
+ DBInstanceIdentifier:
defaultTestInstanceIdentifier,
+ DBInstanceStatus:
v1alpha1.StorageNodeInstanceStatusDeleting,
Endpoint: dbmesh_rds.Endpoint{
Address: "127.0.0.1",
Port: 3306,
},
}
-
- // mock aws rds client, get instance
- mockAws.EXPECT().GetInstance(gomock.Any(),
gomock.Any()).Return(rdsInstance, nil).AnyTimes()
+ )
+ It("should be successful when instance is in available status",
func() {
+ deletingStorageNode := "test-deleting-storage-node"
+ req := ctrl.Request{
+ NamespacedName: client.ObjectKey{
+ Name: deletingStorageNode,
+ Namespace: defaultTestNamespace,
+ },
+ }
+ readyStorageNode := &v1alpha1.StorageNode{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: deletingStorageNode,
+ Namespace: defaultTestNamespace,
+ Annotations: map[string]string{
+
dbmeshv1alpha1.AnnotationsInstanceIdentifier: defaultTestInstanceIdentifier,
+ },
+ },
+ Spec:
v1alpha1.StorageNodeSpec{DatabaseClassName: defaultTestDBClass},
+ }
+ Expect(fakeClient.Create(ctx,
readyStorageNode)).Should(Succeed())
+ // mock aws rds client, get instance and return
available status
+ mockAws.EXPECT().GetInstance(gomock.Any(),
gomock.Any()).Return(&rdsInstanceAvailable, nil)
// reconcile storage node, add instance and set status
to ready
_, err := reconciler.Reconcile(ctx, req)
Expect(err).To(BeNil())
// delete storage node
- sn := &v1alpha1.StorageNode{}
- Expect(fakeClient.Get(ctx, client.ObjectKey{Name:
"test-storage-node", Namespace: "test-namespace"}, sn)).Should(Succeed())
- Expect(fakeClient.Delete(ctx, sn)).Should(Succeed())
-
+ Expect(fakeClient.Delete(ctx, &v1alpha1.StorageNode{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: deletingStorageNode,
+ Namespace: defaultTestNamespace,
+ },
+ })).Should(Succeed())
// mock aws rds client, delete instance
+ mockAws.EXPECT().GetInstance(gomock.Any(),
gomock.Any()).Return(&rdsInstanceAvailable, nil)
mockAws.EXPECT().DeleteInstance(gomock.Any(),
gomock.Any(), gomock.Any()).Return(nil)
+ mockAws.EXPECT().GetInstance(gomock.Any(),
gomock.Any()).Return(&instanceInDeleting, nil)
_, err = reconciler.Reconcile(ctx, req)
Expect(err).To(BeNil())
+
+ // storage node status should be deleting
+ deletingSN := &v1alpha1.StorageNode{}
+ Expect(fakeClient.Get(ctx, client.ObjectKey{Name:
deletingStorageNode, Namespace: defaultTestNamespace},
deletingSN)).Should(Succeed())
+
Expect(deletingSN.Status.Phase).To(Equal(v1alpha1.StorageNodePhaseDeleting))
+ })
+
+ It("should be successful when instance is in deleting status",
func() {
+ deletedStorageNodeName := "test-deleted-storage-node"
+ req := ctrl.Request{
+ NamespacedName: client.ObjectKey{
+ Name: deletedStorageNodeName,
+ Namespace: defaultTestNamespace,
+ },
+ }
+ deleteTime := metav1.NewTime(time.Now())
+ storageNode := &v1alpha1.StorageNode{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: deletedStorageNodeName,
+ Namespace: defaultTestNamespace,
+ Finalizers: []string{
+ FinalizerName,
+ },
+ DeletionTimestamp: &deleteTime,
+ },
+ Spec: v1alpha1.StorageNodeSpec{
+ DatabaseClassName: defaultTestDBClass,
+ },
+ Status: v1alpha1.StorageNodeStatus{
+ Phase:
v1alpha1.StorageNodePhaseDeleting,
+ Instances: []v1alpha1.InstanceStatus{
+ {
+ Status:
v1alpha1.StorageNodeInstanceStatusDeleting,
+ Endpoint:
v1alpha1.Endpoint{
+ Address:
"127.0.0.1",
+ Port: 3306,
+ },
+ },
+ },
+ },
+ }
+ Expect(fakeClient.Create(ctx,
storageNode)).Should(Succeed())
+ // mock aws rds client, get nil instance
+ mockAws.EXPECT().GetInstance(gomock.Any(),
gomock.Any()).Return(nil, nil)
+ _, err := reconciler.Reconcile(ctx, req)
+ Expect(err).To(BeNil())
+
+ deletedCompleteSN := &v1alpha1.StorageNode{}
+ Expect(fakeClient.Get(ctx, client.ObjectKey{Name:
deletedStorageNodeName, Namespace: defaultTestNamespace},
deletedCompleteSN)).Should(Succeed())
+
Expect(deletedCompleteSN.Status.Phase).To(Equal(v1alpha1.StorageNodePhaseDeleteComplete))
+ })
+
+ It("should be successful when storage node is delete complete
status", func() {
+ deletedCompletedStorageNodeName :=
"test-delete-completed-storage-node"
+ req := ctrl.Request{
+ NamespacedName: client.ObjectKey{
+ Name:
deletedCompletedStorageNodeName,
+ Namespace: defaultTestNamespace,
+ },
+ }
+ deleteTime := metav1.NewTime(time.Now())
+ storageNode := &v1alpha1.StorageNode{
+ ObjectMeta: metav1.ObjectMeta{
+ Name:
deletedCompletedStorageNodeName,
+ Namespace: defaultTestNamespace,
+ Finalizers: []string{
+ FinalizerName,
+ },
+ DeletionTimestamp: &deleteTime,
+ },
+ Spec: v1alpha1.StorageNodeSpec{
+ DatabaseClassName: defaultTestDBClass,
+ },
+ Status: v1alpha1.StorageNodeStatus{
+ Phase:
v1alpha1.StorageNodePhaseDeleteComplete,
+ },
+ }
+ Expect(fakeClient.Create(ctx,
storageNode)).Should(Succeed())
+
+ _, err := reconciler.Reconcile(ctx, req)
+ Expect(err).To(BeNil())
+ finalSN := &v1alpha1.StorageNode{}
+ err = fakeClient.Get(ctx, client.ObjectKey{Name:
deletedCompletedStorageNodeName, Namespace: defaultTestNamespace}, finalSN)
+ Expect(apierrors.IsNotFound(err)).To(BeTrue())
})
})
})
diff --git a/shardingsphere-operator/pkg/controllers/storage_node_controller.go
b/shardingsphere-operator/pkg/controllers/storage_node_controller.go
index f0e00a1..b2efb9e 100644
--- a/shardingsphere-operator/pkg/controllers/storage_node_controller.go
+++ b/shardingsphere-operator/pkg/controllers/storage_node_controller.go
@@ -22,16 +22,17 @@ import (
"fmt"
"reflect"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/reconcile/storagenode/aws"
+
"github.com/database-mesh/golang-sdk/aws/client/rds"
dbmeshv1alpha1
"github.com/database-mesh/golang-sdk/kubernetes/api/v1alpha1"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
+ "k8s.io/utils/strings/slices"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -53,49 +54,94 @@ type StorageNodeReconciler struct {
// Reconcile handles main function of this controller
// nolint:gocognit
func (r *StorageNodeReconciler) Reconcile(ctx context.Context, req
ctrl.Request) (ctrl.Result, error) {
- logger := r.Log.WithValues(StorageNodeControllerName,
req.NamespacedName)
+ r.Log.WithValues(StorageNodeControllerName, req.NamespacedName)
- logger.Info("Reconciling StorageNode")
+ r.Log.Info("Reconciling StorageNode")
+ // get storage node
node := &v1alpha1.StorageNode{}
if err := r.Get(ctx, req.NamespacedName, node); err != nil {
if client.IgnoreNotFound(err) == nil {
- logger.Info(fmt.Sprintf("StorageNode [%s:%s] is not
exist", req.Namespace, req.Name))
+ r.Log.Info(fmt.Sprintf("StorageNode [%s:%s] is not
exist", req.Namespace, req.Name))
return ctrl.Result{}, nil
}
- logger.Error(err, fmt.Sprintf("unable to fetch StorageNode
[%s:%s]", req.Namespace, req.Name))
+ r.Log.Error(err, fmt.Sprintf("unable to fetch StorageNode
[%s:%s]", req.Namespace, req.Name))
return ctrl.Result{Requeue: true}, err
}
// Get databaseClass with storageNode.Spec.DatabaseClassName
databaseClass, err := r.getDatabaseClass(ctx, node)
if err != nil {
- logger.Error(err, fmt.Sprintf("unable to fetch DatabaseClass
[%s]", node.Spec.DatabaseClassName))
+ r.Log.Error(err, fmt.Sprintf("unable to fetch DatabaseClass
[%s]", node.Spec.DatabaseClassName))
return ctrl.Result{Requeue: true}, err
}
- // TODO: when storage node needed finalized, set deletion timestamp and
set status to deleting, waiting database instance deleted. and then remove
finalizer and delete storage node.
-
// finalize storage node
- if err := r.finalize(ctx, node, databaseClass); err != nil {
- logger.Error(err, fmt.Sprintf("unable to finalize StorageNode
[%s:%s]", node.GetNamespace(), node.GetName()))
- return ctrl.Result{}, err
+ // nolint: nestif
+ if node.ObjectMeta.DeletionTimestamp.IsZero() {
+ // The object is not being deleted, so if it does not have our
finalizer,
+ // then lets add the finalizer and update the object. This is
equivalent to registering our finalizer.
+ if !slices.Contains(node.ObjectMeta.Finalizers, FinalizerName) {
+ node.ObjectMeta.Finalizers =
append(node.ObjectMeta.Finalizers, FinalizerName)
+ if err := r.Update(ctx, node); err != nil {
+ return ctrl.Result{}, err
+ }
+ }
+ } else if slices.Contains(node.ObjectMeta.Finalizers, FinalizerName) {
+ switch node.Status.Phase {
+ case v1alpha1.StorageNodePhaseReady,
v1alpha1.StorageNodePhaseNotReady:
+ if err := r.deleteDatabaseCluster(ctx, node,
databaseClass); err != nil {
+ return ctrl.Result{RequeueAfter:
defaultRequeueTime}, err
+ }
+ case v1alpha1.StorageNodePhaseDeleting:
+ ins, err := aws.NewRdsClient(r.AwsRDS).GetInstance(ctx,
node)
+ if err != nil {
+ return ctrl.Result{RequeueAfter:
defaultRequeueTime}, err
+ }
+ if ins == nil {
+ // update storage node status to
v1alpha1.StorageNodePhaseDeleteComplete
+ node.Status.Phase =
v1alpha1.StorageNodePhaseDeleteComplete
+ node.Status.Instances = nil
+ if err := r.Status().Update(ctx, node); err !=
nil {
+ r.Log.Error(err, "failed to update
storage node status")
+ }
+ return ctrl.Result{RequeueAfter:
defaultRequeueTime}, err
+ }
+ r.Log.V(2).Info("RDS instance is still deleting")
+ return ctrl.Result{RequeueAfter: defaultRequeueTime},
err
+ case v1alpha1.StorageNodePhaseDeleteComplete:
+ // remove our finalizer from the list and update it.
+ node.ObjectMeta.Finalizers = slices.Filter([]string{},
node.ObjectMeta.Finalizers, func(f string) bool {
+ return f != FinalizerName
+ })
+ if err := r.Update(ctx, node); err != nil {
+ r.Log.Error(err, "failed to remove finalizer")
+ }
+ return ctrl.Result{}, nil
+ default:
+ r.Recorder.Event(node, corev1.EventTypeWarning,
fmt.Sprintf("Delete [%s:%s] Failed", node.GetNamespace(), node.GetName()),
"StorageNode is not in a valid phase")
+ return ctrl.Result{RequeueAfter: defaultRequeueTime},
err
+ }
}
+ return r.reconcile(ctx, databaseClass, node)
+}
+
+func (r *StorageNodeReconciler) reconcile(ctx context.Context, dbClass
*dbmeshv1alpha1.DatabaseClass, node *v1alpha1.StorageNode) (ctrl.Result, error)
{
// reconcile storage node with databaseClass
- switch databaseClass.Spec.Provisioner {
+ switch dbClass.Spec.Provisioner {
case dbmeshv1alpha1.ProvisionerAWSRDSInstance:
- if err := r.reconcileAwsRdsInstance(ctx,
aws.NewRdsClient(r.AwsRDS), node, databaseClass); err != nil {
- logger.Error(err, fmt.Sprintf("unable to reconcile AWS
RDS Instance [%s:%s], err:%s", node.GetNamespace(), node.GetName(),
err.Error()))
+ if err := r.reconcileAwsRdsInstance(ctx,
aws.NewRdsClient(r.AwsRDS), node, dbClass); err != nil {
+ r.Log.Error(err, fmt.Sprintf("unable to reconcile AWS
RDS Instance [%s:%s], err:%s", node.GetNamespace(), node.GetName(),
err.Error()))
r.Recorder.Event(node, corev1.EventTypeWarning,
fmt.Sprintf("Reconcile [%s:%s] Failed", node.GetNamespace(), node.GetName()),
err.Error())
}
case dbmeshv1alpha1.ProvisionerAWSAurora:
- if err := r.reconcileAwsAurora(ctx, aws.NewRdsClient(r.AwsRDS),
node, databaseClass); err != nil {
+ if err := r.reconcileAwsAurora(ctx, aws.NewRdsClient(r.AwsRDS),
node, dbClass); err != nil {
r.Recorder.Event(node, corev1.EventTypeWarning,
fmt.Sprintf("Reconcile [%s:%s] Failed", node.GetNamespace(), node.GetName()),
err.Error())
}
default:
- r.Recorder.Event(node, corev1.EventTypeWarning,
"UnsupportedDatabaseProvisioner", fmt.Sprintf("unsupported database provisioner
[%s]", databaseClass.Spec.Provisioner))
- logger.Error(nil, fmt.Sprintf("unsupported database provisioner
[%s]", databaseClass.Spec.Provisioner))
+ r.Recorder.Event(node, corev1.EventTypeWarning,
"UnsupportedDatabaseProvisioner", fmt.Sprintf("unsupported database provisioner
[%s]", dbClass.Spec.Provisioner))
+ r.Log.Error(nil, fmt.Sprintf("unsupported database provisioner
[%s]", dbClass.Spec.Provisioner))
}
// update status
@@ -105,7 +151,7 @@ func (r *StorageNodeReconciler) Reconcile(ctx
context.Context, req ctrl.Request)
node.Status = desiredState
err := r.Status().Update(ctx, node)
if err != nil {
- logger.Error(err, fmt.Sprintf("unable to update
StorageNode [%s:%s] status", req.Namespace, req.Name))
+ r.Log.Error(err, fmt.Sprintf("unable to update
StorageNode [%s:%s]", node.GetNamespace(), node.GetName()))
return ctrl.Result{Requeue: true}, err
}
}
@@ -134,83 +180,12 @@ func (r *StorageNodeReconciler) getDatabaseClass(ctx
context.Context, node *v1al
return databaseClass, nil
}
-func removeString(finalizers []string, name string) []string {
- var result []string
- for _, finalizer := range finalizers {
- if finalizer != name {
- result = append(result, finalizer)
- }
- }
- return result
-}
-
-func containsString(finalizers []string, name string) bool {
- for _, finalizer := range finalizers {
- if finalizer == name {
- return true
- }
- }
- return false
-}
-
-// nolint:nestif
-func (r *StorageNodeReconciler) finalize(ctx context.Context, node
*v1alpha1.StorageNode, databaseClass *dbmeshv1alpha1.DatabaseClass) error {
- if node.ObjectMeta.DeletionTimestamp.IsZero() {
- // The object is not being deleted, so if it does not have our
finalizer,
- // then lets add the finalizer and update the object. This is
equivalent to registering our finalizer.
- if !containsString(node.ObjectMeta.Finalizers, FinalizerName) {
- node.ObjectMeta.Finalizers =
append(node.ObjectMeta.Finalizers, FinalizerName)
- if err := r.Update(ctx, node); err != nil {
- return err
- }
- }
- } else if containsString(node.ObjectMeta.Finalizers, FinalizerName) {
- switch node.Status.Phase {
- case v1alpha1.StorageNodePhaseDeleting:
- instance := &rds.DescInstance{}
- ins, err := aws.NewRdsClient(r.AwsRDS).GetInstance(ctx,
node)
- if err != nil {
- return err
- }
- if reflect.DeepEqual(ins, instance) {
- node.Status.Phase =
v1alpha1.StorageNodePhaseDeleteComplete
- node.Status.Instances = nil
- if err := r.Update(ctx, node); err != nil {
- return err
- }
- r.Log.Info("RDS instance has been successfully
deleted")
- return nil
- }
- r.Log.Info("RDS instance is still deleting")
- return nil
- case v1alpha1.StorageNodePhaseDeleteComplete:
- // remove our finalizer from the list and update it.
- node.ObjectMeta.Finalizers =
removeString(node.ObjectMeta.Finalizers, FinalizerName)
- return r.Update(ctx, node)
- case v1alpha1.StorageNodePhaseReady,
v1alpha1.StorageNodePhaseNotReady:
- // The object is being deleted
- if err := r.deleteDatabaseCluster(ctx, node,
databaseClass); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
// nolint:gocritic,gocognit
func computeDesiredState(status v1alpha1.StorageNodeStatus)
v1alpha1.StorageNodeStatus {
// Initialize a new status object based on the current state
desiredState := status
- // TODO: set enums for aws instance status
-
- // If the cluster status is not empty, then we compute the phase based
on the cluster status
- clusterStatus := ""
- if status.Cluster.Status != "" {
- if status.Cluster.Status == "available" {
- clusterStatus = "Ready"
- }
- }
+ clusterStatus := status.Cluster.Status
if (clusterStatus == "" || clusterStatus == "Ready") &&
allInstancesReady(status.Instances) {
desiredState.Phase = v1alpha1.StorageNodePhaseReady
@@ -218,16 +193,17 @@ func computeDesiredState(status
v1alpha1.StorageNodeStatus) v1alpha1.StorageNode
desiredState.Phase = v1alpha1.StorageNodePhaseNotReady
}
- instancenum := len(status.Instances)
- if (status.Phase == v1alpha1.StorageNodePhaseDeleting || status.Phase
== v1alpha1.StorageNodePhaseDeleteComplete) && instancenum == 0 {
- desiredState.Phase = v1alpha1.StorageNodePhaseDeleteComplete
- }
- for _, ins := range status.Instances {
+ for idx := range status.Instances {
+ ins := &status.Instances[idx]
if ins.Status == v1alpha1.StorageNodeInstanceStatusDeleting {
desiredState.Phase = v1alpha1.StorageNodePhaseDeleting
}
}
+ if (status.Phase == v1alpha1.StorageNodePhaseDeleting || status.Phase
== v1alpha1.StorageNodePhaseDeleteComplete) && len(status.Instances) == 0 {
+ desiredState.Phase = v1alpha1.StorageNodePhaseDeleteComplete
+ }
+
newSNConditions := status.Conditions
// Update the cluster ready condition if the cluster status is not empty
@@ -293,7 +269,7 @@ func (r *StorageNodeReconciler) reconcileAwsRdsInstance(ctx
context.Context, cli
if node.Status.Phase == v1alpha1.StorageNodePhaseDeleteComplete {
return nil
}
- //ins := &rds.DescInstance{}
+
instance, err := client.GetInstance(ctx, node)
if err != nil {
return err
@@ -311,23 +287,19 @@ func (r *StorageNodeReconciler)
reconcileAwsRdsInstance(ctx context.Context, cli
}
}
- r.Log.Info(fmt.Sprintf("RDS instance [%s] status is [%s]",
instance.DBInstanceIdentifier, instance.DBInstanceStatus))
-
- newStatus := updateInstanceStatus(node, instance)
- node.Status.Instances = newStatus
- if err := r.Status().Update(ctx, node); err != nil {
- r.Log.Error(err, fmt.Sprintf("Failed to update instance status
for node [%s:%s]", node.GetNamespace(), node.GetName()))
+ if err := updateAWSRDSInstanceStatus(node, instance); err != nil {
+ return fmt.Errorf("updateAWSRDSInstanceStatus failed: %w", err)
}
- r.Recorder.Eventf(node, corev1.EventTypeNormal, "Reconcile",
"Reconciled RDS instance %s, status is %s", instance.DBInstanceIdentifier,
instance.DBInstanceStatus)
+
return nil
}
-func updateInstanceStatus(node *v1alpha1.StorageNode, instance
*rds.DescInstance) []v1alpha1.InstanceStatus {
+func updateAWSRDSInstanceStatus(node *v1alpha1.StorageNode, instance
*rds.DescInstance) error {
instances := make([]v1alpha1.InstanceStatus, 0)
status := instance.DBInstanceStatus
- if status == "available" {
- status = "Ready"
+ if status == v1alpha1.StorageNodeInstanceStatusAvailable {
+ status = v1alpha1.StorageNodeInstanceStatusReady
}
instances = append(instances, v1alpha1.InstanceStatus{
@@ -337,7 +309,9 @@ func updateInstanceStatus(node *v1alpha1.StorageNode,
instance *rds.DescInstance
},
Status: status,
})
- return instances
+
+ node.Status.Instances = instances
+ return nil
}
func (r *StorageNodeReconciler) reconcileAwsAurora(ctx context.Context, client
aws.IRdsClient, node *v1alpha1.StorageNode, dbClass
*dbmeshv1alpha1.DatabaseClass) error {
@@ -404,21 +378,9 @@ func updateClusterStatus(ctx context.Context, node
*v1alpha1.StorageNode, client
func (r *StorageNodeReconciler) deleteDatabaseCluster(ctx context.Context,
node *v1alpha1.StorageNode, databaseClass *dbmeshv1alpha1.DatabaseClass) error {
switch databaseClass.Spec.Provisioner {
case dbmeshv1alpha1.ProvisionerAWSRDSInstance:
- instance := &rds.DescInstance{}
- ins, err := aws.NewRdsClient(r.AwsRDS).GetInstance(ctx, node)
- if err != nil {
- return err
- }
- if reflect.DeepEqual(ins, instance) || ins.DBInstanceStatus ==
v1alpha1.StorageNodeInstanceStatusDeleting {
- return nil
- }
- if err := aws.NewRdsClient(r.AwsRDS).DeleteInstance(ctx, node,
databaseClass); err != nil {
- return err
- }
- for i := 0; i < len(node.Status.Instances); i++ {
- node.Status.Instances[i].Status =
v1alpha1.StorageNodeInstanceStatusDeleting
+ if err := r.deleteAWSRDSInstance(ctx,
aws.NewRdsClient(r.AwsRDS), node, databaseClass); err != nil {
+ return fmt.Errorf("delete aws rds instance failed: %w",
err)
}
- return r.Update(ctx, node)
case dbmeshv1alpha1.ProvisionerAWSAurora:
if err := aws.NewRdsClient(r.AwsRDS).DeleteAuroraCluster(ctx,
node, databaseClass); err != nil {
return err
@@ -429,6 +391,37 @@ func (r *StorageNodeReconciler) deleteDatabaseCluster(ctx
context.Context, node
return nil
}
+func (r *StorageNodeReconciler) deleteAWSRDSInstance(ctx context.Context,
client aws.IRdsClient, node *v1alpha1.StorageNode, databaseClass
*dbmeshv1alpha1.DatabaseClass) error {
+ instance, err := client.GetInstance(ctx, node)
+ if err != nil {
+ return err
+ }
+
+ if instance == nil {
+ r.Log.Info(fmt.Sprintf("instance [%s] is not found",
node.Annotations[dbmeshv1alpha1.AnnotationsInstanceIdentifier]))
+ return nil
+ }
+
+ if instance.DBInstanceStatus ==
v1alpha1.StorageNodeInstanceStatusDeleting {
+ r.Log.Info(fmt.Sprintf("instance [%s] is deleting",
node.Annotations[dbmeshv1alpha1.AnnotationsInstanceIdentifier]))
+ return nil
+ }
+
+ if err := client.DeleteInstance(ctx, node, databaseClass); err != nil {
+ r.Recorder.Eventf(node, corev1.EventTypeWarning,
"DeleteFailed", "Failed to delete instance [%s]: %s",
node.Annotations[dbmeshv1alpha1.AnnotationsInstanceIdentifier], err.Error())
+ return err
+ }
+
+ r.Recorder.Event(node, corev1.EventTypeNormal, "Deleting",
fmt.Sprintf("instance [%s] is deleting",
node.Annotations[dbmeshv1alpha1.AnnotationsInstanceIdentifier]))
+
+ // update node status with the instance state observed before deletion
+ if err := updateAWSRDSInstanceStatus(node, instance); err != nil {
+ return fmt.Errorf("updateAWSRDSInstanceStatus failed: %w", err)
+ }
+
+ return nil
+}
+
// SetupWithManager sets up the controller with the Manager
func (r *StorageNodeReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
diff --git
a/shardingsphere-operator/pkg/reconcile/storagenode/aws/rdsinstance.go
b/shardingsphere-operator/pkg/reconcile/storagenode/aws/rdsinstance.go
index cee517b..764d3b4 100644
--- a/shardingsphere-operator/pkg/reconcile/storagenode/aws/rdsinstance.go
+++ b/shardingsphere-operator/pkg/reconcile/storagenode/aws/rdsinstance.go
@@ -183,6 +183,7 @@ func (c *RdsClient) DeleteInstance(ctx context.Context,
node *v1alpha1.StorageNo
if err != nil {
return err
}
+
if ins == nil || ins.DBInstanceStatus ==
v1alpha1.StorageNodeInstanceStatusDeleting {
return nil
}
@@ -196,10 +197,9 @@ func (c *RdsClient) DeleteInstance(ctx context.Context,
node *v1alpha1.StorageNo
case dbmeshv1alpha1.DatabaseReclaimRetain:
isDeleteBackup, isSkipFinalSnapshot = false, true
}
+
instance.SetDeleteAutomateBackups(isDeleteBackup)
instance.SetSkipFinalSnapshot(isSkipFinalSnapshot)
- // instance.SetDeleteAutomateBackups(true)
- // instance.SetSkipFinalSnapshot(true)
return instance.Delete(ctx)
}
diff --git a/shardingsphere-operator/test/e2e/storage_node_controller_test.go
b/shardingsphere-operator/test/e2e/storage_node_controller_test.go
index 19e230e..0d10692 100644
--- a/shardingsphere-operator/test/e2e/storage_node_controller_test.go
+++ b/shardingsphere-operator/test/e2e/storage_node_controller_test.go
@@ -25,9 +25,9 @@ import (
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/reconcile/storagenode/aws"
+ dbmesh_rds "github.com/database-mesh/golang-sdk/aws/client/rds"
"bou.ke/monkey"
- dbmesh_rds "github.com/database-mesh/golang-sdk/aws/client/rds"
dbmeshv1alpha1
"github.com/database-mesh/golang-sdk/kubernetes/api/v1alpha1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -39,7 +39,6 @@ var _ = Describe("StorageNode Controller Suite Test", func() {
var databaseClassName = "test-database-class"
BeforeEach(func() {
-
databaseClass := &dbmeshv1alpha1.DatabaseClass{
ObjectMeta: metav1.ObjectMeta{
Name: databaseClassName,
@@ -67,30 +66,25 @@ var _ = Describe("StorageNode Controller Suite Test",
func() {
})
Context("reconcile storageNode", func() {
- BeforeEach(func() {
+ AfterEach(func() {
+ monkey.UnpatchAll()
+ })
- // mock get instance func returns Available status
+ It("should create success", func() {
+ // mock GetInstance to return an instance in "available" status
monkey.PatchInstanceMethod(reflect.TypeOf(&aws.RdsClient{}), "GetInstance",
func(_ *aws.RdsClient, _ context.Context, _ *v1alpha1.StorageNode)
(*dbmesh_rds.DescInstance, error) {
return &dbmesh_rds.DescInstance{
- DBInstanceStatus: "available",
+ DBInstanceStatus:
v1alpha1.StorageNodeInstanceStatusAvailable,
Endpoint: dbmesh_rds.Endpoint{
Address: "127.0.0.1",
Port: 3306,
},
}, nil
})
-
// mock delete instance func returns success
monkey.PatchInstanceMethod(reflect.TypeOf(&aws.RdsClient{}), "DeleteInstance",
func(_ *aws.RdsClient, _ context.Context, _ *v1alpha1.StorageNode, _
*dbmeshv1alpha1.DatabaseClass) error {
return nil
})
- })
-
- AfterEach(func() {
- monkey.UnpatchAll()
- })
-
- It("should be success", func() {
nodeName := "test-storage-node-ready"
node := &v1alpha1.StorageNode{
@@ -120,56 +114,26 @@ var _ = Describe("StorageNode Controller Suite Test",
func() {
Expect(k8sClient.Delete(ctx, node)).Should(Succeed())
})
- Context("reconcile storageNode with Creating instance", func() {
- BeforeEach(func() {
- // mock get instance func returns creating
status
-
monkey.PatchInstanceMethod(reflect.TypeOf(&aws.RdsClient{}), "GetInstance",
func(_ *aws.RdsClient, _ context.Context, _ *v1alpha1.StorageNode)
(*dbmesh_rds.DescInstance, error) {
- return &dbmesh_rds.DescInstance{
- DBInstanceStatus: "creating",
- Endpoint: dbmesh_rds.Endpoint{
- Address: "127.0.0.1",
- Port: 3306,
- },
- }, nil
- })
- // mock delete instance func return success
-
monkey.PatchInstanceMethod(reflect.TypeOf(&aws.RdsClient{}), "DeleteInstance",
func(_ *aws.RdsClient, _ context.Context, _ *v1alpha1.StorageNode, _
*dbmeshv1alpha1.DatabaseClass) error {
- return nil
- })
- })
-
- AfterEach(func() {
- monkey.UnpatchAll()
- })
-
- It("should be success", func() {
- nodeName := "test-storage-node-creating"
- node := &v1alpha1.StorageNode{
- ObjectMeta: metav1.ObjectMeta{
- Name: nodeName,
- Namespace: "default",
- Annotations: map[string]string{
-
dbmeshv1alpha1.AnnotationsInstanceIdentifier: "test-instance-identifier",
- },
- },
- Spec: v1alpha1.StorageNodeSpec{
- DatabaseClassName:
databaseClassName,
+ It("should delete success", func() {
+ nodeName := "test-storage-node-delete"
+ node := &v1alpha1.StorageNode{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: nodeName,
+ Namespace: "default",
+ Annotations: map[string]string{
+
dbmeshv1alpha1.AnnotationsInstanceIdentifier: "test-instance-identifier",
},
- }
-
- // create resource
- Expect(k8sClient.Create(ctx,
node)).Should(Succeed())
+ },
+ Spec: v1alpha1.StorageNodeSpec{
+ DatabaseClassName: databaseClassName,
+ },
+ }
+ Expect(k8sClient.Create(ctx, node)).Should(Succeed())
- // check storage node status
- Eventually(func()
v1alpha1.StorageNodePhaseStatus {
- newSN := &v1alpha1.StorageNode{}
- Expect(k8sClient.Get(ctx,
client.ObjectKey{Name: nodeName, Namespace: "default"},
newSN)).Should(Succeed())
- return newSN.Status.Phase
- }, 10*time.Second,
1*time.Second).Should(Equal(v1alpha1.StorageNodePhaseNotReady))
+ getNode := &v1alpha1.StorageNode{}
+ Expect(k8sClient.Get(ctx, client.ObjectKey{Name:
nodeName, Namespace: "default"}, getNode)).Should(Succeed())
- // delete resource
- Expect(k8sClient.Delete(ctx,
node)).Should(Succeed())
- })
+ // delete storage node
})
})
})