This is an automated email from the ASF dual-hosted git repository.
zhaojinchao pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/shardingsphere-on-cloud.git
The following commit(s) were added to refs/heads/main by this push:
     new 8859d74  fix(operator): fix readyNodes and conditions in ProxyStatus (#177)
8859d74 is described below
commit 8859d74a2c54548c7f2c1a6eb138b66bd9361b59
Author: liyao <[email protected]>
AuthorDate: Wed Jan 11 12:58:54 2023 +0800
fix(operator): fix readyNodes and conditions in ProxyStatus (#177)
* wip: add test for deployment reconcile
Signed-off-by: mlycore <[email protected]>
* chore: add Deployed as new Condition
Signed-off-by: mlycore <[email protected]>
* chore: update reconcileStatus
Signed-off-by: mlycore <[email protected]>
* chore: fix unit test
Signed-off-by: mlycore <[email protected]>
* chore: remove useless comments
Signed-off-by: mlycore <[email protected]>
* fix: split newConditions into updateReadyConditions and updateNotReadyConditions
Signed-off-by: mlycore <[email protected]>
Signed-off-by: mlycore <[email protected]>
---
.../api/v1alpha1/proxy_status.go | 18 +-
.../api/v1alpha1/zz_generated.deepcopy.go | 2 +-
.../pkg/controllers/proxy_controller.go | 25 +-
.../pkg/reconcile/deployment.go | 135 +++++++
.../pkg/reconcile/deployment_test.go | 422 +++++++++++++++++++++
5 files changed, 572 insertions(+), 30 deletions(-)
diff --git a/shardingsphere-operator/api/v1alpha1/proxy_status.go b/shardingsphere-operator/api/v1alpha1/proxy_status.go
index af6ffb7..278e3cb 100644
--- a/shardingsphere-operator/api/v1alpha1/proxy_status.go
+++ b/shardingsphere-operator/api/v1alpha1/proxy_status.go
@@ -33,10 +33,12 @@ type ConditionType string
// ConditionType shows some states during the startup process of ShardingSphere-Proxy
const (
+ ConditionDeployed ConditionType = "Deployed"
ConditionInitialized ConditionType = "Initialized"
ConditionStarted ConditionType = "Started"
ConditionReady ConditionType = "Ready"
ConditionUnknown ConditionType = "Unknown"
+ ConditionFailed ConditionType = "Failed"
)
// ProxyStatus defines the observed state of ShardingSphereProxy
@@ -48,7 +50,7 @@ type ProxyStatus struct {
Phase PhaseStatus `json:"phase"`
//Conditions The conditions array, the reason and message fields
- Conditions Conditions `json:"conditions"`
+ Conditions []Condition `json:"conditions"`
//ReadyNodes shows the number of replicas that ShardingSphere-Proxy is running normally
ReadyNodes int32 `json:"readyNodes"`
}
@@ -56,14 +58,14 @@ type ProxyStatus struct {
type Conditions []Condition
// Condition
-// | **condition** | **status** | **directions**|
+// | **phase** | **condition** | **descriptions**|
// | ------------- | ---------- | ---------------------------------------------------- |
-// | Initialized | true | Initialization successful|
-// | Initialized | false | initialization failed|
-// | Started | true | pod started successfully but not ready|
-// | Started | false | pod started failed|
-// | Ready | true | The pod is ready and can provide external services|
-// | Unknown | true | ShardingSphere-Proxy failed to start correctly due to some problems |
+// | NotReady | Deployed | pods are deployed but not yet created, or still pending|
+// | NotReady | Started | pods are started but do not yet satisfy the ready requirements|
+// | Ready | Ready | the minimum number of pods satisfies the ready requirements|
+// | NotReady | Unknown | cannot determine the status of the pods|
+// | NotReady | Failed | ShardingSphere-Proxy failed to start correctly due to some problems|
+
type Condition struct {
Type ConditionType `json:"type"`
Status v1.ConditionStatus `json:"status"`
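
For readers skimming the diff, this is what the new status shape looks like for a proxy whose pods are still pending. It is a hand-written illustrative sketch, not part of the commit; it uses only the v1alpha1 and metav1 types shown above:

    package main

    import (
    	"fmt"

    	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	// Pods are deployed but still pending: the phase stays NotReady and
    	// the new Deployed condition is recorded as true.
    	status := v1alpha1.ProxyStatus{
    		Phase: v1alpha1.StatusNotReady,
    		Conditions: []v1alpha1.Condition{{
    			Type:           v1alpha1.ConditionDeployed,
    			Status:         metav1.ConditionTrue,
    			LastUpdateTime: metav1.Now(),
    		}},
    		ReadyNodes: 0,
    	}
    	fmt.Printf("%+v\n", status)
    }
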
diff --git a/shardingsphere-operator/api/v1alpha1/zz_generated.deepcopy.go b/shardingsphere-operator/api/v1alpha1/zz_generated.deepcopy.go
index 0b649d4..a7d0009 100644
--- a/shardingsphere-operator/api/v1alpha1/zz_generated.deepcopy.go
+++ b/shardingsphere-operator/api/v1alpha1/zz_generated.deepcopy.go
@@ -276,7 +276,7 @@ func (in *ProxyStatus) DeepCopyInto(out *ProxyStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
- *out = make(Conditions, len(*in))
+ *out = make([]Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
diff --git a/shardingsphere-operator/pkg/controllers/proxy_controller.go b/shardingsphere-operator/pkg/controllers/proxy_controller.go
index e582448..9df369a 100644
--- a/shardingsphere-operator/pkg/controllers/proxy_controller.go
+++ b/shardingsphere-operator/pkg/controllers/proxy_controller.go
@@ -103,7 +103,7 @@ func (r *ProxyReconciler) reconcile(ctx context.Context, req ctrl.Request, rt *v
return res, err
}
- return ctrl.Result{}, nil
+ return ctrl.Result{RequeueAfter: WaitingForReady}, nil
}
func (r *ProxyReconciler) reconcileDeployment(ctx context.Context, namespacedName types.NamespacedName) (ctrl.Result, error) {
@@ -216,7 +216,6 @@ func (r *ProxyReconciler) reconcilePodList(ctx context.Context, namespace, name
}
result := ctrl.Result{}
- readyNodes := reconcile.CountingReadyPods(podList)
rt, err := r.getRuntimeShardingSphereProxy(ctx, types.NamespacedName{
Namespace: namespace,
@@ -225,31 +224,15 @@ func (r *ProxyReconciler) reconcilePodList(ctx context.Context, namespace, name
if err != nil {
return ctrl.Result{}, err
}
- if reconcile.IsRunning(podList) {
- if readyNodes < miniReadyCount {
- result.RequeueAfter = WaitingForReady
- if readyNodes != rt.Status.ReadyNodes {
- rt.SetPodStarted(readyNodes)
- }
- } else {
- if rt.Status.Phase != v1alpha1.StatusReady {
- rt.SetReady(readyNodes)
- } else if readyNodes != rt.Spec.Replicas {
- rt.UpdateReadyNodes(readyNodes)
- }
- }
- } else {
-		// TODO: Waiting for pods to start exceeds the maximum number of retries
- rt.SetPodNotStarted(readyNodes)
- result.RequeueAfter = WaitingForReady
- }
+
+ rt.Status = reconcile.ReconcileStatus(*podList, *rt)
// TODO: Compare Status with or without modification
if err := r.Status().Update(ctx, rt); err != nil {
return result, err
}
- return result, nil
+ return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager.
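
Condensed, the pod-list reconcile path above now reduces to the following. This is a sketch assembled from the hunks in this file (getRuntimeShardingSphereProxy and reconcile.ReconcileStatus appear above; the pod-listing step and everything else in the controller are elided):

    func (r *ProxyReconciler) reconcilePodList(ctx context.Context, namespace, name string) (ctrl.Result, error) {
    	podList := &corev1.PodList{}
    	// ... list the proxy's pods into podList (elided) ...

    	rt, err := r.getRuntimeShardingSphereProxy(ctx, types.NamespacedName{
    		Namespace: namespace,
    		Name:      name,
    	})
    	if err != nil {
    		return ctrl.Result{}, err
    	}

    	// All readyNodes/phase/condition bookkeeping now lives in the
    	// reconcile package instead of being inlined here.
    	rt.Status = reconcile.ReconcileStatus(*podList, *rt)
    	if err := r.Status().Update(ctx, rt); err != nil {
    		return ctrl.Result{}, err
    	}
    	return ctrl.Result{}, nil
    }
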
diff --git a/shardingsphere-operator/pkg/reconcile/deployment.go b/shardingsphere-operator/pkg/reconcile/deployment.go
index 3b9ba10..7274142 100644
--- a/shardingsphere-operator/pkg/reconcile/deployment.go
+++ b/shardingsphere-operator/pkg/reconcile/deployment.go
@@ -24,6 +24,7 @@ import (
"strings"
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
+
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -37,6 +38,9 @@ func NewDeployment(ssproxy *v1alpha1.ShardingSphereProxy) *v1.Deployment {
const (
	AnnoRollingUpdateMaxSurge       = "shardingsphereproxy.shardingsphere.org/rolling-update-max-surge"
	AnnoRollingUpdateMaxUnavailable = "shardingsphereproxy.shardingsphere.org/rolling-update-max-unavailable"
+
+	// miniReadyCount is the minimum number of ready replicas required to serve
+	miniReadyCount = 1
)
func ConstructCascadingDeployment(proxy *v1alpha1.ShardingSphereProxy) *v1.Deployment {
@@ -336,3 +340,134 @@ func updateSSProxyContainer(proxy *v1alpha1.ShardingSphereProxy, act *v1.Deploym
}
return exp
}
+
+func getReadyNodes(podlist corev1.PodList) int32 {
+	var cnt int32
+	for _, p := range podlist.Items {
+		if p.Status.Phase == corev1.PodRunning {
+			for _, c := range p.Status.Conditions {
+				if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
+					for _, con := range p.Status.ContainerStatuses {
+						if con.Name == "proxy" && con.Ready {
+							cnt++
+						}
+					}
+				}
+			}
+		}
+	}
+	return cnt
+}
+
+func ReconcileStatus(podlist corev1.PodList, rt v1alpha1.ShardingSphereProxy) v1alpha1.ProxyStatus {
+	readyNodes := getReadyNodes(podlist)
+
+	rt.Status.ReadyNodes = readyNodes
+	if rt.Spec.Replicas == 0 {
+		rt.Status.Phase = v1alpha1.StatusNotReady
+	} else {
+		if readyNodes < miniReadyCount {
+			rt.Status.Phase = v1alpha1.StatusNotReady
+		} else {
+			rt.Status.Phase = v1alpha1.StatusReady
+		}
+	}
+
+	if rt.Status.Phase == v1alpha1.StatusReady {
+		rt.Status.Conditions = updateReadyConditions(rt.Status.Conditions, v1alpha1.Condition{
+			Type:           v1alpha1.ConditionReady,
+			Status:         metav1.ConditionTrue,
+			LastUpdateTime: metav1.Now(),
+		})
+	} else {
+		cond := clusterCondition(podlist)
+		rt.Status.Conditions = updateNotReadyConditions(rt.Status.Conditions, cond)
+	}
+
+	return rt.Status
+}
+
+func newConditions(conditions []v1alpha1.Condition, cond v1alpha1.Condition) []v1alpha1.Condition {
+	if conditions == nil {
+		conditions = []v1alpha1.Condition{}
+	}
+	if cond.Type == "" {
+		return conditions
+	}
+
+	found := false
+	for idx := range conditions {
+		if conditions[idx].Type == cond.Type {
+			conditions[idx].LastUpdateTime = cond.LastUpdateTime
+			conditions[idx].Status = cond.Status
+			found = true
+			break
+		}
+	}
+
+	if !found {
+		conditions = append(conditions, cond)
+	}
+
+	return conditions
+}
+
+func updateReadyConditions(conditions []v1alpha1.Condition, cond v1alpha1.Condition) []v1alpha1.Condition {
+	return newConditions(conditions, cond)
+}
+
+func updateNotReadyConditions(conditions []v1alpha1.Condition, cond v1alpha1.Condition) []v1alpha1.Condition {
+	cur := newConditions(conditions, cond)
+
+	// Iterate over cur, not the original slice: newConditions may have
+	// appended an entry, and the updated slice is what must be returned.
+	for idx := range cur {
+		if cur[idx].Type == v1alpha1.ConditionReady {
+			cur[idx].LastUpdateTime = metav1.Now()
+			cur[idx].Status = metav1.ConditionFalse
+		}
+	}
+
+	return cur
+}
+
+func clusterCondition(podlist corev1.PodList) v1alpha1.Condition {
+	cond := v1alpha1.Condition{}
+	if len(podlist.Items) == 0 {
+		return cond
+	}
+
+	condStarted := v1alpha1.Condition{
+		Type:           v1alpha1.ConditionStarted,
+		Status:         metav1.ConditionTrue,
+		LastUpdateTime: metav1.Now(),
+	}
+	condUnknown := v1alpha1.Condition{
+		Type:           v1alpha1.ConditionUnknown,
+		Status:         metav1.ConditionTrue,
+		LastUpdateTime: metav1.Now(),
+	}
+	condDeployed := v1alpha1.Condition{
+		Type:           v1alpha1.ConditionDeployed,
+		Status:         metav1.ConditionTrue,
+		LastUpdateTime: metav1.Now(),
+	}
+	condFailed := v1alpha1.Condition{
+		Type:           v1alpha1.ConditionFailed,
+		Status:         metav1.ConditionTrue,
+		LastUpdateTime: metav1.Now(),
+	}
+
+	// FIXME: ConditionStarted may not be captured in some cases, since the
+	// first matching pod phase decides the cluster-level condition.
+	for _, p := range podlist.Items {
+		switch p.Status.Phase {
+		case corev1.PodRunning:
+			return condStarted
+		case corev1.PodUnknown:
+			return condUnknown
+		case corev1.PodPending:
+			return condDeployed
+		case corev1.PodFailed:
+			return condFailed
+		}
+	}
+	return cond
+}
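
To see the new entry point end to end, here is a minimal, self-contained sketch that feeds ReconcileStatus a synthetic pod list with one ready "proxy" container. The reconcile import path is assumed from the repository layout; the expected output follows from getReadyNodes above:

    package main

    import (
    	"fmt"

    	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
    	"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/reconcile"
    	corev1 "k8s.io/api/core/v1"
    )

    func main() {
    	// One running pod whose "proxy" container reports ready.
    	podList := corev1.PodList{Items: []corev1.Pod{{
    		Status: corev1.PodStatus{
    			Phase: corev1.PodRunning,
    			Conditions: []corev1.PodCondition{{
    				Type:   corev1.PodReady,
    				Status: corev1.ConditionTrue,
    			}},
    			ContainerStatuses: []corev1.ContainerStatus{{
    				Name:  "proxy",
    				Ready: true,
    			}},
    		},
    	}}}

    	rt := v1alpha1.ShardingSphereProxy{Spec: v1alpha1.ProxySpec{Replicas: 1}}

    	status := reconcile.ReconcileStatus(podList, rt)
    	fmt.Println(status.Phase, status.ReadyNodes) // expect: Ready 1
    }
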
diff --git a/shardingsphere-operator/pkg/reconcile/deployment_test.go b/shardingsphere-operator/pkg/reconcile/deployment_test.go
index 0278d57..96645a1 100644
--- a/shardingsphere-operator/pkg/reconcile/deployment_test.go
+++ b/shardingsphere-operator/pkg/reconcile/deployment_test.go
@@ -457,3 +457,425 @@ func Test_UpdateDeployment(t *testing.T) {
		assert.EqualValues(t, c.proxy.Spec.StartupProbe, exp.Spec.Template.Spec.Containers[0].StartupProbe, c.message)
}
}
+
+func Test_ReconcileStatus(t *testing.T) {
+	cases := []struct {
+		name    string
+		podlist v1.PodList
+		exp     *v1alpha1.ShardingSphereProxy
+		spec    v1alpha1.ProxySpec
+		message string
+	}{
+		{
+			name:    "#0 empty Podlist and replicas is zero",
+			podlist: v1.PodList{Items: []v1.Pod{}},
+			exp: &v1alpha1.ShardingSphereProxy{
+				Spec: v1alpha1.ProxySpec{
+					Replicas: 0,
+				},
+				Status: v1alpha1.ProxyStatus{
+					Phase:      v1alpha1.StatusNotReady,
+					Conditions: []v1alpha1.Condition{},
+					ReadyNodes: 0,
+				},
+			},
+			message: "#0 empty Podlist and replicas is zero should be ok",
+		},
+		{
+			name: "#1 one pod pending and replicas is not zero",
+			podlist: v1.PodList{Items: []v1.Pod{
+				{
+					Status: v1.PodStatus{
+						Phase: v1.PodPending,
+					},
+				},
+			}},
+			spec: v1alpha1.ProxySpec{Replicas: 1},
+			exp: &v1alpha1.ShardingSphereProxy{
+				Spec: v1alpha1.ProxySpec{
+					Replicas: 1,
+				},
+				Status: v1alpha1.ProxyStatus{
+					Phase: v1alpha1.StatusNotReady,
+					Conditions: []v1alpha1.Condition{
+						{
+							Type:   v1alpha1.ConditionDeployed,
+							Status: metav1.ConditionTrue,
+						},
+					},
+					ReadyNodes: 0,
+				},
+			},
+			message: "#1 one pod pending and replicas is not zero should be ok",
+		},
+		{
+			name: "#2 one pending",
+			podlist: v1.PodList{Items: []v1.Pod{
+				{
+					Status: v1.PodStatus{
+						Phase: v1.PodPending,
+						Conditions: []v1.PodCondition{
+							{
+								Type:   v1.PodScheduled,
+								Status: v1.ConditionTrue,
+							},
+						},
+					},
+				},
+			}},
+			exp: &v1alpha1.ShardingSphereProxy{
+				Spec: v1alpha1.ProxySpec{
+					Replicas: 1,
+				},
+				Status: v1alpha1.ProxyStatus{
+					Phase: v1alpha1.StatusNotReady,
+					Conditions: []v1alpha1.Condition{
+						{
+							Type:   v1alpha1.ConditionDeployed,
+							Status: metav1.ConditionTrue,
+						},
+					},
+					ReadyNodes: 0,
+				},
+			},
+			message: "#2 one pending should be ok",
+		},
+		{
+			name: "#3 one scheduled but not initialized",
+			podlist: v1.PodList{Items: []v1.Pod{
+				{
+					Status: v1.PodStatus{
+						Phase: v1.PodRunning,
+						Conditions: []v1.PodCondition{
+							{
+								Type:   v1.PodInitialized,
+								Status: v1.ConditionTrue,
+							},
+						},
+					},
+				},
+			}},
+			exp: &v1alpha1.ShardingSphereProxy{
+				Spec: v1alpha1.ProxySpec{Replicas: 1},
+				Status: v1alpha1.ProxyStatus{
+					Phase: v1alpha1.StatusNotReady,
+					Conditions: []v1alpha1.Condition{
+						{
+							Type:   v1alpha1.ConditionStarted,
+							Status: metav1.ConditionTrue,
+						},
+					},
+					ReadyNodes: 0,
+				},
+			},
+			message: "#3 one scheduled but not initialized should be ok",
+		},
+		{
+			name: "#4 two scheduled but not started",
+			podlist: v1.PodList{Items: []v1.Pod{
+				{
+					Status: v1.PodStatus{
+						Phase: v1.PodRunning,
+						Conditions: []v1.PodCondition{
+							{
+								Type:   v1.PodInitialized,
+								Status: v1.ConditionTrue,
+							},
+						},
+					},
+				},
+				{
+					Status: v1.PodStatus{
+						Phase: v1.PodRunning,
+						Conditions: []v1.PodCondition{
+							{
+								Type:   v1.PodInitialized,
+								Status: v1.ConditionTrue,
+							},
+						},
+					},
+				},
+			}},
+			exp: &v1alpha1.ShardingSphereProxy{
+				Spec: v1alpha1.ProxySpec{Replicas: 2},
+				Status: v1alpha1.ProxyStatus{
+					Phase: v1alpha1.StatusNotReady,
+					Conditions: []v1alpha1.Condition{
+						{
+							Type:   v1alpha1.ConditionStarted,
+							Status: metav1.ConditionTrue,
+						},
+					},
+					ReadyNodes: 0,
+				},
+			},
+			message: "#4 two scheduled but not started should be ok",
+		},
+		{
+			name: "#5 two started but not ready",
+			podlist: v1.PodList{Items: []v1.Pod{
+				{
+					Status: v1.PodStatus{
+						Phase: v1.PodRunning,
+						Conditions: []v1.PodCondition{
+							{
+								Type:   v1.PodInitialized,
+								Status: v1.ConditionTrue,
+							},
+						},
+					},
+				},
+				{
+					Status: v1.PodStatus{
+						Phase: v1.PodRunning,
+						Conditions: []v1.PodCondition{
+							{
+								Type:   v1.PodInitialized,
+								Status: v1.ConditionTrue,
+							},
+						},
+					},
+				},
+			}},
+			exp: &v1alpha1.ShardingSphereProxy{
+				Spec: v1alpha1.ProxySpec{Replicas: 2},
+				Status: v1alpha1.ProxyStatus{
+					Phase: v1alpha1.StatusNotReady,
+					Conditions: []v1alpha1.Condition{
+						{
+							Type:   v1alpha1.ConditionStarted,
+							Status: metav1.ConditionTrue,
+						},
+					},
+					ReadyNodes: 0,
+				},
+			},
+			message: "#5 two started but not ready should be ok",
+		},
+		{
+			name: "#6 one started and one ready",
+			podlist: v1.PodList{Items: []v1.Pod{
+				{
+					Status: v1.PodStatus{
+						Phase: v1.PodRunning,
+						Conditions: []v1.PodCondition{
+							{
+								Type:   v1.PodInitialized,
+								Status: v1.ConditionTrue,
+							},
+						},
+					},
+				},
+				{
+					Status: v1.PodStatus{
+						Phase: v1.PodRunning,
+						Conditions: []v1.PodCondition{
+							{
+								Type:   v1.PodReady,
+								Status: v1.ConditionTrue,
+							},
+						},
+						ContainerStatuses: []v1.ContainerStatus{
+							{
+								Name:  "proxy",
+								Ready: true,
+							},
+						},
+					},
+				},
+			}},
+			exp: &v1alpha1.ShardingSphereProxy{
+				Spec: v1alpha1.ProxySpec{Replicas: 2},
+				Status: v1alpha1.ProxyStatus{
+					Phase: v1alpha1.StatusReady,
+					Conditions: []v1alpha1.Condition{
+						{
+							Type:   v1alpha1.ConditionReady,
+							Status: metav1.ConditionTrue,
+						},
+					},
+					ReadyNodes: 1,
+				},
+			},
+			message: "#6 one started and one ready should be ok",
+		},
+		{
+			name: "#7 two ready",
+			podlist: v1.PodList{Items: []v1.Pod{
+				{
+					Status: v1.PodStatus{
+						Phase: v1.PodRunning,
+						Conditions: []v1.PodCondition{
+							{
+								Type:   v1.PodReady,
+								Status: v1.ConditionTrue,
+							},
+						},
+						ContainerStatuses: []v1.ContainerStatus{
+							{
+								Name:  "proxy",
+								Ready: true,
+							},
+						},
+					},
+				},
+				{
+					Status: v1.PodStatus{
+						Phase: v1.PodRunning,
+						Conditions: []v1.PodCondition{
+							{
+								Type:   v1.PodReady,
+								Status: v1.ConditionTrue,
+							},
+						},
+						ContainerStatuses: []v1.ContainerStatus{
+							{
+								Name:  "proxy",
+								Ready: true,
+							},
+						},
+					},
+				},
+			}},
+			exp: &v1alpha1.ShardingSphereProxy{
+				Spec: v1alpha1.ProxySpec{Replicas: 2},
+				Status: v1alpha1.ProxyStatus{
+					Phase: v1alpha1.StatusReady,
+					Conditions: []v1alpha1.Condition{
+						{
+							Type:   v1alpha1.ConditionReady,
+							Status: metav1.ConditionTrue,
+						},
+					},
+					ReadyNodes: 2,
+				},
+			},
+			message: "#7 two ready should be ok",
+		},
+	}
+
+	for _, c := range cases {
+		act := ReconcileStatus(c.podlist, *c.exp)
+		assertReadyNodes(t, c.exp.Status.ReadyNodes, act.ReadyNodes, c.message)
+		assertPhase(t, c.exp.Status.Phase, act.Phase, c.message)
+		assertConditions(t, c.exp.Status.Conditions, act.Conditions, c.message)
+	}
+}
+
+func assertReadyNodes(t *testing.T, exp, act int32, message string) bool {
+ return assert.Equal(t, exp, act, message)
+}
+
+func assertPhase(t *testing.T, exp, act v1alpha1.PhaseStatus, message string) bool {
+	return assert.Equal(t, exp, act, message)
+}
+
+func assertConditions(t *testing.T, exp, act []v1alpha1.Condition, message string) bool {
+	if !assert.Equal(t, len(exp), len(act), message) {
+		return false
+	}
+	for idx := range exp {
+		if !assert.Equal(t, exp[idx].Type, act[idx].Type, message) {
+			return false
+		}
+		if !assert.Equal(t, exp[idx].Status, act[idx].Status, message) {
+			return false
+		}
+	}
+	return true
+}
+
+func Test_ClusterConditions(t *testing.T) {
+ cases := []struct {
+ name string
+ podlist v1.PodList
+ exp v1alpha1.Condition
+ message string
+ }{
+ {
+ name: "#0",
+ podlist: v1.PodList{Items: []v1.Pod{
+ {},
+ }},
+ exp: v1alpha1.Condition{},
+ },
+ {
+ name: "#1 PodFailed",
+ podlist: v1.PodList{Items: []v1.Pod{
+ {
+ Status: v1.PodStatus{
+ Phase: v1.PodFailed,
+ },
+ },
+ }},
+ exp: v1alpha1.Condition{
+ Type: v1alpha1.ConditionFailed,
+ Status: metav1.ConditionTrue,
+ LastUpdateTime: metav1.Now(),
+ },
+ },
+ {
+ name: "#2 PodPending",
+ podlist: v1.PodList{Items: []v1.Pod{
+ {
+ Status: v1.PodStatus{
+ Phase: v1.PodPending,
+ },
+ },
+ }},
+ exp: v1alpha1.Condition{
+ Type: v1alpha1.ConditionDeployed,
+ Status: metav1.ConditionTrue,
+ LastUpdateTime: metav1.Now(),
+ },
+ },
+ {
+ name: "#3 PodRunning",
+ podlist: v1.PodList{Items: []v1.Pod{
+ {
+ Status: v1.PodStatus{
+ Phase: v1.PodRunning,
+ },
+ },
+ }},
+ exp: v1alpha1.Condition{
+ Type: v1alpha1.ConditionStarted,
+ Status: metav1.ConditionTrue,
+ LastUpdateTime: metav1.Now(),
+ },
+ },
+ {
+ name: "#4 PodUnknown",
+ podlist: v1.PodList{Items: []v1.Pod{
+ {
+ Status: v1.PodStatus{
+ Phase: v1.PodUnknown,
+ },
+ },
+ }},
+ exp: v1alpha1.Condition{
+ Type: v1alpha1.ConditionUnknown,
+ Status: metav1.ConditionTrue,
+ LastUpdateTime: metav1.Now(),
+ },
+ },
+ {
+ name: "#5 PodSucceeded",
+ podlist: v1.PodList{Items: []v1.Pod{
+ {
+ Status: v1.PodStatus{
+ Phase: v1.PodSucceeded,
+ },
+ },
+ }},
+ exp: v1alpha1.Condition{},
+ },
+ }
+
+ for _, c := range cases {
+ act := clusterCondition(c.podlist)
+ assert.Equal(t, c.exp.Type, act.Type, c.name)
+ assert.Equal(t, c.exp.Status, act.Status, c.name)
+ }
+}
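
Assuming a checkout of the operator module, the two new table-driven tests can be run on their own with the standard Go toolchain:

    cd shardingsphere-operator
    go test ./pkg/reconcile/ -run 'Test_ReconcileStatus|Test_ClusterConditions' -v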