Xu-Wentao commented on code in PR #316:
URL: https://github.com/apache/shardingsphere-on-cloud/pull/316#discussion_r1174818081
##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -153,57 +167,88 @@ func (r *ShardingSphereChaosReconciler) reconcileChaos(ctx context.Context, ssCh
 func (r *ShardingSphereChaosReconciler) reconcileConfigMap(ctx context.Context, ssChaos *sschaosv1alpha1.ShardingSphereChaos) error {
 	logger := r.Log.WithValues("reconcile configmap", ssChaos.Name)
 	namespaceName := types.NamespacedName{Namespace: ssChaos.Namespace, Name: ssChaos.Name}
-	rConfigmap, isExist, err := r.getConfigMapByNamespacedName(ctx, namespaceName)
+	rConfigmap, err := r.getConfigMapByNamespacedName(ctx, namespaceName)
 	if err != nil {
 		logger.Error(err, "get configmap error")
 		return err
 	}
-	if isExist {
+	if rConfigmap != nil {
 		return r.updateConfigMap(ctx, ssChaos, rConfigmap)
 	}
-	return r.CreateConfigMap(ctx, ssChaos)
-}
-
-func (r *ShardingSphereChaosReconciler) reconcileJob(ctx context.Context, ssChaos *sschaosv1alpha1.ShardingSphereChaos) error {
-	logger := r.Log.WithValues("reconcile job", ssChaos.Name)
-	namespaceName := types.NamespacedName{Namespace: ssChaos.Namespace, Name: ssChaos.Name}
-	rJob, isExist, err := r.getJobByNamespacedName(ctx, namespaceName)
+	err = r.CreateConfigMap(ctx, ssChaos)
 	if err != nil {
-		logger.Error(err, "get job err")
+		r.Events.Event(ssChaos, "Warning", "Created", fmt.Sprintf("configmap created fail %s", err))
 		return err
 	}
+
+	r.Events.Event(ssChaos, "Normal", "Created", "configmap created successfully")
+	return nil
+}
+
+func (r *ShardingSphereChaosReconciler) reconcileJob(ctx context.Context, ssChaos *sschaosv1alpha1.ShardingSphereChaos) error {
 	var nowInjectRequirement reconcile.InjectRequirement
 	if ssChaos.Status.Phase == "" || ssChaos.Status.Phase == sschaosv1alpha1.PhaseBeforeExperiment || ssChaos.Status.Phase == sschaosv1alpha1.PhaseAfterExperiment {
 		nowInjectRequirement = reconcile.Experimental
 	}
-	if ssChaos.Status.Phase == sschaosv1alpha1.PhaseInChaos || ssChaos.Status.Phase == sschaosv1alpha1.PhaseRecoveredChaos {
+	if ssChaos.Status.Phase == sschaosv1alpha1.PhaseInChaos {
 		nowInjectRequirement = reconcile.Pressure
 	}
-	if isExist {
+	if ssChaos.Status.Phase == sschaosv1alpha1.PhaseRecoveredChaos {
+		nowInjectRequirement = reconcile.Verify
+	}
Review Comment:
A `switch` on the phase would be cleaner here:
```go
switch ssChaos.Status.Phase {
case "", sschaosv1alpha1.PhaseBeforeExperiment,
sschaosv1alpha1.PhaseAfterExperiment:
nowInjectRequirement = reconcile.Experimental
case sschaosv1alpha1.PhaseInChaos:
nowInjectRequirement = reconcile.Pressure
case sschaosv1alpha1.PhaseRecoveredChaos:
nowInjectRequirement = reconcile.Verify
}
```
##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -213,10 +258,183 @@ func (r *ShardingSphereChaosReconciler) reconcileStatus(ctx context.Context, nam
 	if err != nil {
 		return err
 	}
-	rt.Status = ssChaos.Status
+	setRtStatus(rt, ssChaos)
 	return r.Status().Update(ctx, rt)
 }
+func getRequirement(ssChaos *sschaosv1alpha1.ShardingSphereChaos) reconcile.InjectRequirement {
+	var jobName reconcile.InjectRequirement
+	if ssChaos.Status.Phase == sschaosv1alpha1.PhaseBeforeExperiment || ssChaos.Status.Phase == sschaosv1alpha1.PhaseAfterExperiment {
+		jobName = reconcile.Experimental
+	}
+	if ssChaos.Status.Phase == sschaosv1alpha1.PhaseInChaos {
+		jobName = reconcile.Pressure
+	}
+	if ssChaos.Status.Phase == sschaosv1alpha1.PhaseRecoveredChaos {
+		jobName = reconcile.Verify
+	}
+	return jobName
+}
+
+func getJobSuccessAndFail(conditions []batchV1.JobCondition) (bool, bool) {
+	var (
+		failed    = false
+		succeeded = false
+	)
+
+	for i := range conditions {
+		r := conditions[i]
+		if r.Type == batchV1.JobComplete {
+			if r.Status == v1.ConditionTrue {
+				succeeded = true
+			}
+			if r.Status == v1.ConditionFalse {
+				succeeded = false
+			}
+		}
+
+		if r.Type == batchV1.JobFailed {
+			if r.Status == v1.ConditionTrue {
+				failed = true
+			}
+			if r.Status == v1.ConditionFalse {
+				failed = false
+			}
+		}
+	}
+
+	return succeeded, failed
+}
+
+func setDefault(ssChaos *sschaosv1alpha1.ShardingSphereChaos) {
+	if ssChaos.Status.Phase == "" {
+		ssChaos.Status.Phase = sschaosv1alpha1.PhaseBeforeExperiment
+	}
+	if ssChaos.Status.Result == nil || len(ssChaos.Status.Result) == 0 {
+		ssChaos.Status.Result = []sschaosv1alpha1.Result{}
+	}
+}
+
+func setRtStatus(rt *sschaosv1alpha1.ShardingSphereChaos, ssChaos *sschaosv1alpha1.ShardingSphereChaos) {
+	rt.Status.Result = []sschaosv1alpha1.Result{}
+	for i := range ssChaos.Status.Result {
+		r := &ssChaos.Status.Result[i]
+		rt.Status.Result = append(rt.Status.Result, sschaosv1alpha1.Result{
+			Success: r.Success,
+			Detail: sschaosv1alpha1.Detail{
+				Time: metav1.Time{Time: time.Now()},
+				Msg:  r.Detail.Msg,
+			},
+		})
+	}
+	rt.Status.Phase = ssChaos.Status.Phase
+	rt.Status.ChaosCondition = ssChaos.Status.ChaosCondition
+}
+
+func (r *ShardingSphereChaosReconciler) updateRecoveredJob(ctx context.Context, ssChaos *sschaosv1alpha1.ShardingSphereChaos, rJob *batchV1.Job) error {
+	if isResultExist(rJob) {
+		return nil
+	}
+	for i := range ssChaos.Status.Result {
+		r := &ssChaos.Status.Result[i]
+		if len(r.Detail.Msg) >= len(VerifyJobCheck) && r.Detail.Msg[:len(VerifyJobCheck)] == VerifyJobCheck {
Review Comment:
Isn't this condition equivalent to `strings.HasPrefix(r.Detail.Msg, VerifyJobCheck)`?
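For example, the loop body could shrink to this sketch (assuming `strings` is already imported):
```go
// Covers both the length check and the slice comparison.
if strings.HasPrefix(r.Detail.Msg, VerifyJobCheck) {
	return nil
}
```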
##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -213,10 +258,183 @@ func (r *ShardingSphereChaosReconciler) reconcileStatus(ctx context.Context, nam
+func (r *ShardingSphereChaosReconciler) updateRecoveredJob(ctx context.Context, ssChaos *sschaosv1alpha1.ShardingSphereChaos, rJob *batchV1.Job) error {
+	if isResultExist(rJob) {
+		return nil
+	}
+	for i := range ssChaos.Status.Result {
+		r := &ssChaos.Status.Result[i]
+		if len(r.Detail.Msg) >= len(VerifyJobCheck) && r.Detail.Msg[:len(VerifyJobCheck)] == VerifyJobCheck {
+			return nil
+		}
+	}
+	logOpts := &v1.PodLogOptions{}
+	pod, err := r.getPodHaveLog(ctx, rJob)
+	if err != nil {
+		return err
+	}
+	podNamespacedName := types.NamespacedName{
+		Namespace: pod.Namespace,
+		Name:      pod.Name,
+	}
+	succeeded, failed := getJobSuccessAndFail(rJob.Status.Conditions)
+	result := &sschaosv1alpha1.Result{}
+	if succeeded {
+		log, err := r.getPodLog(ctx, podNamespacedName, logOpts)
+		if ssChaos.Spec.Expect.Verify == "" || ssChaos.Spec.Expect.Verify == log {
+			result.Success = true
+			result.Detail = sschaosv1alpha1.Detail{
+				Time: metav1.Time{Time: time.Now()},
+				Msg:  fmt.Sprintf("%s: job succeeded", VerifyJobCheck),
+			}
+		} else {
+			if err != nil {
+				return err
+			}
Review Comment:
Move the `err` check to where the error is created. As written, `log` from a failed `getPodLog` call is compared against `Expect.Verify` before `err` is ever inspected.
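One possible shape for the succeeded branch (a sketch, not the PR's final code):
```go
if succeeded {
	log, err := r.getPodLog(ctx, podNamespacedName, logOpts)
	if err != nil {
		// Inspect the error right where it is produced.
		return err
	}
	if ssChaos.Spec.Expect.Verify == "" || ssChaos.Spec.Expect.Verify == log {
		result.Success = true
		result.Detail = sschaosv1alpha1.Detail{
			Time: metav1.Time{Time: time.Now()},
			Msg:  fmt.Sprintf("%s: job succeeded", VerifyJobCheck),
		}
	} else {
		result.Success = false
		result.Detail = sschaosv1alpha1.Detail{
			Time: metav1.Time{Time: time.Now()},
			Msg:  fmt.Sprintf("%s: %s", VerifyJobCheck, log),
		}
	}
}
```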
##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -213,10 +258,183 @@ func (r *ShardingSphereChaosReconciler) reconcileStatus(ctx context.Context, nam
+func setDefault(ssChaos *sschaosv1alpha1.ShardingSphereChaos) {
+	if ssChaos.Status.Phase == "" {
+		ssChaos.Status.Phase = sschaosv1alpha1.PhaseBeforeExperiment
+	}
+	if ssChaos.Status.Result == nil || len(ssChaos.Status.Result) == 0 {
Review Comment:
No need for the `len(ssChaos.Status.Result) == 0` check: a non-nil slice of length 0 is already equivalent to `[]sschaosv1alpha1.Result{}`, so the nil check alone is enough.
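That is, the function could be reduced to this sketch:
```go
func setDefault(ssChaos *sschaosv1alpha1.ShardingSphereChaos) {
	if ssChaos.Status.Phase == "" {
		ssChaos.Status.Phase = sschaosv1alpha1.PhaseBeforeExperiment
	}
	// len() of a nil slice is 0, and replacing an empty slice is a no-op,
	// so the nil check alone covers both cases.
	if ssChaos.Status.Result == nil {
		ssChaos.Status.Result = []sschaosv1alpha1.Result{}
	}
}
```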
##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -325,25 +530,20 @@ func (r *ShardingSphereChaosReconciler) CreateConfigMap(ctx context.Context, cha
 }
 func (r *ShardingSphereChaosReconciler) updateJob(ctx context.Context, requirement reconcile.InjectRequirement, chao *sschaosv1alpha1.ShardingSphereChaos, cur *batchV1.Job) error {
-	exp, err := reconcile.UpdateJob(chao, requirement, cur)
+	isEqual, err := reconcile.JudgeJobEqual(chao, requirement, cur)
Review Comment:
Maybe `isJobChanged` would be a better name.
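The call site might then read like this sketch (`reconcile.IsJobChanged` is a hypothetical rename, and the early return is an assumption about the surrounding code, which isn't quoted here):
```go
// Hypothetical rename of reconcile.JudgeJobEqual with inverted polarity.
isJobChanged, err := reconcile.IsJobChanged(chao, requirement, cur)
if err != nil {
	return err
}
if !isJobChanged {
	// Desired Job already matches the current one; nothing to update.
	return nil
}
```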
##########
shardingsphere-operator/pkg/controllers/shardingsphere_chaos_controller.go:
##########
@@ -213,10 +258,183 @@ func (r *ShardingSphereChaosReconciler) reconcileStatus(ctx context.Context, nam
+func (r *ShardingSphereChaosReconciler) updateRecoveredJob(ctx context.Context, ssChaos *sschaosv1alpha1.ShardingSphereChaos, rJob *batchV1.Job) error {
+	if isResultExist(rJob) {
+		return nil
+	}
+	for i := range ssChaos.Status.Result {
+		r := &ssChaos.Status.Result[i]
+		if len(r.Detail.Msg) >= len(VerifyJobCheck) && r.Detail.Msg[:len(VerifyJobCheck)] == VerifyJobCheck {
+			return nil
+		}
+	}
+	logOpts := &v1.PodLogOptions{}
+	pod, err := r.getPodHaveLog(ctx, rJob)
+	if err != nil {
+		return err
+	}
+	podNamespacedName := types.NamespacedName{
+		Namespace: pod.Namespace,
+		Name:      pod.Name,
+	}
+	succeeded, failed := getJobSuccessAndFail(rJob.Status.Conditions)
+	result := &sschaosv1alpha1.Result{}
+	if succeeded {
+		log, err := r.getPodLog(ctx, podNamespacedName, logOpts)
+		if ssChaos.Spec.Expect.Verify == "" || ssChaos.Spec.Expect.Verify == log {
+			result.Success = true
+			result.Detail = sschaosv1alpha1.Detail{
+				Time: metav1.Time{Time: time.Now()},
+				Msg:  fmt.Sprintf("%s: job succeeded", VerifyJobCheck),
+			}
+		} else {
+			if err != nil {
+				return err
+			}
+			result.Success = false
+			result.Detail = sschaosv1alpha1.Detail{
+				Time: metav1.Time{Time: time.Now()},
+				Msg:  fmt.Sprintf("%s: %s", VerifyJobCheck, log),
+			}
+		}
+	}
+
+	if failed {
+		log, err := r.getPodLog(ctx, podNamespacedName, logOpts)
+		if err != nil {
+			return err
+		}
+		result.Success = false
+		result.Detail = sschaosv1alpha1.Detail{
+			Time: metav1.Time{Time: time.Now()},
+			Msg:  fmt.Sprintf("%s: %s", VerifyJobCheck, log),
+		}
+	}
+	ssChaos.Status.Result = updateResult(ssChaos.Status.Result, *result, VerifyJobCheck)
+	return nil
+}
+
+func (r *ShardingSphereChaosReconciler) getPodHaveLog(ctx context.Context, rJob *batchV1.Job) (*v1.Pod, error) {
+	pods := &v1.PodList{}
+	if err := r.List(ctx, pods, client.MatchingLabels{"controller-uid": rJob.Spec.Template.Labels["controller-uid"]}); err != nil {
+		return nil, err
+	}
+	var pod *v1.Pod
+	for i := range pods.Items {
+		pod = &pods.Items[i]
+		break
+	}
Review Comment:
Check whether `pods.Items` is empty first; as written, an empty list leaves `pod` nil.
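For instance (a sketch; the error message is illustrative):
```go
if len(pods.Items) == 0 {
	// Guard against an empty list so callers never receive a nil pod.
	return nil, fmt.Errorf("no pod found for job %s", rJob.Name)
}
// Equivalent to the loop-and-break above: take the first matching pod.
pod := &pods.Items[0]
```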