wilfred-s commented on code in PR #971:
URL: https://github.com/apache/yunikorn-k8shim/pull/971#discussion_r2221019227


##########
test/e2e/simple_preemptor/simple_preemptor_suite_test.go:
##########
@@ -46,6 +54,86 @@ func TestSimplePreemptor(t *testing.T) {
        ginkgo.RunSpecs(t, "TestSimplePreemptor", 
ginkgo.Label("TestSimplePreemptor"))
 }
 
+var _ = ginkgo.BeforeSuite(func() {
+       _, filename, _, _ := runtime.Caller(0)
+       suiteName = common.GetSuiteName(filename)
+       // Initializing kubectl client
+       kClient = k8s.KubeCtl{}
+       Ω(kClient.SetClient()).To(gomega.BeNil())
+       // Initializing rest client
+       restClient = yunikorn.RClient{}
+       Ω(restClient).NotTo(gomega.BeNil())
+
+       yunikorn.EnsureYuniKornConfigsPresent()
+       yunikorn.UpdateConfigMapWrapper(oldConfigMap, "")
+
+       ginkgo.By("Port-forward the scheduler pod")
+       var err = kClient.PortForwardYkSchedulerPod()
+       Ω(err).NotTo(gomega.HaveOccurred())
+
+       var nodes *v1.NodeList
+       nodes, err = kClient.GetNodes()
+       Ω(err).NotTo(gomega.HaveOccurred())
+       Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Events cant be empty")
+
+       // Extract node allocatable resources
+       for _, node := range nodes.Items {
+               // skip master if it's marked as such
+               node := node
+               if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
+                       continue
+               }
+               if Worker1 == "" {
+                       Worker1 = node.Name
+                       Worker1Res = node.Status.Allocatable.Memory()
+               } else if Worker2 == "" {
+                       Worker2 = node.Name
+                       Worker2Res = node.Status.Allocatable.Memory()
+               } else {
+                       nodesToTaint = append(nodesToTaint, node.Name)
+               }
+       }
+       ginkgo.By("Worker1:" + Worker1)
+       ginkgo.By("Worker2:" + Worker2)
+
+       ginkgo.By("Tainting some nodes..")
+       err = kClient.TaintNodes(nodesToTaint, taintKey, "value", 
v1.TaintEffectNoSchedule)
+       Ω(err).NotTo(gomega.HaveOccurred())
+
+       var pods *v1.PodList
+       totalPodQuantity1 := *resource.NewQuantity(0, resource.DecimalSI)
+       totalPodQuantity2 := *resource.NewQuantity(0, resource.DecimalSI)
+       pods, err = kClient.GetPods("yunikorn")
+       if err == nil {
+               for _, pod := range pods.Items {
+                       for _, c := range pod.Spec.Containers {
+                               if pod.Spec.NodeName == Worker1 {
+                                       
totalPodQuantity1.Add(*resource.NewQuantity(c.Resources.Requests.Memory().Value(),
 resource.DecimalSI))
+                               } else if pod.Spec.NodeName == Worker2 {
+                                       
totalPodQuantity2.Add(*resource.NewQuantity(c.Resources.Requests.Memory().Value(),
 resource.DecimalSI))
+                               }
+                       }
+               }
+       }
+       Worker1Res.Sub(totalPodQuantity1)
+       sleepPodMemLimit1 = int64(float64(Worker1Res.Value())/3.5) / (1000 * 
1000)
+       Worker2Res.Sub(totalPodQuantity2)
+       sleepPodMemLimit2 = int64(float64(Worker2Res.Value())/3.5) / (1000 * 
1000)
+})
+
+var _ = ginkgo.AfterSuite(func() {
+

Review Comment:
   nit: remove the unneeded empty line at the start of this `AfterSuite` block



##########
test/e2e/simple_preemptor/simple_preemptor_test.go:
##########
@@ -52,73 +50,6 @@ var sleepPodMemLimit2 int64
 var taintKey = "e2e_test_simple_preemptor"
 var nodesToTaint []string
 
-var _ = ginkgo.BeforeSuite(func() {
-       _, filename, _, _ := runtime.Caller(0)
-       suiteName = common.GetSuiteName(filename)
-       // Initializing kubectl client
-       kClient = k8s.KubeCtl{}
-       Ω(kClient.SetClient()).To(gomega.BeNil())
-       // Initializing rest client
-       restClient = yunikorn.RClient{}
-       Ω(restClient).NotTo(gomega.BeNil())
-
-       yunikorn.EnsureYuniKornConfigsPresent()
-       yunikorn.UpdateConfigMapWrapper(oldConfigMap, "")
-
-       ginkgo.By("Port-forward the scheduler pod")
-       var err = kClient.PortForwardYkSchedulerPod()
-       Ω(err).NotTo(gomega.HaveOccurred())
-
-       var nodes *v1.NodeList
-       nodes, err = kClient.GetNodes()
-       Ω(err).NotTo(gomega.HaveOccurred())
-       Ω(len(nodes.Items)).NotTo(gomega.BeZero(), "Events cant be empty")
-
-       // Extract node allocatable resources
-       for _, node := range nodes.Items {
-               // skip master if it's marked as such
-               node := node
-               if k8s.IsMasterNode(&node) || !k8s.IsComputeNode(&node) {
-                       continue
-               }
-               if Worker1 == "" {
-                       Worker1 = node.Name
-                       Worker1Res = node.Status.Allocatable.Memory()
-               } else if Worker2 == "" {
-                       Worker2 = node.Name
-                       Worker2Res = node.Status.Allocatable.Memory()
-               } else {
-                       nodesToTaint = append(nodesToTaint, node.Name)
-               }
-       }
-       ginkgo.By("Worker1:" + Worker1)
-       ginkgo.By("Worker2:" + Worker2)
-
-       ginkgo.By("Tainting some nodes..")
-       err = kClient.TaintNodes(nodesToTaint, taintKey, "value", 
v1.TaintEffectNoSchedule)
-       Ω(err).NotTo(gomega.HaveOccurred())
-
-       var pods *v1.PodList
-       totalPodQuantity1 := *resource.NewQuantity(0, resource.DecimalSI)
-       totalPodQuantity2 := *resource.NewQuantity(0, resource.DecimalSI)
-       pods, err = kClient.GetPods("yunikorn")
-       if err == nil {
-               for _, pod := range pods.Items {
-                       for _, c := range pod.Spec.Containers {
-                               if pod.Spec.NodeName == Worker1 {
-                                       
totalPodQuantity1.Add(*resource.NewQuantity(c.Resources.Requests.Memory().Value(),
 resource.DecimalSI))
-                               } else if pod.Spec.NodeName == Worker2 {
-                                       
totalPodQuantity2.Add(*resource.NewQuantity(c.Resources.Requests.Memory().Value(),
 resource.DecimalSI))
-                               }
-                       }
-               }
-       }
-       Worker1Res.Sub(totalPodQuantity1)
-       sleepPodMemLimit1 = int64(float64(Worker1Res.Value())/3.5) / (1000 * 
1000)
-       Worker2Res.Sub(totalPodQuantity2)
-       sleepPodMemLimit2 = int64(float64(Worker2Res.Value())/3.5) / (1000 * 
1000)
-})
-
 var _ = ginkgo.BeforeEach(func() {

Review Comment:
   agree: this `BeforeEach` should be moved between the `Describe` and the `It` below



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to