This is an automated email from the ASF dual-hosted git repository.
pbacsko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/yunikorn-k8shim.git
The following commit(s) were added to refs/heads/master by this push:
new e756d2dc [YUNIKORN-1039] add e2e test which uses volumes (#687)
e756d2dc is described below
commit e756d2dc6fa878942b94adb71a6c5f02e9d406bd
Author: targetoee <[email protected]>
AuthorDate: Wed Nov 8 09:22:08 2023 +0100
[YUNIKORN-1039] add e2e test which uses volumes (#687)
Closes: #687
Signed-off-by: Peter Bacsko <[email protected]>
---
test/e2e/framework/helpers/k8s/k8s_utils.go | 146 +++++++++
test/e2e/framework/helpers/k8s/pod_conf.go | 22 ++
test/e2e/framework/helpers/k8s/pv_conf.go | 131 ++++++++
.../persistent_volume_suite_test.go | 50 +++
.../persistent_volume/persistent_volume_test.go | 348 +++++++++++++++++++++
5 files changed, 697 insertions(+)
diff --git a/test/e2e/framework/helpers/k8s/k8s_utils.go
b/test/e2e/framework/helpers/k8s/k8s_utils.go
index 8120d2ef..fc651f10 100644
--- a/test/e2e/framework/helpers/k8s/k8s_utils.go
+++ b/test/e2e/framework/helpers/k8s/k8s_utils.go
@@ -37,7 +37,9 @@ import (
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
authv1 "k8s.io/api/rbac/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
+ storagev1 "k8s.io/api/storage/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/httpstream"
@@ -318,6 +320,10 @@ func (k *KubeCtl) GetService(serviceName string, namespace
string) (*v1.Service,
return k.clientSet.CoreV1().Services(namespace).Get(context.TODO(),
serviceName, metav1.GetOptions{})
}
+func (k *KubeCtl) CreateService(service *v1.Service, namespace string)
(*v1.Service, error) {
+ return k.clientSet.CoreV1().Services(namespace).Create(context.TODO(),
service, metav1.CreateOptions{})
+}
+
// Func to create a namespace provided a name
func (k *KubeCtl) CreateNamespace(namespace string, annotations
map[string]string) (*v1.Namespace, error) {
// create namespace
@@ -862,6 +868,10 @@ func (k *KubeCtl) DeleteServiceAccount(accountName string,
namespace string) err
return
k.clientSet.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(),
accountName, metav1.DeleteOptions{})
}
+func (k *KubeCtl) CreateClusterRole(clusterRole *rbacv1.ClusterRole)
(*rbacv1.ClusterRole, error) {
+ return k.clientSet.RbacV1().ClusterRoles().Create(context.TODO(),
clusterRole, metav1.CreateOptions{})
+}
+
func (k *KubeCtl) CreateClusterRoleBinding(
roleName string,
role string,
@@ -881,6 +891,10 @@ func (k *KubeCtl) CreateClusterRoleBinding(
}, metav1.CreateOptions{})
}
+func (k *KubeCtl) DeleteClusterRole(roleName string) error {
+ return k.clientSet.RbacV1().ClusterRoles().Delete(context.TODO(),
roleName, metav1.DeleteOptions{})
+}
+
func (k *KubeCtl) DeleteClusterRoleBindings(roleName string) error {
return
k.clientSet.RbacV1().ClusterRoleBindings().Delete(context.TODO(), roleName,
metav1.DeleteOptions{})
}
@@ -1397,3 +1411,135 @@ func (k *KubeCtl) DeleteWorkloadAndPods(objectName
string, wlType WorkloadType,
err = k.WaitForPodCount(namespace, 0, 10*time.Second)
gomega.Ω(err).ShouldNot(gomega.HaveOccurred())
}
+
+func (k *KubeCtl) CreatePersistentVolume(pv *v1.PersistentVolume)
(*v1.PersistentVolume, error) {
+ return k.clientSet.CoreV1().PersistentVolumes().Create(context.TODO(),
pv, metav1.CreateOptions{})
+}
+
+func (k *KubeCtl) CreatePersistentVolumeClaim(pvc *v1.PersistentVolumeClaim,
ns string) (*v1.PersistentVolumeClaim, error) {
+ return
k.clientSet.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc,
metav1.CreateOptions{})
+}
+
+func (k *KubeCtl) CreateStorageClass(sc *storagev1.StorageClass)
(*storagev1.StorageClass, error) {
+ return k.clientSet.StorageV1().StorageClasses().Create(context.TODO(),
sc, metav1.CreateOptions{})
+}
+
+func (k *KubeCtl) GetPersistentVolume(name string) (*v1.PersistentVolume,
error) {
+ pv, err := k.clientSet.CoreV1().PersistentVolumes().Get(context.TODO(),
name, metav1.GetOptions{})
+ return pv, err
+}
+
+func (k *KubeCtl) WaitForPersistentVolumeAvailable(name string, timeout
time.Duration) error {
+ return wait.PollUntilContextTimeout(context.TODO(),
time.Millisecond*200, timeout, true, k.isPersistentVolumeAvailable(name))
+}
+
+func (k *KubeCtl) WaitForPersistentVolumeClaimPresent(namespace string, name
string, timeout time.Duration) error {
+ return wait.PollUntilContextTimeout(context.TODO(),
time.Millisecond*200, timeout, true,
k.isPersistentVolumeClaimPresent(namespace, name))
+}
+
+func (k *KubeCtl) isPersistentVolumeAvailable(name string)
wait.ConditionWithContextFunc {
+ return func(context.Context) (bool, error) {
+ pv, err := k.GetPersistentVolume(name)
+ if err != nil {
+ return false, err
+ }
+ if pv.Status.Phase == v1.VolumeAvailable {
+ return true, nil
+ }
+ return false, nil
+ }
+}
+
+func (k *KubeCtl) isPersistentVolumeClaimPresent(namespace string, name
string) wait.ConditionWithContextFunc {
+ return func(context.Context) (bool, error) {
+ _, err :=
k.clientSet.CoreV1().PersistentVolumeClaims(namespace).Get(context.TODO(),
name, metav1.GetOptions{})
+ if err != nil {
+ return false, err
+ }
+ return true, nil
+ }
+}
+
+func (k *KubeCtl) GetPvcNameListFromNs(namespace string) ([]string, error) {
+ var arr []string
+ pvcList, err :=
k.clientSet.CoreV1().PersistentVolumeClaims(namespace).List(context.TODO(),
metav1.ListOptions{})
+ if err != nil {
+ return nil, err
+ }
+
+ for _, item := range pvcList.Items {
+ arr = append(arr, item.Name)
+ }
+ return arr, nil
+}
+
+func (k *KubeCtl) GetPvNameListFromNs(namespace string) ([]string, error) {
+ var arr []string
+ pvList, err :=
k.clientSet.CoreV1().PersistentVolumes().List(context.TODO(),
metav1.ListOptions{})
+ if err != nil {
+ return nil, err
+ }
+
+ for _, item := range pvList.Items {
+ if item.Spec.ClaimRef.Namespace == namespace {
+ arr = append(arr, item.Name)
+ }
+ }
+ return arr, nil
+}
+
+func (k *KubeCtl) DeletePersistentVolume(pvName string) error {
+ err := k.clientSet.CoreV1().PersistentVolumes().Delete(context.TODO(),
pvName, metav1.DeleteOptions{})
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (k *KubeCtl) DeletePersistentVolumeClaim(pvcName string, namespace
string) error {
+ err :=
k.clientSet.CoreV1().PersistentVolumeClaims(namespace).Delete(context.TODO(),
pvcName, metav1.DeleteOptions{})
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (k *KubeCtl) DeletePVCs(namespace string) error {
+ // Delete all PVC by namespace
+ var PvcList, err = k.GetPvcNameListFromNs(namespace)
+ if err != nil {
+ return err
+ }
+
+ for _, each := range PvcList {
+ err = k.DeletePersistentVolumeClaim(each, namespace)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (k *KubeCtl) DeletePVs(namespace string) error {
+ // Delete all PV by namespace
+ var PvcList, err = k.GetPvNameListFromNs(namespace)
+ if err != nil {
+ return err
+ }
+
+ for _, item := range PvcList {
+ err = k.DeletePersistentVolume(item)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (k *KubeCtl) DeleteStorageClass(scName string) error {
+ err := k.clientSet.StorageV1().StorageClasses().Delete(context.TODO(),
scName, metav1.DeleteOptions{})
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/test/e2e/framework/helpers/k8s/pod_conf.go
b/test/e2e/framework/helpers/k8s/pod_conf.go
index b8280c3b..7ea68ccc 100644
--- a/test/e2e/framework/helpers/k8s/pod_conf.go
+++ b/test/e2e/framework/helpers/k8s/pod_conf.go
@@ -158,6 +158,9 @@ type TestPodConfig struct {
RestartPolicy v1.RestartPolicy
Command []string
InitContainerSleepSecs int
+ PvcName string
+ PvName string
+ VolumeName string
}
func InitTestPod(conf TestPodConfig) (*v1.Pod, error) { //nolint:funlen
@@ -227,6 +230,25 @@ func InitTestPod(conf TestPodConfig) (*v1.Pod, error) {
//nolint:funlen
},
}
}
+ if conf.PvcName != "" || conf.PvName != "" {
+ if conf.VolumeName == "" {
+ conf.VolumeName = "vol-" + common.RandSeq(5)
+ }
+ if conf.PvcName != "" {
+ pod.Spec.Volumes = []v1.Volume{
+ {
+ Name: conf.VolumeName,
+ VolumeSource: v1.VolumeSource{
+ PersistentVolumeClaim:
&v1.PersistentVolumeClaimVolumeSource{
+ ClaimName: conf.PvcName,
+ },
+ },
+ },
+ }
+ } else if conf.PvName != "" {
+ pod.Spec.Volumes = []v1.Volume{}
+ }
+ }
return pod, nil
}
diff --git a/test/e2e/framework/helpers/k8s/pv_conf.go
b/test/e2e/framework/helpers/k8s/pv_conf.go
new file mode 100644
index 00000000..0d4c21a1
--- /dev/null
+++ b/test/e2e/framework/helpers/k8s/pv_conf.go
@@ -0,0 +1,131 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package k8s
+
+import (
+ v1 "k8s.io/api/core/v1"
+ storagev1 "k8s.io/api/storage/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "k8s.io/apimachinery/pkg/api/resource"
+)
+
// PvConfig describes the PersistentVolume built by InitPersistentVolume.
type PvConfig struct {
	Name string
	// NOTE(review): Labels is not read by InitPersistentVolume in this
	// change — confirm whether it is intentionally unused.
	Labels map[string]string
	// Capacity is a resource quantity string, e.g. "1Gi".
	Capacity    string
	AccessModes []v1.PersistentVolumeAccessMode
	// Type selects the volume source; only LocalTypePv gets special handling.
	Type string
	// Path is the host path used when Type is LocalTypePv.
	Path string
	// NodeAffinity is optional; when nil a match-everything fake affinity
	// is generated for local volumes.
	NodeAffinity *v1.VolumeNodeAffinity
	StorageClass string
}

const (
	// LocalTypePv selects a local (host-path backed) PersistentVolume.
	LocalTypePv string = "Local"
)
+
+func InitPersistentVolume(conf PvConfig) (*v1.PersistentVolume, error) {
+ pv := &v1.PersistentVolume{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: conf.Name,
+ },
+ Spec: v1.PersistentVolumeSpec{
+ Capacity: v1.ResourceList{
+ v1.ResourceStorage:
resource.MustParse(conf.Capacity),
+ },
+ AccessModes: conf.AccessModes,
+ PersistentVolumeReclaimPolicy:
v1.PersistentVolumeReclaimRetain,
+ StorageClassName: conf.StorageClass,
+ },
+ }
+ if conf.Type == LocalTypePv {
+ pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
+ Local: &v1.LocalVolumeSource{
+ Path: conf.Path,
+ },
+ }
+ if conf.NodeAffinity == nil {
+ // Create fake condition which won't exclude anything
+ pv.Spec.NodeAffinity = &v1.VolumeNodeAffinity{
+ Required: &v1.NodeSelector{
+ NodeSelectorTerms:
[]v1.NodeSelectorTerm{
+ {
+ MatchExpressions:
[]v1.NodeSelectorRequirement{
+ {
+ Key:
"fakeKey",
+
Operator: v1.NodeSelectorOpNotIn,
+ Values:
[]string{"fakeValue"},
+ },
+ },
+ },
+ },
+ },
+ }
+ } else {
+ pv.Spec.NodeAffinity = conf.NodeAffinity
+ }
+ }
+ return pv, nil
+}
+
// ScConfig describes the StorageClass built by InitStorageClass.
type ScConfig struct {
	Name        string
	Provisioner string            // provisioner identifier, e.g. "openebs.io/nfsrwx"
	Parameters  map[string]string // passed through to StorageClass.Parameters
}
+
+func InitStorageClass(conf ScConfig) (*storagev1.StorageClass, error) {
+ sc := &storagev1.StorageClass{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: conf.Name,
+ },
+ Provisioner: conf.Provisioner,
+ Parameters: conf.Parameters,
+ }
+ return sc, nil
+}
+
// PvcConfig describes the PersistentVolumeClaim built by
// InitPersistentVolumeClaim.
type PvcConfig struct {
	Name string
	// Capacity is a resource quantity string, e.g. "1Gi".
	Capacity string
	// VolumeName, when set, statically binds the claim to that PV.
	VolumeName string
	// StorageClassName, when set, requests dynamic provisioning.
	StorageClassName string
}
+
+func InitPersistentVolumeClaim(conf PvcConfig) (*v1.PersistentVolumeClaim,
error) {
+ pvc := &v1.PersistentVolumeClaim{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: conf.Name,
+ },
+ Spec: v1.PersistentVolumeClaimSpec{
+ AccessModes:
[]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
+ Resources: v1.ResourceRequirements{
+ Requests: v1.ResourceList{
+ v1.ResourceStorage:
resource.MustParse(conf.Capacity),
+ },
+ },
+ },
+ }
+ if conf.VolumeName != "" {
+ pvc.Spec.VolumeName = conf.VolumeName
+ }
+ if conf.StorageClassName != "" {
+ pvc.Spec.StorageClassName = &conf.StorageClassName
+ }
+ return pvc, nil
+}
diff --git a/test/e2e/persistent_volume/persistent_volume_suite_test.go
b/test/e2e/persistent_volume/persistent_volume_suite_test.go
new file mode 100644
index 00000000..e20f973c
--- /dev/null
+++ b/test/e2e/persistent_volume/persistent_volume_suite_test.go
@@ -0,0 +1,50 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package persistent_volume
+
+import (
+ "path/filepath"
+ "testing"
+
+ "github.com/onsi/ginkgo/v2"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/gomega"
+
+ "github.com/apache/yunikorn-k8shim/test/e2e/framework/configmanager"
+)
+
// init parses the shared YuniKorn e2e command-line flags before any
// suite code runs.
func init() {
	configmanager.YuniKornTestConfig.ParseFlags()
}
+
// TestPersistentVolume is the go-test entry point for the persistent
// volume e2e suite. It registers a JUnit report writer for CI and then
// runs the ginkgo specs under the "TestPersistentVolume" label.
func TestPersistentVolume(t *testing.T) {
	ginkgo.ReportAfterSuite("TestPersistentVolume", func(report ginkgo.Report) {
		err := reporters.GenerateJUnitReportWithConfig(
			report,
			filepath.Join(configmanager.YuniKornTestConfig.LogDir, "TEST-persistent_volume_junit.xml"),
			reporters.JunitReportConfig{OmitSpecLabels: true},
		)
		Ω(err).NotTo(HaveOccurred())
	})
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "TestPersistentVolume", ginkgo.Label("TestPersistentVolume"))
}
+
// Package-level aliases so the specs can use the gomega matchers without
// a package qualifier.
var Ω = gomega.Ω
var HaveOccurred = gomega.HaveOccurred
diff --git a/test/e2e/persistent_volume/persistent_volume_test.go
b/test/e2e/persistent_volume/persistent_volume_test.go
new file mode 100644
index 00000000..e641f793
--- /dev/null
+++ b/test/e2e/persistent_volume/persistent_volume_test.go
@@ -0,0 +1,348 @@
+/*
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package persistent_volume
+
+import (
+ "time"
+
+ "github.com/onsi/gomega"
+
+ "k8s.io/apimachinery/pkg/api/resource"
+
+ appsv1 "k8s.io/api/apps/v1"
+ v1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ storagev1 "k8s.io/api/storage/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/onsi/ginkgo/v2"
+
+ "github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/common"
+ "github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/k8s"
+ "github.com/apache/yunikorn-k8shim/test/e2e/framework/helpers/yunikorn"
+)
+
// Suite-wide clients and the randomized per-run test namespace.
var kClient k8s.KubeCtl
var restClient yunikorn.RClient
var dev = "dev-" + common.RandSeq(5)

const (
	// LocalTypePv mirrors k8s.LocalTypePv for the static-binding spec.
	LocalTypePv = "Local"
	// StandardScName is presumably the cluster's default storage class
	// ("standard") — verify against the CI cluster setup.
	StandardScName = "standard"
)
+
// Suite setup: build the kube and REST clients, check the YuniKorn
// configuration is present, and create the test namespace.
var _ = ginkgo.BeforeSuite(func() {
	// Initializing kubectl client
	kClient = k8s.KubeCtl{}
	Ω(kClient.SetClient()).To(gomega.BeNil())

	// Initializing rest client
	restClient = yunikorn.RClient{}
	Ω(restClient).NotTo(gomega.BeNil())

	yunikorn.EnsureYuniKornConfigsPresent()

	// Create namespace
	ginkgo.By("Create namespace " + dev)
	ns, err := kClient.CreateNamespace(dev, nil)
	Ω(err).NotTo(HaveOccurred())
	Ω(ns.Status.Phase).To(gomega.Equal(v1.NamespaceActive))
})
+
// Suite teardown: delete the PVCs and PVs created in the namespace, then
// the namespace itself.
var _ = ginkgo.AfterSuite(func() {
	// Clean up
	ginkgo.By("Deleting PVCs and PVs")
	err := kClient.DeletePVCs(dev)
	err2 := kClient.DeletePVs(dev)
	ginkgo.By("Tearing down namespace: " + dev)
	err3 := kClient.TearDownNamespace(dev)

	// Assertions come after all cleanup calls so one failure does not
	// skip the remaining cleanup.
	Ω(err).NotTo(HaveOccurred())
	Ω(err2).NotTo(HaveOccurred())
	Ω(err3).NotTo(HaveOccurred())
})
+
// Two specs: static binding of a pre-created local PV via PVC.VolumeName,
// and dynamic provisioning through an openebs NFS provisioner plus a
// StorageClass.
var _ = ginkgo.Describe("PersistentVolume", func() {
	ginkgo.It("Verify_static_binding_of_local_pv", func() {
		pvName := "local-pv-" + common.RandSeq(5)
		conf := k8s.PvConfig{
			Name:         pvName,
			Capacity:     "1Gi",
			AccessModes:  []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Type:         LocalTypePv,
			Path:         "/tmp",
			StorageClass: StandardScName,
		}

		ginkgo.By("Create local type pv " + pvName)
		pvObj, err := k8s.InitPersistentVolume(conf)
		Ω(err).NotTo(HaveOccurred())
		_, err = kClient.CreatePersistentVolume(pvObj)
		Ω(err).NotTo(HaveOccurred())
		Ω(kClient.WaitForPersistentVolumeAvailable(pvName, 60*time.Second)).NotTo(HaveOccurred())

		pvcName := "pvc-" + common.RandSeq(5)
		pvcConf := k8s.PvcConfig{
			Name:     pvcName,
			Capacity: "1Gi",
			// Static binding: the claim targets the PV by name.
			VolumeName: pvName,
		}

		ginkgo.By("Create pvc " + pvcName + ", which binds to " + pvName)
		pvcObj, err := k8s.InitPersistentVolumeClaim(pvcConf)
		Ω(err).NotTo(HaveOccurred())
		_, err = kClient.CreatePersistentVolumeClaim(pvcObj, dev)
		Ω(err).NotTo(HaveOccurred())
		Ω(kClient.WaitForPersistentVolumeClaimPresent(dev, pvcName, 60*time.Second)).NotTo(HaveOccurred())

		podName := "pod-" + common.RandSeq(5)
		podConf := k8s.TestPodConfig{
			Name:      podName,
			Namespace: dev,
			PvcName:   pvcName,
		}

		ginkgo.By("Create pod " + podName + ", which uses pvc " + pvcName)
		podObj, err := k8s.InitTestPod(podConf)
		Ω(err).NotTo(HaveOccurred())
		_, err = kClient.CreatePod(podObj, dev)
		Ω(err).NotTo(HaveOccurred())

		ginkgo.By("Check pod " + podName + " is successfully running")
		err = kClient.WaitForPodRunning(dev, podName, 60*time.Second)
		Ω(err).NotTo(HaveOccurred())
	})

	// NOTE(review): "bindng" is a typo in the spec name; kept verbatim here
	// since renaming it would change the reported spec identity.
	ginkgo.It("Verify_dynamic_bindng_with_nfs_server", func() {
		ginkgo.By("Start creating nfs provisioner.")

		// Create nfs server and related rbac
		saName := "nfs-service-account"
		crName := "nfs-cluster-role"
		crbName := "nfs-cluster-role-binding" //nolint:gosec
		serverName := "nfs-provisioner"
		scName := "nfs-sc"
		createNfsRbac(saName, crName, crbName)
		createNfsProvisioner(saName, serverName, scName)

		// Create pvc using storageclass
		pvcName := "pvc-" + common.RandSeq(5)
		pvcConf := k8s.PvcConfig{
			Name:             pvcName,
			Capacity:         "1Gi",
			StorageClassName: scName,
		}

		ginkgo.By("Create pvc " + pvcName + ", which uses storage class " + scName)
		pvcObj, err := k8s.InitPersistentVolumeClaim(pvcConf)
		Ω(err).NotTo(HaveOccurred())
		_, err = kClient.CreatePersistentVolumeClaim(pvcObj, dev)
		Ω(err).NotTo(HaveOccurred())
		Ω(kClient.WaitForPersistentVolumeClaimPresent(dev, pvcName, 60*time.Second)).NotTo(HaveOccurred())

		// Create pod
		podName := "pod-" + common.RandSeq(5)
		podConf := k8s.TestPodConfig{
			Name:      podName,
			Namespace: dev,
			PvcName:   pvcName,
		}

		ginkgo.By("Create pod " + podName + " with pvc " + pvcName)
		podObj, err := k8s.InitTestPod(podConf)
		Ω(err).NotTo(HaveOccurred())
		_, err = kClient.CreatePod(podObj, dev)
		Ω(err).NotTo(HaveOccurred())

		ginkgo.By("Check pod " + podName + " is successfully running")
		err = kClient.WaitForPodRunning(dev, podName, 60*time.Second)
		Ω(err).NotTo(HaveOccurred())

		// Cluster-scoped NFS resources are cleaned here rather than in
		// AfterSuite, which only handles the test namespace.
		deleteNfsRelatedRoles(saName, crName, crbName)
		deleteNfsProvisioner(serverName, scName)
	})
})
+
// createNfsRbac creates the service account, cluster role and cluster role
// binding the openebs NFS provisioner needs, asserting each creation.
func createNfsRbac(svaName string, crName string, crbName string) {
	// Create service account, cluster role and role binding
	ginkgo.By("Create service account " + svaName)
	_, err := kClient.CreateServiceAccount(svaName, dev)
	Ω(err).NotTo(HaveOccurred())

	nfsClusterRole := &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name: crName,
		},
		Rules: []rbacv1.PolicyRule{
			{
				// Very broad grant — acceptable only because this runs on a
				// throwaway e2e cluster.
				APIGroups: []string{"*"},
				Resources: []string{
					"nodes", "nodes/proxy",
					"namespaces", "services", "pods", "pods/exec",
					"deployments", "deployments/finalizers",
					"replicationcontrollers", "replicasets",
					"statefulsets", "daemonsets",
					"events", "endpoints", "configmaps", "secrets", "jobs", "cronjobs",
					"storageclasses", "persistentvolumeclaims", "persistentvolumes",
				},
				Verbs: []string{"*"},
			},
			{
				// openebs CRDs used by the provisioner.
				APIGroups: []string{"openebs.io"},
				Resources: []string{"*"},
				Verbs:     []string{"*"},
			},
		},
	}
	ginkgo.By("Create cluster role " + crName)
	_, err = kClient.CreateClusterRole(nfsClusterRole)
	Ω(err).NotTo(HaveOccurred())

	ginkgo.By("Create cluster role binding " + crbName)
	_, err = kClient.CreateClusterRoleBinding(crbName, crName, dev, svaName)
	Ω(err).NotTo(HaveOccurred())
}
+
// createNfsProvisioner deploys the openebs NFS provisioner into the test
// namespace and creates a StorageClass that provisions NFS-backed volumes
// through it.
func createNfsProvisioner(svaName string, serverName string, scName string) {
	// Create nfs provisioner
	nfsProvisioner := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      serverName,
			Namespace: dev,
			Labels: map[string]string{
				"name": serverName,
			},
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"name": serverName,
				},
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"name": serverName,
					},
				},
				Spec: v1.PodSpec{
					ServiceAccountName: svaName,
					Containers: []v1.Container{
						{
							Name:  "nfs-provisioner",
							Image: "openebs/provisioner-nfs:0.10.0",
							// Downward-API env vars feed the provisioner its
							// node, namespace and service account.
							Env: []v1.EnvVar{
								{
									Name: "NODE_NAME",
									ValueFrom: &v1.EnvVarSource{
										FieldRef: &v1.ObjectFieldSelector{
											FieldPath: "spec.nodeName",
										},
									},
								},
								{
									Name: "OPENEBS_NAMESPACE",
									ValueFrom: &v1.EnvVarSource{
										FieldRef: &v1.ObjectFieldSelector{
											FieldPath: "metadata.namespace",
										},
									},
								},
								{
									Name: "OPENEBS_SERVICE_ACCOUNT",
									ValueFrom: &v1.EnvVarSource{
										FieldRef: &v1.ObjectFieldSelector{
											FieldPath: "spec.serviceAccountName",
										},
									},
								},
								{
									Name:  "OPENEBS_IO_ENABLE_ANALYTICS",
									Value: "true",
								},
								{
									Name:  "OPENEBS_IO_NFS_SERVER_USE_CLUSTERIP",
									Value: "true",
								},
								{
									Name:  "OPENEBS_IO_INSTALLER_TYPE",
									Value: "openebs-operator-nfs",
								},
								{
									Name:  "OPENEBS_IO_NFS_SERVER_IMG",
									Value: "openebs/nfs-server-alpine:0.10.0",
								},
							},
							// Small requests/limits: the provisioner is tiny.
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									"cpu":    resource.MustParse("50m"),
									"memory": resource.MustParse("50M"),
								},
								Limits: v1.ResourceList{
									"cpu":    resource.MustParse("200m"),
									"memory": resource.MustParse("200M"),
								},
							},
						},
					},
				},
			},
		},
	}

	ginkgo.By("Create nfs provisioner " + serverName)
	_, err := kClient.CreateDeployment(nfsProvisioner, dev)
	Ω(err).NotTo(HaveOccurred())

	// Create storage class
	sc := &storagev1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: scName,
			// openebs annotations: kernel NFS server backed by the
			// "standard" storage class.
			Annotations: map[string]string{
				"openebs.io/cas-type":   "nfsrwx",
				"cas.openebs.io/config": "- name: NFSServerType\n value: \"kernel\"\n- name: BackendStorageClass\n value: \"standard\"\n",
			},
		},
		Provisioner: "openebs.io/nfsrwx",
	}

	ginkgo.By("Create storage class " + scName)
	_, err = kClient.CreateStorageClass(sc)
	Ω(err).NotTo(HaveOccurred())
}
+
+func deleteNfsRelatedRoles(serviceAccount string, clusterRole string,
clusterRoleBinding string) {
+ ginkgo.By("Deleting NFS related roles and bindings")
+ err := kClient.DeleteClusterRoleBindings(clusterRoleBinding)
+ err2 := kClient.DeleteClusterRole(clusterRole)
+ err3 := kClient.DeleteServiceAccount(serviceAccount, dev)
+
+ Ω(err).NotTo(HaveOccurred())
+ Ω(err2).NotTo(HaveOccurred())
+ Ω(err3).NotTo(HaveOccurred())
+}
+
+func deleteNfsProvisioner(deployName string, scName string) {
+ ginkgo.By("Deleting NFS deployment and storage class")
+ err := kClient.DeleteDeployment(deployName, dev)
+ err2 := kClient.DeleteStorageClass(scName)
+
+ Ω(err).NotTo(HaveOccurred())
+ Ω(err2).NotTo(HaveOccurred())
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]