pbacsko commented on code in PR #915:
URL: https://github.com/apache/yunikorn-k8shim/pull/915#discussion_r1806368001
##########
test/e2e/framework/helpers/k8s/k8s_utils.go:
##########
@@ -1779,3 +1822,96 @@ func (k *KubeCtl) DeleteStorageClass(scName string) error {
}
return nil
}
+
+func (k *KubeCtl) GetSecrets(namespace string) (*v1.SecretList, error) {
+	return k.clientSet.CoreV1().Secrets(namespace).List(context.TODO(), metav1.ListOptions{})
+}
+
+func (k *KubeCtl) GetSecretValue(namespace, secretName, key string) (string, error) {
+	var secret *v1.Secret
+	var err error
+
+	// Retry loop in case secret is not yet populated
+	for i := 0; i < 5; i++ {
+		secret, err = k.clientSet.CoreV1().Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{})
+		if err != nil {
+			return "", err
+		}
+
+		// Check if the secret contains data
+		if len(secret.Data) > 0 {
+			value, ok := secret.Data[key]
+			if !ok {
+				return "", fmt.Errorf("key %s not found in secret %s", key, secretName)
+			}
+			return string(value), nil
+		}
+
+		// Wait before retrying
+		time.Sleep(2 * time.Second)
Review Comment:
You don't need to code the retry. We can take advantage of existing k8s functions.
To be consistent with existing code, let's use the following:
```
func (k *KubeCtl) GetSecretValue(namespace, secretName, key string) (string, error) {
	secret, err := k.GetSecret(namespace, secretName)
	if err != nil {
		return "", err
	}
	// Check if the key exists in the secret
	value, ok := secret.Data[key]
	if !ok {
		return "", fmt.Errorf("key %s not found in secret %s", key, secretName)
	}
	return string(value), nil
}

func (k *KubeCtl) GetSecret(namespace, secretName string) (*v1.Secret, error) {
	secret, err := k.clientSet.CoreV1().Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	return secret, nil
}

func (k *KubeCtl) WaitForSecret(namespace, secretName string, timeout time.Duration) error {
	var cond wait.ConditionFunc
	cond = func() (done bool, err error) {
		secret, err := k.GetSecret(namespace, secretName)
		if err != nil {
			// NotFound just means the secret hasn't been created yet, so keep polling
			// (apierrors is k8s.io/apimachinery/pkg/api/errors).
			if apierrors.IsNotFound(err) {
				return false, nil
			}
			return false, err
		}
		return secret != nil, nil
	}
	return wait.PollUntilContextTimeout(context.TODO(), time.Second, timeout, false, cond.WithContext())
}
```
First, call `kClient.WaitForSecret()` to wait for the secret to appear over the API. Then call `kClient.GetSecretValue()`. By having these 3 methods, all of them are usable independently of one another.
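For illustration, the intended call order could look like the sketch below (the helper name `readTokenValue` and the 30-second timeout are made up for this sketch, not part of the PR):
```
// Hypothetical helper, only to show how the new methods chain together.
func readTokenValue(kClient *KubeCtl, namespace, secretName string) (string, error) {
	// Block until the token controller has created the secret; 30s is an assumed timeout.
	if err := kClient.WaitForSecret(namespace, secretName, 30*time.Second); err != nil {
		return "", err
	}
	// The secret exists now, so read the "token" key from it directly.
	return kClient.GetSecretValue(namespace, secretName, "token")
}
```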
##########
test/e2e/framework/helpers/k8s/k8s_utils.go:
##########
@@ -1779,3 +1755,83 @@ func (k *KubeCtl) DeleteStorageClass(scName string) error {
}
return nil
}
+
+func (k *KubeCtl) GetSecrets(namespace string) (*v1.SecretList, error) {
+	return k.clientSet.CoreV1().Secrets(namespace).List(context.TODO(), metav1.ListOptions{})
+}
+
+// GetSecretValue retrieves the value for a specific key from a Kubernetes secret.
+func (k *KubeCtl) GetSecretValue(namespace, secretName, key string) (string, error) {
+	secret, err := k.getSecretWithRetry(namespace, secretName)
+	if err != nil {
+		return "", err
+	}
+	// Check if the key exists in the secret
+	value, ok := secret.Data[key]
+	if !ok {
+		return "", fmt.Errorf("key %s not found in secret %s", key, secretName)
+	}
+	return string(value), nil
+}
+
+// getSecretWithRetry retries getting the secret until it's populated with data, up to a limit.
+func (k *KubeCtl) getSecretWithRetry(namespace, secretName string) (*v1.Secret, error) {
+	var secret *v1.Secret
+	var err error
+
+	for i := 0; i < 5; i++ {
+		secret, err = k.clientSet.CoreV1().Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{})
+		if err != nil {
+			return nil, err
+		}
+
+		// If secret contains data, return it
+		if len(secret.Data) > 0 {
+			return secret, nil
+		}
+
+		// Wait before retrying
+		time.Sleep(2 * time.Second)
+	}
+
+	return nil, fmt.Errorf("secret %s has no data after retries", secretName)
+}
+
+func WriteConfigToFile(config *rest.Config, kubeconfigPath string) error {
Review Comment:
I'm OK with this for the time being, let's not waste time. We can figure it out later if needed.
##########
test/e2e/user_group_limit/user_group_limit_test.go:
##########
@@ -909,40 +926,227 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
 		checkUsageWildcardGroups(groupTestType, group2, sandboxQueue1, []*v1.Pod{group2Sandbox1Pod1, group2Sandbox1Pod2, group2Sandbox1Pod3})
 	})
+	ginkgo.It("Verify User info for the non kube admin user", func() {
+		var clientset *kubernetes.Clientset
+		var namespace = "default"
+		var serviceAccountName = "test-user-sa"
+		var podName = "test-pod"
+		var secretName = "test-user-sa-token" // #nosec G101
+
+		ginkgo.By("Update config")
+		// The wait wrapper still can't fully guarantee that the config in AdmissionController has been updated.
+		admissionCustomConfig = map[string]string{
+			"log.core.scheduler.ugm.level":   "debug",
+			amconf.AMAccessControlBypassAuth: constants.False,
+		}
+		yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+			yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "", admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+				// remove placement rules so we can control queue
+				sc.Partitions[0].PlacementRules = nil
+				err := common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
+					Name: "default",
+					Limits: []configs.Limit{
+						{
+							Limit:           "user entry",
+							Users:           []string{user1},
+							MaxApplications: 1,
+							MaxResources: map[string]string{
+								siCommon.Memory: fmt.Sprintf("%dM", mediumMem),
+							},
+						},
+						{
+							Limit:           "user2 entry",
+							Users:           []string{user2},
+							MaxApplications: 2,
+							MaxResources: map[string]string{
+								siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+							},
+						},
+					}})
+				if err != nil {
+					return err
+				}
+				return common.AddQueue(sc, constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name: "sandbox2"})
+			})
+		})
+		// Create Service Account
+		ginkgo.By("Creating Service Account...")
+		sa, err := kClient.CreateServiceAccount(serviceAccountName, namespace)
+		gomega.Ω(err).NotTo(HaveOccurred())
+		// Create a ClusterRole with necessary permissions
+		ginkgo.By("Creating ClusterRole...")
+		clusterRole := &rbacv1.ClusterRole{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "pod-creator-role",
+			},
+			Rules: []rbacv1.PolicyRule{
+				{
+					APIGroups: []string{""},
+					Resources: []string{"pods", "serviceaccounts", "test-user-sa"},
+					Verbs:     []string{"create", "get", "list", "watch", "delete"},
+				},
+			},
+		}
+		_, err = kClient.CreateClusterRole(clusterRole)
+		gomega.Ω(err).NotTo(HaveOccurred())
+		// Create a ClusterRoleBinding to bind the ClusterRole to the service account
+		ginkgo.By("Creating ClusterRoleBinding...")
+		clusterRoleBinding := &rbacv1.ClusterRoleBinding{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "pod-creator-role-binding",
+			},
+			RoleRef: rbacv1.RoleRef{
+				APIGroup: "rbac.authorization.k8s.io",
+				Kind:     "ClusterRole",
+				Name:     "pod-creator-role",
+			},
+			Subjects: []rbacv1.Subject{
+				{
+					Kind:      "ServiceAccount",
+					Name:      sa.Name,
+					Namespace: namespace,
+				},
+			},
+		}
+		_, err = kClient.CreateClusterRoleBinding(clusterRoleBinding.ObjectMeta.Name, clusterRoleBinding.RoleRef.Name, clusterRoleBinding.Subjects[0].Namespace, clusterRoleBinding.Subjects[0].Name)
+		gomega.Ω(err).NotTo(HaveOccurred())
+		// Create a Secret for the Service Account
+		ginkgo.By("Creating Secret for the Service Account...")
+		// create an object of v1.Secret
+		secret := &v1.Secret{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: secretName,
+				Annotations: map[string]string{
+					"kubernetes.io/service-account.name": serviceAccountName,
+				},
+			},
+			Type: v1.SecretTypeServiceAccountToken,
+		}
+		_, err = kClient.CreateSecret(secret, namespace)
+		gomega.Ω(err).NotTo(HaveOccurred())
+		// Get the token value from the Secret
+		ginkgo.By("Getting the token value from the Secret...")
+		userTokenValue, err := kClient.GetSecretValue(namespace, secretName, "token")
+		gomega.Ω(err).NotTo(HaveOccurred())
+		// use deep copy not to hardcode the kubeconfig
+		config, err := kClient.GetKubeConfig()
+		gomega.Ω(err).NotTo(HaveOccurred())
+		config.BearerToken = userTokenValue
+		newConf := rest.CopyConfig(config) // copy existing config
+		// Use token-based authentication instead of client certificates
+		newConf.CAFile = ""
+		newConf.CertFile = ""
+		newConf.KeyFile = ""
+		newConf.BearerToken = userTokenValue
+		kubeconfigPath := filepath.Join(os.TempDir(), "test-user-config")
+		err = k8s.WriteConfigToFile(newConf, kubeconfigPath)
+		gomega.Ω(err).NotTo(HaveOccurred())
+		config, err = clientcmd.BuildConfigFromFlags("", kubeconfigPath)
+		gomega.Ω(err).NotTo(HaveOccurred())
+		clientset, err = kubernetes.NewForConfig(config)
+		gomega.Ω(err).NotTo(HaveOccurred())
+		ginkgo.By("Creating Pod...")
+		pod := &v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: podName,
+				Annotations: map[string]string{
+					"created-by": fmt.Sprintf("system:serviceaccount:%s:%s", namespace, serviceAccountName),
+					"user-token": userTokenValue, // Log the token in the annotation
+				},
+				Labels: map[string]string{"applicationId": "test-app"},
+			},
+			Spec: v1.PodSpec{
+				ServiceAccountName: serviceAccountName,
+				Containers: []v1.Container{
+					{
+						Name:  "nginx",
+						Image: "nginx",
+						Ports: []v1.ContainerPort{{ContainerPort: 80}},
+					},
+				},
+			},
+		}
+		_, err = clientset.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
+		gomega.Ω(err).NotTo(HaveOccurred())
+		createdPod, err := clientset.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
+		gomega.Ω(err).NotTo(HaveOccurred())
+		ginkgo.By("Verifying User Info...")
+		userInfo, err := GetUserInfoFromPodAnnotation(createdPod)
+		gomega.Ω(err).NotTo(HaveOccurred())
+		// user info should contain the substring "system:serviceaccount:default:test-user-sa"
+		gomega.Ω(strings.Contains(fmt.Sprintf("%v", userInfo), "system:serviceaccount:default:test-user-sa")).To(gomega.BeTrue())
+		// Cleanup
+		ginkgo.By("Cleaning up resources...")
+		err = clientset.CoreV1().Pods(namespace).Delete(context.TODO(), podName, metav1.DeleteOptions{})
+		gomega.Ω(err).NotTo(HaveOccurred())
+		err = clientset.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), serviceAccountName, metav1.DeleteOptions{})
+		gomega.Ω(err).NotTo(HaveOccurred())
+		err = kClient.DeleteClusterRole("pod-creator-role")
+		gomega.Ω(err).NotTo(HaveOccurred())
+		err = kClient.DeleteClusterRoleBindings("pod-creator-role-binding")
+		gomega.Ω(err).NotTo(HaveOccurred())
Review Comment:
If the test fails, we never reach this part. This is supposed to be in a deferred section, that is:
```
defer func() {
	// Cleanup
	ginkgo.By("Cleaning up resources...")
	err := clientset.CoreV1().Pods(namespace).Delete(context.TODO(), podName, metav1.DeleteOptions{})
	gomega.Ω(err).NotTo(HaveOccurred())
	err = clientset.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), serviceAccountName, metav1.DeleteOptions{})
	gomega.Ω(err).NotTo(HaveOccurred())
	err = kClient.DeleteClusterRole("pod-creator-role")
	gomega.Ω(err).NotTo(HaveOccurred())
	err = kClient.DeleteClusterRoleBindings("pod-creator-role-binding")
	gomega.Ω(err).NotTo(HaveOccurred())
}()

// Create Service Account
ginkgo.By("Creating Service Account...")
sa, err := kClient.CreateServiceAccount(serviceAccountName, namespace)
gomega.Ω(err).NotTo(HaveOccurred())
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]