pbacsko commented on code in PR #915:
URL: https://github.com/apache/yunikorn-k8shim/pull/915#discussion_r1761992219
##########
test/e2e/user_group_limit/user_group_limit_test.go:
##########
@@ -911,6 +925,206 @@ var _ = ginkgo.Describe("UserGroupLimit", func() {
checkUsageWildcardGroups(groupTestType, group2, sandboxQueue1,
[]*v1.Pod{group2Sandbox1Pod1, group2Sandbox1Pod2, group2Sandbox1Pod3})
})
+ ginkgo.It("Verify User info for the non kube admin user", func() {
+ var clientset *kubernetes.Clientset
+ var namespace = "default"
+ var serviceAccountName = "test-user-sa"
+ var podName = "test-pod"
+ var secretName = "test-user-sa-token" // #nosec G101
+ clientset = kClient.GetClient()
+ ginkgo.By("Update config")
+ // The wait wrapper still can't fully guarantee that the config
in AdmissionController has been updated.
+ admissionCustomConfig = map[string]string{
+ "log.core.scheduler.ugm.level": "debug",
+ amconf.AMAccessControlBypassAuth: constants.False,
+ }
+
yunikorn.WaitForAdmissionControllerRefreshConfAfterAction(func() {
+
yunikorn.UpdateCustomConfigMapWrapperWithMap(oldConfigMap, "",
admissionCustomConfig, func(sc *configs.SchedulerConfig) error {
+ // remove placement rules so we can control
queue
+ sc.Partitions[0].PlacementRules = nil
+
+ err := common.AddQueue(sc,
constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{
+ Name: "default",
+ Limits: []configs.Limit{
+ {
+ Limit: "user
entry",
+ Users:
[]string{user1},
+ MaxApplications: 1,
+ MaxResources:
map[string]string{
+
siCommon.Memory: fmt.Sprintf("%dM", mediumMem),
+ },
+ },
+ {
+ Limit: "user2
entry",
+ Users:
[]string{user2},
+ MaxApplications: 2,
+ MaxResources:
map[string]string{
+
siCommon.Memory: fmt.Sprintf("%dM", largeMem),
+ },
+ },
+ }})
+ if err != nil {
+ return err
+ }
+ return common.AddQueue(sc,
constants.DefaultPartition, constants.RootQueue, configs.QueueConfig{Name:
"sandbox2"})
+ })
+ })
+ // Backup the existing kubeconfig
+ oldKubeconfigPath := filepath.Join(os.Getenv("HOME"), ".kube",
"config")
+ if _, err := os.Stat(oldKubeconfigPath); !os.IsNotExist(err) {
+ oldKubeconfigContent, err =
os.ReadFile(oldKubeconfigPath)
+ gomega.Ω(err).NotTo(HaveOccurred())
+ }
+ // Create Service Account
+ ginkgo.By("Creating Service Account...")
+ _, err :=
clientset.CoreV1().ServiceAccounts(namespace).Create(context.TODO(),
&v1.ServiceAccount{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: serviceAccountName,
+ },
+ }, metav1.CreateOptions{})
+ gomega.Ω(err).NotTo(HaveOccurred())
+ // Create a ClusterRole with necessary permissions
+ ginkgo.By("Creating ClusterRole...")
+ clusterRole := &rbacv1.ClusterRole{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "pod-creator-role",
+ },
+ Rules: []rbacv1.PolicyRule{
+ {
+ APIGroups: []string{""},
+ Resources: []string{"pods",
"serviceaccounts"},
+ Verbs: []string{"create", "get",
"list", "watch", "delete"},
+ },
+ },
+ }
+ _, err = kClient.CreateClusterRole(clusterRole)
+ gomega.Ω(err).NotTo(HaveOccurred())
+ // Create a ClusterRoleBinding to bind the ClusterRole to the
service account
+ ginkgo.By("Creating ClusterRoleBinding...")
+ clusterRoleBinding := &rbacv1.ClusterRoleBinding{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "pod-creator-role-binding",
+ },
+ RoleRef: rbacv1.RoleRef{
+ APIGroup: "rbac.authorization.k8s.io",
+ Kind: "ClusterRole",
+ Name: "pod-creator-role",
+ },
+ Subjects: []rbacv1.Subject{
+ {
+ Kind: "ServiceAccount",
+ Name: "test-user-sa",
+ Namespace: "default",
+ },
+ },
+ }
+ _, err =
kClient.CreateClusterRoleBinding(clusterRoleBinding.ObjectMeta.Name,
clusterRoleBinding.RoleRef.Name, clusterRoleBinding.Subjects[0].Namespace,
clusterRoleBinding.Subjects[0].Name)
+ gomega.Ω(err).NotTo(HaveOccurred())
+ // Create a Secret for the Service Account
+ ginkgo.By("Creating Secret for the Service Account...")
+ _, err =
clientset.CoreV1().Secrets(namespace).Create(context.TODO(), &v1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: secretName,
+ Annotations: map[string]string{
+ "kubernetes.io/service-account.name":
serviceAccountName,
+ },
+ },
+ Type: v1.SecretTypeServiceAccountToken,
+ }, metav1.CreateOptions{})
Review Comment:
Please use `KubeCtl.CreateSecret()` if possible. If the annotation is
necessary, create a new method with the name `CreateSecretWithAnnotation(secret
*v1.Secret, namespace string, annotations map[string]string)` to avoid calling
this directly.
##########
test/e2e/user_group_limit/user_group_limit_test.go:
##########
@@ -1022,3 +1249,45 @@ func checkUsageWildcardGroups(testType TestType, name
string, queuePath string,
Ω(resourceUsageDAO.ResourceUsage.Resources["pods"]).To(gomega.Equal(resources.Quantity(len(expectedRunningPods))))
Ω(resourceUsageDAO.RunningApplications).To(gomega.ConsistOf(appIDs...))
}
+
+func createKubeconfig(path, currentContext, clusterCA, clusterServer,
userTokenValue string) error {
+ kubeconfigTemplate := `
+apiVersion: v1
+kind: Config
+current-context: ${CURRENT_CONTEXT}
+contexts:
+- name: ${CURRENT_CONTEXT}
+ context:
+ cluster: ${CURRENT_CONTEXT}
+ user: test-user
+clusters:
+- name: ${CURRENT_CONTEXT}
+ cluster:
+ certificate-authority-data: ${CLUSTER_CA}
+ server: ${CLUSTER_SERVER}
+users:
+- name: test-user
+ user:
+ token: ${USER_TOKEN_VALUE}
+`
+ // Replace placeholders in the template
+ kubeconfigContent := strings.ReplaceAll(kubeconfigTemplate,
"${CURRENT_CONTEXT}", currentContext)
+ kubeconfigContent = strings.ReplaceAll(kubeconfigContent,
"${CLUSTER_CA}", clusterCA)
+ kubeconfigContent = strings.ReplaceAll(kubeconfigContent,
"${CLUSTER_SERVER}", clusterServer)
+ kubeconfigContent = strings.ReplaceAll(kubeconfigContent,
"${USER_TOKEN_VALUE}", userTokenValue)
+
+ // Write the kubeconfig YAML to the file
+ err := os.WriteFile(path, []byte(kubeconfigContent), 0600)
+ gomega.Ω(err).NotTo(gomega.HaveOccurred())
+ return nil
+}
Review Comment:
All of this should NOT be necessary; it is far too complicated to mess around
with separate `kubectl` calls and on-disk kubeconfig files.
You can do this:
```
config, _ := kClient.GetKubeConfig() // handle error in real code
newConf := config.DeepCopy()         // copy existing config
newConf.TLSClientConfig.CertFile = "" // remove cert file
newConf.TLSClientConfig.KeyFile = ""  // remove key file
newConf.BearerToken = "<base64Token>" // set token that is retrieved in the test
_ = kClient.SetClientFromConfig(newConf)
```
After this point, `kClient` will use the token for authentication; there is no
need to delete or restore anything.
New method is necessary in `KubeCtl`:
```
func (k *KubeCtl) SetClientFromConfig(conf *rest.Config) error {
    k.kubeConfig = conf.DeepCopy()
    clientSet, err := kubernetes.NewForConfig(k.kubeConfig) // creates a new clientset
    if err != nil {
        return err
    }
    k.clientSet = clientSet
    return nil
}
```
Also, try to retrieve the secret token using `KubeCtl`. We might need to
create a new method for it, but again, it shouldn't involve `kubectl`:
```
func (k *KubeCtl) GetSecret(namespace, name string) (*v1.Secret, error) {
    return k.clientSet.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]