Copilot commented on code in PR #12386:
URL: https://github.com/apache/cloudstack/pull/12386#discussion_r2821682829


##########
ui/src/views/compute/CreateKubernetesCluster.vue:
##########
@@ -840,6 +919,22 @@ export default {
           size: values.size,
           clustertype: 'CloudManaged'
         }
+        if (this.isAdminOrDomainAdmin()) {
+          const selectedGroupIds = [...this.controlAffinityGroups, ...this.workerAffinityGroups, ...this.etcdAffinityGroups]
+          const selectedGroups = selectedGroupIds.map(id => this.affinityGroups.find(affinityGroup => affinityGroup.id === id)).filter(Boolean)
+          if (selectedGroups.length > 0) {
+            const hasMixedAccounts = selectedGroups.some(ag => ag.account !== selectedGroups[0].account)
+            if (hasMixedAccounts) {
+              this.$notification.error({
+                message: this.$t('message.error.affinity.groups.different.accounts')

Review Comment:
   Missing localization string for error message. The code references 'message.error.affinity.groups.different.accounts', but this key is not defined in en.json. Add the following entry to ui/public/locales/en.json:
   
   "message.error.affinity.groups.different.accounts": "Affinity groups from different accounts cannot be used together"
   ```suggestion
                message: 'Affinity groups from different accounts cannot be used together'
   ```
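   
   Note that the suggestion above hardcodes the English string. If the localized message is kept instead, the addition to ui/public/locales/en.json would look like this (a sketch; placement among the existing, alphabetically sorted message.error.* keys is assumed):
   ```json
   "message.error.affinity.groups.different.accounts": "Affinity groups from different accounts cannot be used together",
   ```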



##########
test/integration/component/test_kubernetes_cluster_affinity_groups.py:
##########
@@ -0,0 +1,931 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Tests for Kubernetes cluster affinity groups feature"""
+
+import unittest
+from marvin.cloudstackTestCase import cloudstackTestCase
+from marvin.cloudstackAPI import (listInfrastructure,
+                                  listKubernetesSupportedVersions,
+                                  addKubernetesSupportedVersion,
+                                  deleteKubernetesSupportedVersion,
+                                  listKubernetesClusters,
+                                  createKubernetesCluster,
+                                  stopKubernetesCluster,
+                                  startKubernetesCluster,
+                                  deleteKubernetesCluster,
+                                  scaleKubernetesCluster,
+                                  destroyVirtualMachine,
+                                  deleteNetwork)
+from marvin.cloudstackException import CloudstackAPIException
+from marvin.lib.base import (ServiceOffering,
+                             Account,
+                             AffinityGroup,
+                             Configurations)
+from marvin.lib.utils import (cleanup_resources,
+                              random_gen)
+from marvin.lib.common import (get_zone,
+                               get_domain)
+from marvin.sshClient import SshClient
+from nose.plugins.attrib import attr
+from marvin.lib.decoratorGenerators import skipTestIf
+
+import time
+
+_multiprocess_shared_ = True
+
+RAND_SUFFIX = random_gen()
+
+
+class TestKubernetesClusterAffinityGroups(cloudstackTestCase):
+    """
+    Tests for CKS Affinity Groups feature (since 4.23.0)
+
+    This feature allows specifying different affinity groups for each
+    Kubernetes node type (CONTROL, WORKER, ETCD).
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        testClient = super(TestKubernetesClusterAffinityGroups, cls).getClsTestClient()
+        if testClient is None:
+            raise unittest.SkipTest("Marvin test client not available - check marvin configuration")
+        cls.apiclient = testClient.getApiClient()
+        cls.services = testClient.getParsedTestDataConfig()
+        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
+        cls.hypervisor = testClient.getHypervisorInfo()
+        cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__
+
+        cls.hypervisorNotSupported = False
+        if cls.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
+            cls.hypervisorNotSupported = True
+
+        cls.setup_failed = False
+        cls._cleanup = []
+        cls.kubernetes_version_ids = []
+        cls.initial_configuration_cks_enabled = None
+
+        cls.k8s_version = cls.services.get("cks_kubernetes_version_upgrade_to",
+                                           cls.services.get("cks_kubernetes_version_upgrade_from"))
+
+        if cls.hypervisorNotSupported == False:
+            cls.endpoint_url = Configurations.list(cls.apiclient, name="endpoint.url")[0].value
+            if "localhost" in cls.endpoint_url:
+                endpoint_url = "http://%s:%d/client/api" % (cls.mgtSvrDetails["mgtSvrIp"], cls.mgtSvrDetails["port"])
+                cls.debug("Setting endpoint.url to %s" % endpoint_url)
+                Configurations.update(cls.apiclient, "endpoint.url", endpoint_url)
+
+            cls.initial_configuration_cks_enabled = Configurations.list(
+                cls.apiclient, name="cloud.kubernetes.service.enabled")[0].value
+            if cls.initial_configuration_cks_enabled not in ["true", True]:
+                cls.debug("Enabling CloudStack Kubernetes Service plugin and restarting management server")
+                Configurations.update(cls.apiclient, "cloud.kubernetes.service.enabled", "true")
+                cls.restartServer()
+
+            cls.cks_service_offering = None
+
+            if cls.setup_failed == False:
+                try:
+                    cls.kubernetes_version = cls.addKubernetesSupportedVersion(
+                        cls.services["cks_kubernetes_versions"][cls.k8s_version])
+                    cls.kubernetes_version_ids.append(cls.kubernetes_version.id)
+                except Exception as e:
+                    cls.setup_failed = True
+                    cls.debug("Failed to get Kubernetes version ISO in ready state: %s" % e)
+
+            if cls.setup_failed == False:
+                cks_offering_data = cls.services["cks_service_offering"]
+                cks_offering_data["name"] = 'CKS-Instance-' + random_gen()
+                cls.cks_service_offering = ServiceOffering.create(
+                    cls.apiclient,
+                    cks_offering_data
+                )
+                cls._cleanup.append(cls.cks_service_offering)
+
+                cls.domain = get_domain(cls.apiclient)
+                cls.account = Account.create(
+                    cls.apiclient,
+                    cls.services["account"],
+                    domainid=cls.domain.id
+                )
+                cls._cleanup.append(cls.account)
+
+        cls.default_network = None
+
+        return
+
+    @classmethod
+    def tearDownClass(cls):
+        # Delete added Kubernetes supported version
+        for version_id in cls.kubernetes_version_ids:
+            try:
+                cls.deleteKubernetesSupportedVersion(version_id)
+            except Exception as e:
+                cls.debug("Error during cleanup for Kubernetes versions: %s" % 
e)
+
+        # Restore CKS enabled
+        if cls.initial_configuration_cks_enabled not in ["true", True]:
+            cls.debug("Restoring Kubernetes Service enabled value")
+            Configurations.update(cls.apiclient, "cloud.kubernetes.service.enabled", "false")
+            cls.restartServer()
+
+        super(TestKubernetesClusterAffinityGroups, cls).tearDownClass()
+
+    @classmethod
+    def restartServer(cls):
+        """Restart management server"""
+        cls.debug("Restarting management server")
+        sshClient = SshClient(
+            cls.mgtSvrDetails["mgtSvrIp"],
+            22,
+            cls.mgtSvrDetails["user"],
+            cls.mgtSvrDetails["passwd"]
+        )
+        command = "service cloudstack-management stop"
+        sshClient.execute(command)
+
+        command = "service cloudstack-management start"
+        sshClient.execute(command)
+
+        # Wait for management to come up in 5 mins
+        timeout = time.time() + 300
+        while time.time() < timeout:
+            if cls.isManagementUp() is True:
+                return
+            time.sleep(5)
+        cls.setup_failed = True
+        cls.debug("Management server did not come up, failing")
+        return
+
+    @classmethod
+    def isManagementUp(cls):
+        try:
+            cls.apiclient.listInfrastructure(listInfrastructure.listInfrastructureCmd())
+            return True
+        except Exception:
+            return False
+
+    @classmethod
+    def waitForKubernetesSupportedVersionIsoReadyState(cls, version_id, retries=30, interval=60):
+        """Check if Kubernetes supported version ISO is in Ready state"""
+        while retries > 0:
+            time.sleep(interval)
+            list_versions_response = cls.listKubernetesSupportedVersion(version_id)
+            if not hasattr(list_versions_response, 'isostate') or not list_versions_response or not list_versions_response.isostate:
+                retries = retries - 1
+                continue
+            if 'Ready' == list_versions_response.isostate:
+                return
+            elif 'Failed' == list_versions_response.isostate:
+                raise Exception("Failed to download template: status - %s" % 
list_versions_response.isostate)
+            retries = retries - 1
+        raise Exception("Kubernetes supported version Ready state timed out")
+
+    @classmethod
+    def listKubernetesSupportedVersion(cls, version_id):
+        listKubernetesSupportedVersionsCmd = listKubernetesSupportedVersions.listKubernetesSupportedVersionsCmd()
+        listKubernetesSupportedVersionsCmd.id = version_id
+        versionResponse = cls.apiclient.listKubernetesSupportedVersions(listKubernetesSupportedVersionsCmd)
+        return versionResponse[0]
+
+    @classmethod
+    def addKubernetesSupportedVersion(cls, version_service):
+        addKubernetesSupportedVersionCmd = addKubernetesSupportedVersion.addKubernetesSupportedVersionCmd()
+        addKubernetesSupportedVersionCmd.semanticversion = version_service["semanticversion"]
+        addKubernetesSupportedVersionCmd.name = 'v' + version_service["semanticversion"] + '-' + random_gen()
+        addKubernetesSupportedVersionCmd.url = version_service["url"]
+        addKubernetesSupportedVersionCmd.mincpunumber = version_service["mincpunumber"]
+        addKubernetesSupportedVersionCmd.minmemory = version_service["minmemory"]
+        kubernetes_version = cls.apiclient.addKubernetesSupportedVersion(addKubernetesSupportedVersionCmd)
+        cls.debug("Waiting for Kubernetes version with ID %s to be ready" % kubernetes_version.id)
+        cls.waitForKubernetesSupportedVersionIsoReadyState(kubernetes_version.id)
+        kubernetes_version = cls.listKubernetesSupportedVersion(kubernetes_version.id)
+        return kubernetes_version
+
+    @classmethod
+    def deleteKubernetesSupportedVersion(cls, version_id):
+        deleteKubernetesSupportedVersionCmd = deleteKubernetesSupportedVersion.deleteKubernetesSupportedVersionCmd()
+        deleteKubernetesSupportedVersionCmd.id = version_id
+        cls.apiclient.deleteKubernetesSupportedVersion(deleteKubernetesSupportedVersionCmd)
+
+    @classmethod
+    def listKubernetesCluster(cls, cluster_id=None, cluster_name=None):
+        listKubernetesClustersCmd = listKubernetesClusters.listKubernetesClustersCmd()
+        listKubernetesClustersCmd.listall = True
+        if cluster_id is not None:
+            listKubernetesClustersCmd.id = cluster_id
+        if cluster_name is not None:
+            listKubernetesClustersCmd.name = cluster_name
+        clusterResponse = cls.apiclient.listKubernetesClusters(listKubernetesClustersCmd)
+        if (cluster_id is not None or cluster_name is not None) and clusterResponse is not None:
+            return clusterResponse[0]
+        return clusterResponse
+
+    @classmethod
+    def deleteKubernetesCluster(cls, cluster_id):
+        deleteKubernetesClusterCmd = deleteKubernetesCluster.deleteKubernetesClusterCmd()
+        deleteKubernetesClusterCmd.id = cluster_id
+        response = cls.apiclient.deleteKubernetesCluster(deleteKubernetesClusterCmd)
+        return response
+
+    @classmethod
+    def stopKubernetesCluster(cls, cluster_id):
+        stopKubernetesClusterCmd = stopKubernetesCluster.stopKubernetesClusterCmd()
+        stopKubernetesClusterCmd.id = cluster_id
+        response = cls.apiclient.stopKubernetesCluster(stopKubernetesClusterCmd)
+        return response
+
+    def setUp(self):
+        self.services = self.testClient.getParsedTestDataConfig()
+        self.apiclient = self.testClient.getApiClient()
+        self.dbclient = self.testClient.getDbConnection()
+        self.cleanup = []
+        self.aff_grp = []
+        return
+
+    def tearDown(self):
+        super(TestKubernetesClusterAffinityGroups, self).tearDown()
+
+    def deleteKubernetesClusterAndVerify(self, cluster_id, verify=True, forced=False):
+        """Delete Kubernetes cluster and check if it is really deleted"""
+        delete_response = {}
+        forceDeleted = False
+        try:
+            delete_response = self.deleteKubernetesCluster(cluster_id)
+        except Exception as e:
+            if forced:
+                cluster = self.listKubernetesCluster(cluster_id)
+                if cluster is not None:
+                    if cluster.state in ['Starting', 'Running', 'Upgrading', 'Scaling']:
+                        self.stopKubernetesCluster(cluster_id)
+                        self.deleteKubernetesCluster(cluster_id)
+                    else:
+                        forceDeleted = True
+                        for cluster_vm in cluster.virtualmachines:
+                            cmd = destroyVirtualMachine.destroyVirtualMachineCmd()
+                            cmd.id = cluster_vm.id
+                            cmd.expunge = True
+                            self.apiclient.destroyVirtualMachine(cmd)
+                        cmd = deleteNetwork.deleteNetworkCmd()
+                        cmd.id = cluster.networkid
+                        cmd.forced = True
+                        self.apiclient.deleteNetwork(cmd)
+                        self.dbclient.execute(
+                            "update kubernetes_cluster set state='Destroyed', 
removed=now() where uuid = '%s';" % cluster.id)
+            else:
+                raise Exception("Error: Exception during delete cluster : %s" 
% e)
+
+        if verify and not forceDeleted:
+            self.assertEqual(
+                delete_response.success,
+                True,
+                "Check KubernetesCluster delete response {}, 
{}".format(delete_response.success, True)
+            )
+
+            db_cluster_removed = \
+            self.dbclient.execute("select removed from kubernetes_cluster 
where uuid = '%s';" % cluster_id)[0][0]
+
+            self.assertNotEqual(
+                db_cluster_removed,
+                None,
+                "KubernetesCluster not removed in DB, 
{}".format(db_cluster_removed)
+            )
+
+    def create_aff_grp(self, aff_grp_name=None, aff_grp_type="host anti-affinity"):
+        """Create an affinity group"""
+        if aff_grp_name is None:
+            aff_grp_name = "aff_grp_" + random_gen(size=6)
+
+        aff_grp_data = {
+            "name": aff_grp_name,
+            "type": aff_grp_type
+        }
+        aff_grp = AffinityGroup.create(
+            self.apiclient,
+            aff_grp_data,
+            self.account.name,
+            self.domain.id
+        )
+        self.aff_grp.append(aff_grp)
+        self.cleanup.append(aff_grp)
+        return aff_grp
+
+    def createKubernetesCluster(self, name, version_id, size=1, control_nodes=1, etcd_nodes=0,
+                                control_aff_grp=None, worker_aff_grp=None, etcd_aff_grp=None):
+        """Create a Kubernetes cluster with optional affinity groups for each 
node type"""
+        createKubernetesClusterCmd = 
createKubernetesCluster.createKubernetesClusterCmd()
+        createKubernetesClusterCmd.name = name
+        createKubernetesClusterCmd.description = name + "-description"
+        createKubernetesClusterCmd.kubernetesversionid = version_id
+        createKubernetesClusterCmd.size = size
+        createKubernetesClusterCmd.controlnodes = control_nodes
+        createKubernetesClusterCmd.serviceofferingid = self.cks_service_offering.id
+        createKubernetesClusterCmd.zoneid = self.zone.id
+        createKubernetesClusterCmd.noderootdisksize = 10
+        createKubernetesClusterCmd.account = self.account.name
+        createKubernetesClusterCmd.domainid = self.domain.id
+
+        if etcd_nodes > 0:
+            createKubernetesClusterCmd.etcdnodes = etcd_nodes
+
+        # Set affinity groups for node types using the nodeaffinitygroups parameter
+        # Format: list of {node: "<NODE_TYPE>", affinitygroup: "<UUID>"}
+        if control_aff_grp is not None:
+            if not hasattr(createKubernetesClusterCmd, 'nodeaffinitygroups'):
+                createKubernetesClusterCmd.nodeaffinitygroups = []
+            createKubernetesClusterCmd.nodeaffinitygroups.append({
+                "node": "CONTROL",
+                "affinitygroup": control_aff_grp.id
+            })
+        if worker_aff_grp is not None:
+            if not hasattr(createKubernetesClusterCmd, 'nodeaffinitygroups'):
+                createKubernetesClusterCmd.nodeaffinitygroups = []
+            createKubernetesClusterCmd.nodeaffinitygroups.append({
+                "node": "WORKER",
+                "affinitygroup": worker_aff_grp.id
+            })
+        if etcd_aff_grp is not None:
+            if not hasattr(createKubernetesClusterCmd, 'nodeaffinitygroups'):
+                createKubernetesClusterCmd.nodeaffinitygroups = []
+            createKubernetesClusterCmd.nodeaffinitygroups.append({
+                "node": "ETCD",
+                "affinitygroup": etcd_aff_grp.id
+            })
+
+        if self.default_network:
+            createKubernetesClusterCmd.networkid = self.default_network.id
+
+        clusterResponse = self.apiclient.createKubernetesCluster(createKubernetesClusterCmd)
+        return clusterResponse
+
+    def startKubernetesCluster(self, cluster_id):
+        startKubernetesClusterCmd = startKubernetesCluster.startKubernetesClusterCmd()
+        startKubernetesClusterCmd.id = cluster_id
+        response = self.apiclient.startKubernetesCluster(startKubernetesClusterCmd)
+        return response
+
+    def scaleKubernetesCluster(self, cluster_id, size):
+        scaleKubernetesClusterCmd = scaleKubernetesCluster.scaleKubernetesClusterCmd()
+        scaleKubernetesClusterCmd.id = cluster_id
+        scaleKubernetesClusterCmd.size = size
+        response = self.apiclient.scaleKubernetesCluster(scaleKubernetesClusterCmd)
+        return response
+
+    def verifyKubernetesClusterState(self, cluster_response, state):
+        """Check if Kubernetes cluster state matches expected state"""
+        self.assertEqual(
+            cluster_response.state,
+            state,
+            "Check KubernetesCluster state {}, expected 
{}".format(cluster_response.state, state)
+        )
+
+    def verifyKubernetesClusterAffinityGroups(self, cluster, control_aff_grp=None,
+                                               worker_aff_grp=None, etcd_aff_grp=None):
+        """Verify affinity groups are correctly assigned to the cluster"""
+        if control_aff_grp is not None:
+            self.assertEqual(
+                cluster.controlnodeaffinitygroupid,
+                control_aff_grp.id,
+                "Control node affinity group ID mismatch. Expected: {}, Got: 
{}".format(
+                    control_aff_grp.id, cluster.controlnodeaffinitygroupid)
+            )
+            self.assertEqual(
+                cluster.controlnodeaffinitygroupname,
+                control_aff_grp.name,
+                "Control node affinity group name mismatch. Expected: {}, Got: 
{}".format(
+                    control_aff_grp.name, cluster.controlnodeaffinitygroupname)
+            )
+        else:
+            self.assertTrue(
+                not hasattr(cluster, 'controlnodeaffinitygroupid') or cluster.controlnodeaffinitygroupid is None,
+                "Control node affinity group should be None"
+            )
+
+        if worker_aff_grp is not None:
+            self.assertEqual(
+                cluster.workernodeaffinitygroupid,
+                worker_aff_grp.id,
+                "Worker node affinity group ID mismatch. Expected: {}, Got: 
{}".format(
+                    worker_aff_grp.id, cluster.workernodeaffinitygroupid)
+            )
+            self.assertEqual(
+                cluster.workernodeaffinitygroupname,
+                worker_aff_grp.name,
+                "Worker node affinity group name mismatch. Expected: {}, Got: 
{}".format(
+                    worker_aff_grp.name, cluster.workernodeaffinitygroupname)
+            )
+        else:
+            self.assertTrue(
+                not hasattr(cluster, 'workernodeaffinitygroupid') or cluster.workernodeaffinitygroupid is None,
+                "Worker node affinity group should be None"
+            )
+
+        if etcd_aff_grp is not None:
+            self.assertEqual(
+                cluster.etcdnodeaffinitygroupid,
+                etcd_aff_grp.id,
+                "ETCD node affinity group ID mismatch. Expected: {}, Got: 
{}".format(
+                    etcd_aff_grp.id, cluster.etcdnodeaffinitygroupid)
+            )
+            self.assertEqual(
+                cluster.etcdnodeaffinitygroupname,
+                etcd_aff_grp.name,
+                "ETCD node affinity group name mismatch. Expected: {}, Got: 
{}".format(
+                    etcd_aff_grp.name, cluster.etcdnodeaffinitygroupname)
+            )
+        else:
+            self.assertTrue(
+                not hasattr(cluster, 'etcdnodeaffinitygroupid') or cluster.etcdnodeaffinitygroupid is None,
+                "ETCD node affinity group should be None"

Review Comment:
   Test field name mismatch: the test checks singular fields like 'controlnodeaffinitygroupid' and 'controlnodeaffinitygroupname', but the API response only defines the plural fields 'controlaffinitygroupids' and 'controlaffinitygroupnames' (which return comma-separated values). The test assertions at lines 400, 406, 420, 426, 438, and 444 will fail because these singular attributes don't exist in the KubernetesClusterResponse class.
   
   Update the test to use the correct plural field names and handle CSV parsing. For example:
   - cluster.controlaffinitygroupids instead of cluster.controlnodeaffinitygroupid
   - Parse the CSV string and verify it contains the expected affinity group ID
   ```suggestion
               control_ids_csv = getattr(cluster, 'controlaffinitygroupids', None)
               control_names_csv = getattr(cluster, 'controlaffinitygroupnames', None)
               self.assertIsNotNone(
                   control_ids_csv,
                   "Control affinity group IDs should be present when a control affinity group is specified"
               )
               self.assertIsNotNone(
                   control_names_csv,
                   "Control affinity group names should be present when a control affinity group is specified"
               )
               control_ids = [v.strip() for v in str(control_ids_csv).split(',') if v.strip()]
               control_names = [v.strip() for v in str(control_names_csv).split(',') if v.strip()]
               self.assertIn(
                   str(control_aff_grp.id),
                   control_ids,
                   "Control node affinity group ID mismatch. Expected to find ID {} in {}".format(
                       control_aff_grp.id, control_ids_csv)
               )
               self.assertIn(
                   control_aff_grp.name,
                   control_names,
                   "Control node affinity group name mismatch. Expected to find name '{}' in '{}'".format(
                       control_aff_grp.name, control_names_csv)
               )
           else:
               self.assertTrue(
                   not hasattr(cluster, 'controlaffinitygroupids') or not getattr(cluster, 'controlaffinitygroupids'),
                   "Control node affinity group should be None or empty"
               )

           if worker_aff_grp is not None:
               worker_ids_csv = getattr(cluster, 'workeraffinitygroupids', None)
               worker_names_csv = getattr(cluster, 'workeraffinitygroupnames', None)
               self.assertIsNotNone(
                   worker_ids_csv,
                   "Worker affinity group IDs should be present when a worker affinity group is specified"
               )
               self.assertIsNotNone(
                   worker_names_csv,
                   "Worker affinity group names should be present when a worker affinity group is specified"
               )
               worker_ids = [v.strip() for v in str(worker_ids_csv).split(',') if v.strip()]
               worker_names = [v.strip() for v in str(worker_names_csv).split(',') if v.strip()]
               self.assertIn(
                   str(worker_aff_grp.id),
                   worker_ids,
                   "Worker node affinity group ID mismatch. Expected to find ID {} in {}".format(
                       worker_aff_grp.id, worker_ids_csv)
               )
               self.assertIn(
                   worker_aff_grp.name,
                   worker_names,
                   "Worker node affinity group name mismatch. Expected to find name '{}' in '{}'".format(
                       worker_aff_grp.name, worker_names_csv)
               )
           else:
               self.assertTrue(
                   not hasattr(cluster, 'workeraffinitygroupids') or not getattr(cluster, 'workeraffinitygroupids'),
                   "Worker node affinity group should be None or empty"
               )

           if etcd_aff_grp is not None:
               etcd_ids_csv = getattr(cluster, 'etcdaffinitygroupids', None)
               etcd_names_csv = getattr(cluster, 'etcdaffinitygroupnames', None)
               self.assertIsNotNone(
                   etcd_ids_csv,
                   "ETCD affinity group IDs should be present when an ETCD affinity group is specified"
               )
               self.assertIsNotNone(
                   etcd_names_csv,
                   "ETCD affinity group names should be present when an ETCD affinity group is specified"
               )
               etcd_ids = [v.strip() for v in str(etcd_ids_csv).split(',') if v.strip()]
               etcd_names = [v.strip() for v in str(etcd_names_csv).split(',') if v.strip()]
               self.assertIn(
                   str(etcd_aff_grp.id),
                   etcd_ids,
                   "ETCD node affinity group ID mismatch. Expected to find ID {} in {}".format(
                       etcd_aff_grp.id, etcd_ids_csv)
               )
               self.assertIn(
                   etcd_aff_grp.name,
                   etcd_names,
                   "ETCD node affinity group name mismatch. Expected to find name '{}' in '{}'".format(
                       etcd_aff_grp.name, etcd_names_csv)
               )
           else:
               self.assertTrue(
                   not hasattr(cluster, 'etcdaffinitygroupids') or not getattr(cluster, 'etcdaffinitygroupids'),
                   "ETCD node affinity group should be None or empty"
   ```
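   
   As a further cleanup, the three branches above are near-identical and could be collapsed into one parameterized helper on the test class. A minimal sketch (the name assertNodeAffinityGroup and its signature are illustrative, not part of the PR):
   ```python
   def assertNodeAffinityGroup(self, cluster, node_label, aff_grp):
       """Assert that aff_grp appears in the node type's CSV affinity group
       fields, or that the fields are absent/empty when aff_grp is None."""
       ids_csv = getattr(cluster, '%saffinitygroupids' % node_label, None)
       names_csv = getattr(cluster, '%saffinitygroupnames' % node_label, None)
       if aff_grp is None:
           self.assertFalse(ids_csv, "%s affinity groups should be None or empty" % node_label)
           return
       self.assertIsNotNone(ids_csv, "%saffinitygroupids should be present" % node_label)
       self.assertIsNotNone(names_csv, "%saffinitygroupnames should be present" % node_label)
       # The response fields are comma-separated values; split and strip before comparing
       ids = [v.strip() for v in str(ids_csv).split(',') if v.strip()]
       names = [v.strip() for v in str(names_csv).split(',') if v.strip()]
       self.assertIn(str(aff_grp.id), ids, "Expected ID %s in %s" % (aff_grp.id, ids_csv))
       self.assertIn(aff_grp.name, names, "Expected name '%s' in '%s'" % (aff_grp.name, names_csv))
   ```
   verifyKubernetesClusterAffinityGroups then reduces to three calls, e.g. self.assertNodeAffinityGroup(cluster, 'control', control_aff_grp).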



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
