This is an automated email from the ASF dual-hosted git repository.

rohit pushed a commit to branch 4.15
in repository https://gitbox.apache.org/repos/asf/cloudstack.git


The following commit(s) were added to refs/heads/4.15 by this push:
     new d4db101  marvin: fix test_scale_vm for xenserver/Xcp-ng (#4929)
d4db101 is described below

commit d4db1015b8b8af5f84ee971888add961ac1cadc1
Author: Pearl Dsilva <[email protected]>
AuthorDate: Tue Apr 27 10:21:48 2021 +0530

    marvin: fix test_scale_vm for xenserver/Xcp-ng (#4929)
    
    Co-authored-by: Pearl Dsilva <[email protected]>
---
 .../xenserver/resource/CitrixResourceBase.java     |   2 +-
 .../main/java/com/cloud/vm/UserVmManagerImpl.java  |   8 +-
 test/integration/smoke/test_scale_vm.py            | 101 ++++++++++++++++-----
 3 files changed, 81 insertions(+), 30 deletions(-)
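
The diff below shows the test now turns the enable.dynamic.scale.vm global setting on in setUpClass and back off in tearDown. A minimal sketch of that toggle with marvin's Configurations helper, assuming "apiclient" is the marvin API client the test harness provides:

    from marvin.lib.base import Configurations

    # Enable dynamic scaling globally before the scale tests run;
    # "apiclient" is assumed to come from the marvin test harness.
    Configurations.update(apiclient,
                          name="enable.dynamic.scale.vm",
                          value="true")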

diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java
index a26bcc4..315ce17 100644
--- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java
+++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java
@@ -3110,7 +3110,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
             s_logger.warn("No recommended value found for dynamic max, setting static max and dynamic max equal");
             return dynamicMaxRam;
         }
-        final long staticMax = Math.min(recommendedValue, 4l * dynamicMinRam); // XS
+        final long staticMax = Math.min(recommendedValue, 4L * dynamicMinRam); // XS
         // constraint
         // for
         // stability
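
The only change in the hunk above is the long-literal suffix (4l becomes 4L), which is easier to read; the computed value is identical. For context, the constraint the line encodes, sketched as a hypothetical Python helper: the static memory maximum is capped at four times the dynamic minimum for XenServer stability.

    # Hypothetical sketch of the XenServer stability constraint above.
    def static_max_ram(recommended_value, dynamic_min_ram):
        # Static max may never exceed 4x the dynamic minimum.
        return min(recommended_value, 4 * dynamic_min_ram)
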
diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
index 951f03d..53647ea 100644
--- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
@@ -1947,14 +1947,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
                     _itMgr.reConfigureVm(vmInstance.getUuid(), currentServiceOffering, newServiceOffering, customParameters, existingHostHasCapacity);
                     success = true;
                     return success;
-                } catch (InsufficientCapacityException e) {
-                    s_logger.warn("Received exception while scaling ", e);
-                } catch (ResourceUnavailableException e) {
-                    s_logger.warn("Received exception while scaling ", e);
-                } catch (ConcurrentOperationException e) {
+                } catch (InsufficientCapacityException | ResourceUnavailableException | ConcurrentOperationException e) {
                     s_logger.warn("Received exception while scaling ", e);
                 } catch (Exception e) {
-                    s_logger.warn("Received exception while scaling ", e);
+                    s_logger.warn("Scaling failed with exception: ", e);
                 } finally {
                     if (!success) {
                         // Decrement CPU and Memory count accordingly.
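
The hunk above folds three duplicate catch blocks into a single Java multi-catch. The same consolidation, sketched in Python with hypothetical stand-in exception types (none of these names are CloudStack's):

    import logging

    logger = logging.getLogger(__name__)

    # Hypothetical stand-ins for the three Java exception types.
    class InsufficientCapacityError(Exception): pass
    class ResourceUnavailableError(Exception): pass
    class ConcurrentOperationError(Exception): pass

    def scale_vm(reconfigure):
        try:
            reconfigure()
            return True
        except (InsufficientCapacityError,
                ResourceUnavailableError,
                ConcurrentOperationError) as e:
            # One handler replaces three identical blocks.
            logger.warning("Received exception while scaling: %s", e)
            return False
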
diff --git a/test/integration/smoke/test_scale_vm.py b/test/integration/smoke/test_scale_vm.py
index ddd6bcf..c58aabb 100644
--- a/test/integration/smoke/test_scale_vm.py
+++ b/test/integration/smoke/test_scale_vm.py
@@ -22,12 +22,17 @@ from marvin.cloudstackTestCase import cloudstackTestCase
 from marvin.cloudstackAPI import scaleVirtualMachine
 from marvin.lib.utils import cleanup_resources
 from marvin.lib.base import (Account,
+                             Host,
                              VirtualMachine,
-                             ServiceOffering)
+                             ServiceOffering,
+                             Template,
+                             Configurations)
 from marvin.lib.common import (get_zone,
                                get_template,
                                get_domain)
 from nose.plugins.attrib import attr
+from marvin.sshClient import SshClient
+import time
 
 _multiprocess_shared_ = True
 
@@ -39,6 +44,8 @@ class TestScaleVm(cloudstackTestCase):
         testClient = super(TestScaleVm, cls).getClsTestClient()
         cls.apiclient = testClient.getApiClient()
         cls.services = testClient.getParsedTestDataConfig()
+        cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][
+                    0].__dict__
         cls._cleanup = []
         cls.unsupportedHypervisor = False
         cls.hypervisor = cls.testClient.getHypervisorInfo()
@@ -48,21 +55,31 @@ class TestScaleVm(cloudstackTestCase):
 
         # Get Zone, Domain and templates
         domain = get_domain(cls.apiclient)
-        zone = get_zone(cls.apiclient, testClient.getZoneForTests())
-        cls.services['mode'] = zone.networktype
+        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
+        cls.services['mode'] = cls.zone.networktype
 
-        template = get_template(
-            cls.apiclient,
-            zone.id,
-            cls.services["ostype"]
-        )
-        if template == FAILED:
-            assert False, "get_template() failed to return template\
-                    with description %s" % cls.services["ostype"]
+        if cls.hypervisor.lower() == 'simulator':
+            cls.template = get_template(
+                cls.apiclient,
+                cls.zone.id,
+                cls.services["ostype"]
+            )
+            if cls.template == FAILED:
+                assert False, "get_template() failed to return template\
+                        with description %s" % cls.services["ostype"]
+        else:
+            cls.template = Template.register(
+                       cls.apiclient,
+                       cls.services["CentOS7template"],
+                       zoneid=cls.zone.id
+                    )
+            cls._cleanup.append(cls.template)
+            cls.template.download(cls.apiclient)
+            time.sleep(60)
 
-        # Set Zones and disk offerings ??
-        cls.services["small"]["zoneid"] = zone.id
-        cls.services["small"]["template"] = template.id
+        # Set Zones and disk offerings
+        cls.services["small"]["zoneid"] = cls.zone.id
+        cls.services["small"]["template"] = cls.template.id
 
         # Create account, service offerings, vm.
         cls.account = Account.create(
@@ -70,16 +87,25 @@ class TestScaleVm(cloudstackTestCase):
             cls.services["account"],
             domainid=domain.id
         )
+        cls._cleanup.append(cls.account)
 
         cls.small_offering = ServiceOffering.create(
             cls.apiclient,
             cls.services["service_offerings"]["small"]
         )
+        cls._cleanup.append(cls.small_offering)
 
         cls.big_offering = ServiceOffering.create(
             cls.apiclient,
             cls.services["service_offerings"]["big"]
         )
+        cls._cleanup.append(cls.big_offering)
+
+        Configurations.update(
+                    cls.apiclient,
+                    name="enable.dynamic.scale.vm",
+                    value="true"
+                )
 
         # create a virtual machine
         cls.virtual_machine = VirtualMachine.create(
@@ -90,17 +116,10 @@ class TestScaleVm(cloudstackTestCase):
             serviceofferingid=cls.small_offering.id,
             mode=cls.services["mode"]
         )
-        cls._cleanup = [
-            cls.small_offering,
-            cls.account
-        ]
 
     @classmethod
     def tearDownClass(cls):
-        cls.apiclient = super(
-            TestScaleVm,
-            cls).getClsTestClient().getApiClient()
-        cleanup_resources(cls.apiclient, cls._cleanup)
+        super(TestScaleVm, cls).tearDownClass()
         return
 
     def setUp(self):
@@ -113,10 +132,27 @@ class TestScaleVm(cloudstackTestCase):
                     %s" % self.hypervisor)
 
     def tearDown(self):
+        Configurations.update(
+            self.apiclient,
+            name="enable.dynamic.scale.vm",
+            value="false"
+        )
         # Clean up, terminate the created ISOs
-        cleanup_resources(self.apiclient, self.cleanup)
+        super(TestScaleVm, self).tearDown()
         return
 
+    def get_ssh_client(self, ip, username, password, retries=10):
+        """ Setup ssh client connection and return connection """
+        try:
+            ssh_client = SshClient(ip, 22, username, password, retries)
+        except Exception as e:
+            raise self.skipTest("Unable to create ssh connection: %s" % e)
+
+        self.assertIsNotNone(
+            ssh_client, "Failed to setup ssh connection to ip=%s" % ip)
+
+        return ssh_client
+
     @attr(hypervisor="xenserver")
     @attr(tags=["advanced", "basic"], required_hardware="false")
     def test_01_scale_vm(self):
@@ -145,6 +181,25 @@ class TestScaleVm(cloudstackTestCase):
             if not "running" in result:
                 self.skipTest("Skipping scale VM operation because\
                     VMware tools are not installed on the VM")
+        if self.hypervisor.lower() != 'simulator':
+            hostid = self.virtual_machine.hostid
+            host = Host.list(
+                       self.apiclient,
+                       zoneid=self.zone.id,
+                       hostid=hostid,
+                       type='Routing'
+                   )[0]
+
+            try:
+                username = self.hostConfig["username"]
+                password = self.hostConfig["password"]
+                ssh_client = self.get_ssh_client(host.ipaddress, username, password)
+                res = ssh_client.execute("hostnamectl | grep 'Operating System' | cut -d':' -f2")
+            except Exception as e:
+                res = None
+
+            if res and 'XenServer' in res[0]:
+                self.skipTest("Skipping test for XenServer as it's License 
does not allow scaling")
 
         self.virtual_machine.update(
             self.apiclient,

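The new XenServer check in test_01_scale_vm amounts to an SSH probe of the host's operating system. A standalone sketch of that probe, assuming marvin is importable and the host is reachable; the helper name is hypothetical, while the SshClient constructor shape and the list-of-lines return value of execute() match how the test uses them:

    from marvin.sshClient import SshClient

    def host_is_xenserver(ip, username, password):
        # Same constructor shape the test uses:
        # SshClient(host, port, user, passwd, retries).
        ssh = SshClient(ip, 22, username, password, 10)
        # execute() returns the command's output as a list of lines.
        res = ssh.execute("hostnamectl | grep 'Operating System' | cut -d':' -f2")
        return bool(res) and 'XenServer' in res[0]

Unlicensed XenServer hosts do not allow dynamic scaling, which is why the test skips there instead of failing.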