On 06/26/2012 02:28 PM, Alex Jia wrote:
> It's a common virt-v2v test assistant module, which provides the
> classes and functions needed for virt-v2v testing.
>
> Signed-off-by: Alex Jia <a...@redhat.com>
> Signed-off-by: Wayne Sun <g...@redhat.com>
> ---
>   client/virt/virt_v2v_utils.py |  278 +++++++++++++++++++++++++++++++++++++++++
>   1 file changed, 278 insertions(+)
>   create mode 100644 client/virt/virt_v2v_utils.py
>
> diff --git a/client/virt/virt_v2v_utils.py b/client/virt/virt_v2v_utils.py
> new file mode 100644
> index 0000000..f93e417
> --- /dev/null
> +++ b/client/virt/virt_v2v_utils.py
> @@ -0,0 +1,278 @@
> +"""
> +Virt-v2v test utility functions.
> +
> +@copyright: 2008-2012 Red Hat Inc.
> +"""
> +
> +import os, re, logging
> +
> +import ovirt
> +import libvirt_vm as lvirt
> +
> +
> +def build_esx_no_verify(params):
> +    """
> +    Build esx no verify relationship.
> +    """
> +    netrc = params.get('netrc')
> +    path = os.path.join(os.getenv("HOME"), '.netrc')
> +    fp = open(path, "a")
> +    os.chmod(path, 0600)
> +    fp.write("%s\n" % netrc)
> +    fp.close()
> +
> +
> +class URI(object):
> +    """
> +    This class is used for generating uri.
> +    """
> +    def __init__(self, hypervisor):
> +        if hypervisor is None:
> +            # kvm is a default hypervisor
> +            hypervisor = "kvm"
> +        self.hyper = hypervisor
> +

^ Double line spacing between major blocks is desirable, as it conforms 
to the style used in the rest of the code.

> +    def get_uri(self, hostname):
> +        """
> +        This function is a URI dispatcher.
> +        """
> +        uri_func = getattr(self, "get_%s_uri" % self.hyper)

^ Maybe a more graceful failure, with a more meaningful exception and 
message, should be raised here instead of the default AttributeError.
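
Something along these lines would fail more gracefully (just a 
sketch, assuming the module imports autotest's error module, i.e. 
"from autotest_lib.client.common_lib import error"):

    def get_uri(self, hostname):
        """
        This function is a URI dispatcher.
        """
        try:
            uri_func = getattr(self, "get_%s_uri" % self.hyper)
        except AttributeError:
            raise error.TestError("Unsupported hypervisor type: %s" %
                                  self.hyper)
        self.host = hostname
        return uri_func()

That way a bogus 'hypervisor' param shows up as a test error with a 
clear message rather than a raw AttributeError traceback.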

> +        self.host = hostname
> +        return uri_func()
> +
> +    def get_kvm_uri(self):
> +        """
> +        Return kvm uri.
> +        """
> +        uri = "qemu+ssh://"+ self.host + "/system"
> +        return uri
> +
> +    def get_xen_uri(self):
> +        """
> +        Return xen uri.
> +        """
> +        uri = "xen+ssh://"+ self.host + "/"
> +        return uri
> +
> +    def get_esx_uri(self):
> +        """
> +        Return esx uri.
> +        """
> +        uri = "esx://"+ self.host + "/?no_verify=1"
> +        return uri
> +
> +    # add new hypervisor in here.
> +
> +
> +class TARGERT(object):

^ Is this a typo? s/TARGERT/TARGET/ ?

> +    """
> +    This class is used for generating command options.
> +    """
> +    def __init__(self, target, uri):
> +        if target is None:
> +            # libvirt is a default target
> +            target = "libvirt"
> +        self.tgt = target
> +        self.uri = uri
> +
> +    def get_cmd_options(self, params):
> +        """
> +        This function is a target dispatcher.
> +        """
> +        opts_func = getattr(self, "get_%s_options" % self.tgt)
> +        self.params = params
> +        return opts_func()

^ Same comment here about the exception and error message.

> +
> +    def get_libvirt_options(self):
> +        """
> +        Return command options.
> +        """
> +        options = " -ic %s -os %s -b %s %s " % (self.uri,
> +                  self.params.get('storage'), self.params.get('network'),
> +                  self.params.get('vms'))
> +        return options
> +
> +    def get_ovirt_options(self):
> +        """
> +        Return command options.
> +        """
> +        options = " -ic %s -o %s -os %s -n %s %s " % (self.uri, self.tgt,
> +                  self.params.get('storage'), self.params.get('network'),
> +                  self.params.get('vms'))
> +
> +        return options
> +
> +    # add new target in here.
> +
> +
> +class LinuxVMCheck(object):
> +    """
> +    This class handles all basic linux VM check operations.
> +    """
> +    def __init__(self, test, params, env):
> +        self.vm = None
> +        self.test = test
> +        self.env = env
> +        self.params = params
> +        self.name = params.get('vms')
> +        self.target = params.get('target')
> +
> +        if self.name is None:
> +            logging.error("vm name not exist")
> +
> +        # libvirt is a default target
> +        if self.target == "libvirt" or self.target is None:
> +            self.vm = lvirt.VM(self.name, self.params, self.test.bindir,
> +                              self.env.get("address_cache"))
> +        elif self.target == "ovirt":
> +            self.vm = ovirt.VM(self.name, self.params, self.test.bindir,
> +                              self.env.get("address_cache"))
> +        else:
> +            logging.error("Doesn't support %s now" % self.target)
> +
> +        if self.vm.is_alive():
> +            self.vm.shutdown()
> +        self.vm.start()
> +
> +    def get_vm_kernel(self, session=None, nic_index=0, timeout=480):

^ Out of curiosity, any special reason for the 480-second timeout?

> +        """
> +        Get vm kernel info.
> +        """
> +        cmd = "uname -r"
> +        session = session or self.vm.wait_for_login(nic_index, timeout)
> +        kernel_version = session.cmd_output(cmd)
> +        logging.debug("The kernel of VM '%s' is: %s" % \
> +                      (self.vm.name, kernel_version))
> +        session.close()
> +        return kernel_version
> +
> +    def get_vm_os_vendor(self, session=None, nic_index=0, timeout=480):
> +        """
> +        Get vm os vendor info.
> +        """
> +        cmd = "cat /etc/issue"
> +        session = session or self.vm.wait_for_login(nic_index, timeout)
> +        output = session.cmd_output(cmd).split('\n', 1)[0]
> +        if re.search('Red Hat', output):
> +            vendor = 'Red Hat'
> +        elif re.search('Fedora', output):
> +            vendor = 'Fedora Core'
> +        elif re.search('SUSE', output):
> +            vendor = 'SUSE'
> +        elif re.search('Ubuntu', output):
> +            vendor = 'Ubuntu'
> +        elif re.search('Debian', output):
> +            vendor = 'Debian'
> +        else:
> +            vendor = 'Unknown'
> +        logging.debug("The os info is: %s" % output)
> +        logging.debug("The os vendor of VM '%s' is: %s" % \
> +                      (self.vm.name, vendor))
> +        session.close()
> +        return vendor, output
> +
> +    def get_vm_fdisk(self, session=None, nic_index=0, timeout=480):
> +        """
> +        Get vm fdisk info.
> +        """
> +        grep_cmd = "grep '(hd0)' /boot/grub/device.map | cut -d ' ' -f 6"

^ What about systems with grub2 and /boot/grub2/device.map? Maybe this 
could come from a parameter, just like BaseVM.get_cpu_count().
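
For instance (the 'device_map' param name here is just a suggestion, 
keeping the grub 1 path as the default):

    def get_vm_fdisk(self, session=None, nic_index=0, timeout=480):
        """
        Get vm fdisk info.
        """
        # Let the config override the device.map location, so grub2
        # guests can point at /boot/grub2/device.map.
        device_map = self.params.get("device_map",
                                     "/boot/grub/device.map")
        grep_cmd = "grep '(hd0)' %s | cut -d ' ' -f 6" % device_map
        session = session or self.vm.wait_for_login(nic_index, timeout)
        disk_path = session.cmd_output(grep_cmd)

with the rest of the method unchanged.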

> +        session = session or self.vm.wait_for_login(nic_index, timeout)
> +        disk_path = session.cmd_output(grep_cmd)
> +        logging.debug("Disk path is %s" % disk_path)
> +
> +        fdisk_cmd = "fdisk -l %s" % disk_path
> +        fdisk_output = session.cmd_output(fdisk_cmd)
> +        fdisk_info = (fdisk_output.split('\n', 1)[1]).split('[', 1)[0]
> +        logging.debug("The fdisk output is:\n %s" % fdisk_info)
> +
> +        session.close()
> +        return fdisk_info
> +
> +    def get_vm_modprobe_conf(self, session=None, nic_index=0, timeout=480):
> +        """
> +        Get /etc/modprobe.conf info.
> +        """
> +        cmd = "cat /etc/modprobe.conf"
> +        session = session or self.vm.wait_for_login(nic_index, timeout)
> +        modprobe_output = session.cmd_output(cmd)
> +        logging.debug("modprobe conf is:\n %s" % modprobe_output)
> +        session.close()
> +        return modprobe_output
> +
> +    def get_vm_modules(self, session=None, nic_index=0, timeout=480):
> +        """
> +        Get vm modules list.
> +        """
> +        cmd = "lsmod"
> +        session = session or self.vm.wait_for_login(nic_index, timeout)
> +        modules = session.cmd_output(cmd)
> +        logging.debug("VM modules list is:\n %s" % modules)
> +        session.close()
> +        return modules
> +
> +    def get_vm_pci_list(self, session=None, nic_index=0, timeout=480):
> +        """
> +        Get vm pci list.
> +        """
> +        cmd = "lspci"
> +        session = session or self.vm.wait_for_login(nic_index, timeout)
> +        lspci_output = session.cmd_output(cmd)
> +        logging.debug("VM pci devices list is:\n %s" % lspci_output)
> +        session.close()
> +        return lspci_output
> +
> +    def get_vm_rc_local(self, session=None, nic_index=0, timeout=480):
> +        """
> +        Get vm /etc/rc.local output.
> +        """
> +        cmd = "cat /etc/rc.local"
> +        session = session or self.vm.wait_for_login(nic_index, timeout)
> +        rc_output = session.cmd_output(cmd)
> +        session.close()
> +        return rc_output
> +
> +    def check_vmware_tools(self, session=None, nic_index=0, timeout=480):
> +        """
> +        Check vmware tools.
> +        """
> +        rpm_cmd = "rpm -q VMwareTools"
> +        ls_cmd = "ls /usr/bin/vmware-uninstall-tools.pl"
> +        session = session or self.vm.wait_for_login(nic_index, timeout)
> +        rpm_output = session.cmd_output(rpm_cmd)
> +        ls_output = session.cmd_output(ls_cmd)
> +        session.close()
> +
> +        if re.search("not installed", rpm_output) or \
> +           re.search("No such file", ls_output):
> +            return True
> +        else:
> +            return False
> +
> +    def get_vm_tty(self, session=None, nic_index=0, timeout=480):
> +        """
> +        Get vm tty config.
> +        """
> +        cmd = "cat /etc/securetty /etc/inittab /boot/grub/grub.conf"
> +        session = session or self.vm.wait_for_login(nic_index, timeout)
> +        tty = session.cmd_output(cmd)
> +        session.close()
> +        return tty
> +
> +    def get_vm_video(self, session=None, nic_index=0, timeout=480):
> +        """
> +        Get vm video config.
> +        """
> +        cmd = "cat /etc/X11/xorg.conf /etc/X11/XF86Config"
> +        session = session or self.vm.wait_for_login(nic_index, timeout)
> +        xorg_output = session.cmd_output(cmd)
> +        session.close()
> +        return xorg_output

^ This makes me think that a LinuxVM mixin class could make a nice 
addition to the BaseVM code. Mixing a LibvirtVM+LinuxVM or 
KvmVM+WindowsVM would be really nice and powerful. Thoughts?
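
Rough sketch of what I have in mind (class names are hypothetical; 
it assumes the host class provides wait_for_login(), which both the 
libvirt and KVM VM classes do):

    class LinuxVMMixin(object):
        """
        Guest OS checks shared by any VM class that can open a session.
        """
        def get_vm_kernel(self, session=None, nic_index=0, timeout=480):
            """
            Get vm kernel info.
            """
            session = session or self.wait_for_login(nic_index, timeout)
            try:
                return session.cmd_output("uname -r")
            finally:
                session.close()


    class LibvirtLinuxVM(LinuxVMMixin, lvirt.VM):
        """
        A libvirt-managed VM with the linux guest helpers mixed in.
        """
        pass

The check classes in this patch could then shrink to thin wrappers, 
and a WindowsVMMixin would slot in the same way.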

> +
> +
> +class WindowsVMCheck(object):
> +    """
> +    This class handles all basic windows VM check operations.
> +    """
> +    pass

