Re: [libvirt] [sandbox][PATCH v2] Fix delete of running container
On 08/07/2013 09:34 PM, Daniel P. Berrange wrote: On Wed, Aug 07, 2013 at 09:24:14PM +0800, Wayne Sun wrote: Delete running container is not supprted and will report an error. Related to bug: https://bugzilla.redhat.com/show_bug.cgi?id=994495 Signed-off-by: Wayne Sun g...@redhat.com --- bin/virt-sandbox-service | 15 ++- 1 files changed, 10 insertions(+), 5 deletions(-) diff --git a/bin/virt-sandbox-service b/bin/virt-sandbox-service index 550d46c..c07c33b 100755 --- a/bin/virt-sandbox-service +++ b/bin/virt-sandbox-service @@ -254,11 +254,16 @@ class Container: def delete(self): self.connect() -# Stop service if it is running -try: -self.stop() -except: -pass +# Check container is running or not +cmd = /usr/bin/virsh -c %s list | sed '1d;2d;$d' | awk -F' '\ + '{ print $2}' % self.uri +p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) +out, err = p.communicate() +if p.returncode and p.returncode != 0: +raise OSError(_(Failed to list running domain)) + +if self.name in out.splitlines(): +raise ValueError([_(Delete running container is not supported)]) virt-sandbox-service already has a connection to libvirt - no need to spawn virsh here. Just do something like this (untested): self.conn.fetch_domains() dom = self.conn.find_domain_by_name(self.name) info = dom.get_info() if info.state == LibvirtGObject.DomainState.RUNNING: .error... Daniel My limit thought is use virsh or libvirt python api to check domain state, apparently this is better here. This totally works as I tested, v3 on the way. Thanks! Wayne Sun 2013-08-08 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [sandbox][PATCH v3] Fix delete of running container
Delete running container is not supported and will report an error. Related to bug: https://bugzilla.redhat.com/show_bug.cgi?id=994495 v1: Fix stop function and delete running container. v2: Delete running container is not allowed, spawn virsh to get domain status. v3: Using existing libvirt connection to get domain status. Signed-off-by: Wayne Sun g...@redhat.com --- bin/virt-sandbox-service | 10 +- 1 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bin/virt-sandbox-service b/bin/virt-sandbox-service index 550d46c..03873c9 100755 --- a/bin/virt-sandbox-service +++ b/bin/virt-sandbox-service @@ -254,11 +254,11 @@ class Container: def delete(self): self.connect() -# Stop service if it is running -try: -self.stop() -except: -pass +self.conn.fetch_domains(None) +dom = self.conn.find_domain_by_name(self.name) +info = dom.get_info() +if info.state == LibvirtGObject.DomainState.RUNNING: +raise ValueError([_(Cannot delete running container)]) # Not sure we should remove content if os.path.exists(self.dest): -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [sandbox][PATCH] Fix delete of running containers
The stop function is removed since 0.5.0, update delete function using virsh destroy to stop container. Signed-off-by: Wayne Sun g...@redhat.com --- bin/virt-sandbox-service |6 -- 1 files changed, 4 insertions(+), 2 deletions(-) diff --git a/bin/virt-sandbox-service b/bin/virt-sandbox-service index 550d46c..926d1d5 100755 --- a/bin/virt-sandbox-service +++ b/bin/virt-sandbox-service @@ -254,9 +254,11 @@ class Container: def delete(self): self.connect() -# Stop service if it is running +# Stop container if it is running try: -self.stop() +p = Popen([/usr/bin/virsh, -c, self.uri, destroy, self.name], + stdout=PIPE, stderr=PIPE) +p.communicate() except: pass -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [sandbox][PATCH] Fix delete of running containers
On 08/07/2013 08:08 PM, Daniel P. Berrange wrote: On Wed, Aug 07, 2013 at 07:56:05PM +0800, Wayne Sun wrote: The stop function is removed since 0.5.0, update delete function using virsh destroy to stop container. Signed-off-by: Wayne Sun g...@redhat.com --- bin/virt-sandbox-service |6 -- 1 files changed, 4 insertions(+), 2 deletions(-) diff --git a/bin/virt-sandbox-service b/bin/virt-sandbox-service index 550d46c..926d1d5 100755 --- a/bin/virt-sandbox-service +++ b/bin/virt-sandbox-service @@ -254,9 +254,11 @@ class Container: def delete(self): self.connect() -# Stop service if it is running +# Stop container if it is running try: -self.stop() +p = Popen([/usr/bin/virsh, -c, self.uri, destroy, self.name], + stdout=PIPE, stderr=PIPE) +p.communicate() except: pass Hmm, I'm not convinced that we should allow deletion of sanboxes that are running at all. IMHO it is better if we just report an error to the admin if they attempt to delete a running sandbox, since I think that would commonly be a mistake they should be protected from. Daniel Yes, the right logic is not allow it, i'll work on v2 for this. Thanks! Wayne Sun -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [sandbox][PATCH v2] Fix delete of running container
Delete running container is not supported and will report an error. Related to bug: https://bugzilla.redhat.com/show_bug.cgi?id=994495 Signed-off-by: Wayne Sun g...@redhat.com --- bin/virt-sandbox-service | 15 ++- 1 files changed, 10 insertions(+), 5 deletions(-) diff --git a/bin/virt-sandbox-service b/bin/virt-sandbox-service index 550d46c..c07c33b 100755 --- a/bin/virt-sandbox-service +++ b/bin/virt-sandbox-service @@ -254,11 +254,16 @@ class Container: def delete(self): self.connect() -# Stop service if it is running -try: -self.stop() -except: -pass +# Check container is running or not +cmd = /usr/bin/virsh -c %s list | sed '1d;2d;$d' | awk -F' '\ + '{ print $2}' % self.uri +p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) +out, err = p.communicate() +if p.returncode and p.returncode != 0: +raise OSError(_(Failed to list running domain)) + +if self.name in out.splitlines(): +raise ValueError([_(Delete running container is not supported)]) # Not sure we should remove content if os.path.exists(self.dest): -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [sandbox][PATCH] Fix nits in virt-sandbox-service when raise ValueError
Put error msg in list when raise ValueError. This fix is for bug: [virt-sandbox-service] execute command with unsupported URI error msg is not right https://bugzilla.redhat.com/show_bug.cgi?id=967705 Signed-off-by: Wayne Sun g...@redhat.com --- bin/virt-sandbox-service |4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/virt-sandbox-service b/bin/virt-sandbox-service index 4496b29..1db3c09 100755 --- a/bin/virt-sandbox-service +++ b/bin/virt-sandbox-service @@ -878,7 +878,7 @@ def sandbox_list(args): def sandbox_reload(args): config = read_config(args.name) if isinstance(config, gi.repository.LibvirtSandbox.ConfigServiceGeneric): -raise ValueError(_(Generic Containers do not support reload)) +raise ValueError([_(Generic Containers do not support reload)]) container = SystemdContainer(uri = args.uri, config = config) container.reload(args.unitfiles) @@ -931,7 +931,7 @@ def fullpath(cmd): def execute(args): if args.uri != lxc:///: -raise ValueError(_(Can only execute commands inside of linux containers.)) +raise ValueError([_(Can only execute commands inside of linux containers.)]) myexec = [ virsh, -c, args.uri, lxc-enter-namespace ] #myexec = [ virt-sandbox-service-util, execute ] -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH] Add 2 emulatorpin cases cover config and live flags
- use pinEmulator to pin domain emulator to host cpu - 2 cases cover config and live flags - cpulist with '^', '-' and ',' is supported to give multiple host cpus Related bug 916493: pinEmulator and emulatorPinInfo should be simple with required params https://bugzilla.redhat.com/show_bug.cgi?id=916493 is fixed, so the test can run successfully now. Signed-off-by: Wayne Sun g...@redhat.com --- repos/setVcpus/emulatorpin_config.py | 97 +++ repos/setVcpus/emulatorpin_live.py | 98 2 files changed, 195 insertions(+) create mode 100644 repos/setVcpus/emulatorpin_config.py create mode 100644 repos/setVcpus/emulatorpin_live.py diff --git a/repos/setVcpus/emulatorpin_config.py b/repos/setVcpus/emulatorpin_config.py new file mode 100644 index 000..9b94f98 --- /dev/null +++ b/repos/setVcpus/emulatorpin_config.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python +# Test domain emulator pin with flag VIR_DOMAIN_AFFECT_CONFIG, check +# domain config xml with emulatorpin configuration. + +import re +from xml.dom import minidom + +import libvirt +from libvirt import libvirtError + +from src import sharedmod +from utils import utils + +required_params = ('guestname', 'cpulist',) +optional_params = {} + +def emulatorpin_check(domobj, cpumap): +check domain config xml with emulatorpin element + +guestxml = domobj.XMLDesc(2) +logger.debug(domain %s xml :\n%s %(domobj.name(), guestxml)) + +doc = minidom.parseString(guestxml) +emulatorpin = doc.getElementsByTagName('emulatorpin') +if not emulatorpin: +logger.error(no emulatorpin element in domain xml) +return 1 + +if not emulatorpin[0].hasAttribute('cpuset'): +logger.error(no cpuset attribute with emulatorpin in domain xml) +return 1 +else: +emulator_attr = emulatorpin[0].getAttributeNode('cpuset') +cpulist = emulator_attr.nodeValue +cpumap_tmp = utils.param_to_tuple(cpulist, maxcpu) + +if cpumap_tmp == cpumap: +logger.info(cpuset is as expected in domain xml) +return 0 +else: +logger.error(cpuset is not as expected in domain xml) +return 1 + 
+def emulatorpin_config(params): +pin domain emulator to host cpu with config flag + +global logger +logger = params['logger'] +params.pop('logger') +guestname = params['guestname'] +cpulist = params['cpulist'] + +logger.info(the name of virtual machine is %s % guestname) +logger.info(the given cpulist is %s % cpulist) + +global maxcpu +maxcpu = utils.get_host_cpus() +logger.info(%s physical cpu on host % maxcpu) + +conn = sharedmod.libvirtobj['conn'] + +try: +domobj = conn.lookupByName(guestname) +cpumap = utils.param_to_tuple(cpulist, maxcpu) + +if not cpumap: +logger.error(cpulist: Invalid format) +return 1 + +logger.debug(cpumap for emulator pin is:) +logger.debug(cpumap) + +logger.info(pin domain emulator to host cpulist %s with flag: %s % +(cpulist, libvirt.VIR_DOMAIN_AFFECT_CONFIG)) +domobj.pinEmulator(cpumap, libvirt.VIR_DOMAIN_AFFECT_CONFIG) + +logger.info(check emulator pin info) +ret = domobj.emulatorPinInfo(libvirt.VIR_DOMAIN_AFFECT_CONFIG) +logger.debug(emulator pin info is:) +logger.debug(ret) +if ret == cpumap: +logger.info(emulator pin info is expected) +else: +logger.error(emulator pin info is not expected) +return 1 +except libvirtError, e: +logger.error(libvirt call failed: + str(e)) +return 1 + +logger.info(check domain emulatorpin configuration in xml) +ret = emulatorpin_check(domobj, cpumap) +if ret: +logger.error(domain emulator pin check failed) +return 1 +else: +logger.info(domain emulator pin check succeed) +return 0 diff --git a/repos/setVcpus/emulatorpin_live.py b/repos/setVcpus/emulatorpin_live.py new file mode 100644 index 000..08b7073 --- /dev/null +++ b/repos/setVcpus/emulatorpin_live.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python +# Test domain emulator pin with flag VIR_DOMAIN_AFFECT_LIVE, check +# emulator process status under domain task list on host. 
+ +import re + +import libvirt +from libvirt import libvirtError + +from src import sharedmod +from utils import utils + +required_params = ('guestname', 'cpulist',) +optional_params = {} + +def emulatorpin_check(guestname, cpumap): +check emulator process status of the running virtual machine + grep Cpus_allowed_list /proc/PID/status + +tmp_str = '' +cmd = cat /var/run/libvirt/qemu/%s.pid % guestname +status, pid = utils.exec_cmd(cmd, shell=True) +if status: +logger.error(failed to get the pid of domain %s % guestname) +return 1 + +cmd = grep Cpus_allowed_list /proc/%s
[libvirt] [test-API][PATCH] Add env check function for memory_params_live
In this case, domain memory cgroup path is hardcoded and fail the case after cgroup path changed recently. To avoid such failure, add check function for lscgroup command before run this case, if check fail then skip this case. Signed-off-by: Wayne Sun g...@redhat.com --- repos/domain/memory_params_live.py | 36 ++-- 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/repos/domain/memory_params_live.py b/repos/domain/memory_params_live.py index 44fb8b4..280792a 100644 --- a/repos/domain/memory_params_live.py +++ b/repos/domain/memory_params_live.py @@ -4,6 +4,7 @@ import os import math +import commands from xml.dom import minidom import libvirt @@ -15,27 +16,35 @@ required_params = ('guestname', 'hard_limit', 'soft_limit', 'swap_hard_limit', ) optional_params = {} UNLIMITED = 9007199254740991 -CGROUP_PATH = /cgroup/memory/libvirt/qemu +CGROUP_PATH = /cgroup/ def get_cgroup_setting(guestname): get domain memory parameters in cgroup if os.path.exists(CGROUP_PATH): -cgroup_path = %s/%s % (CGROUP_PATH, guestname) +cgroup_path = CGROUP_PATH else: -cgroup_path = /sys/fs%s/%s % (CGROUP_PATH, guestname) +cgroup_path = /sys/fs%s % CGROUP_PATH -f = open(%s/memory.limit_in_bytes % cgroup_path) +cmd = lscgroup | grep %s | grep memory: % guestname +ret, out = commands.getstatusoutput(cmd) +if ret: +logger.error(out) +return 1 +else: +mem_cgroup_path = %s%s % (cgroup_path, out.replace(':', '')) + +f = open(%s/memory.limit_in_bytes % mem_cgroup_path) hard = int(f.read()) logger.info(memory.limit_in_bytes value is %s % hard) f.close() -f = open(%s/memory.soft_limit_in_bytes % cgroup_path) +f = open(%s/memory.soft_limit_in_bytes % mem_cgroup_path) soft = int(f.read()) logger.info(memory.soft_limit_in_bytes value is %s % soft) f.close() -f = open(%s/memory.memsw.limit_in_bytes % cgroup_path) +f = open(%s/memory.memsw.limit_in_bytes % mem_cgroup_path) swap = int(f.read()) logger.info(memory.memsw.limit_in_bytes value is %s % swap) f.close() @@ -98,6 +107,10 @@ def 
memory_params_live(params): logger.info(check memory parameters in cgroup) ret = get_cgroup_setting(guestname) +if ret == 1: +logger.error(fail to get domain memory cgroup setting) +return 1 + for i in param_dict.keys(): if math.fabs(param_dict[i] - ret[i]) 1: logger.error(%s value not match with cgroup setting % i) @@ -110,3 +123,14 @@ def memory_params_live(params): return 1 return 0 + +def memory_params_live_check(params): +check lscgroup packages + +logger = params['logger'] +cmd = 'lscgroup' +ret, out = commands.getstatusoutput(cmd) +if ret and 'command not found' in out: +logger.error(out) +logger.error(package libcgroup or libcgroup-tools is not installed) +return 1 -- 1.8.2.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH v2] Add 2 host node memory API cases
v1: add 2 host node memory cases and update conf - node_mem_param: tuning host node memory parameters - node_memory: get host node memory info, including host free memory, node free memory and node memory stats - numa_param conf is updated with the 2 new cases v2: node_mem_param: polish codes with better readability node_memory: direct fetch info in /sys/devices/system/node/node* other than using command 'numastat' Signed-off-by: Wayne Sun g...@redhat.com --- cases/numa_param.conf| 8 repos/numa/node_mem_param.py | 81 repos/numa/node_memory.py| 107 +++ 3 files changed, 196 insertions(+) create mode 100644 repos/numa/node_mem_param.py create mode 100644 repos/numa/node_memory.py diff --git a/cases/numa_param.conf b/cases/numa_param.conf index 64268a3..515fb1f 100644 --- a/cases/numa_param.conf +++ b/cases/numa_param.conf @@ -1,3 +1,11 @@ +numa:node_memory + +numa:node_mem_param +shm_pages_to_scan +200 +shm_sleep_millisecs +20 + domain:install_linux_cdrom guestname $defaultname diff --git a/repos/numa/node_mem_param.py b/repos/numa/node_mem_param.py new file mode 100644 index 000..86242b1 --- /dev/null +++ b/repos/numa/node_mem_param.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python +# Test tuning host node memory parameters + +import libvirt +from libvirt import libvirtError + +from src import sharedmod + +required_params = () +optional_params = {shm_pages_to_scan: 100, + shm_sleep_millisecs: 20, + shm_merge_across_nodes: 1 + } + +KSM_PATH = /sys/kernel/mm/ksm/ + +def node_mem_param(params): +test set host node memory parameters + +logger = params['logger'] +shm_pages_to_scan = params.get('shm_pages_to_scan') +shm_sleep_millisecs = params.get('shm_sleep_millisecs') +shm_merge_across_nodes = params.get('shm_merge_across_nodes') + +if not shm_pages_to_scan \ +and not shm_sleep_millisecs \ +and not shm_merge_across_nodes: +logger.error(given param is none) +return 1 + +param_dict = {} +for i in optional_params.keys(): +if eval(i): +param_dict[i] = int(eval(i)) + 
+logger.info(the given param dict is: %s % param_dict) + +conn = sharedmod.libvirtobj['conn'] + +try: +logger.info(get host node memory parameters) +mem_pre = conn.getMemoryParameters(0) +logger.info(host node memory parameters is: %s % mem_pre) + +logger.info(set host node memory parameters with given param %s % +param_dict) +conn.setMemoryParameters(param_dict, 0) +logger.info(set host node memory parameters done) + +logger.info(get host node memory parameters) +mem_pos = conn.getMemoryParameters(0) +logger.info(host node memory parameters is: %s % mem_pos) + +for i in param_dict.keys(): +if not mem_pos[i] == param_dict[i]: +logger.error(%s is not set as expected % i) + +logger.info(node memory parameters is set as expected) + +logger.info(check tuning detail under %s % KSM_PATH) + +ksm_dict = {} +for i in param_dict.keys(): +path = %s%s % (KSM_PATH, i[4:]) +f = open(path) +ret = int(f.read().split('\n')[0]) +f.close() +logger.info(%s value is: %s % (path, ret)) +ksm_dict[i] = ret + +if ksm_dict == param_dict: +logger.info(tuning detail under %s is expected % KSM_PATH) +else: +logger.error(check with tuning detail under %s failed % KSM_PATH) +return 1 + +except libvirtError, e: +logger.error(libvirt call failed: + str(e)) +return 1 + +return 0 diff --git a/repos/numa/node_memory.py b/repos/numa/node_memory.py new file mode 100644 index 000..0241f3c --- /dev/null +++ b/repos/numa/node_memory.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python +# Test get host node memory info, including host free +# memory, node free memory and node memory stats. 
+ +import math + +import libvirt +from libvirt import libvirtError + +from src import sharedmod +from utils import utils + +required_params = () +optional_params = {} + +NODE_MEMINFO_PATH = /sys/devices/system/node/node*/meminfo + +def node_memory(params): +test get host node memory info + +logger = params['logger'] + +cmd = lscpu|grep 'NUMA node(s)' +ret, output = utils.exec_cmd(cmd, shell=True) +node_num = int(output[0].split(' ')[-1]) +logger.info(host total nodes number is: %s % node_num) + +conn = sharedmod.libvirtobj['conn'] + +cmd = grep 'MemFree' %s % NODE_MEMINFO_PATH +node_mem = [] +free_total = 0 +ret, out = utils.exec_cmd(cmd, shell=True) +for i in range(node_num): +mem_free = int(out[i].split()[-2]) +node_mem.append(mem_free
[libvirt] [test-API][PATCH] Add 2 host node memory API cases
add 2 host node memory cases and update conf - node_mem_param: tuning host node memory parameters. - node_memory: get host node memory info, including host free memory, node free memory and node memory stats. - numa_param conf is updated with the 2 new cases Signed-off-by: Wayne Sun g...@redhat.com --- cases/numa_param.conf| 8 repos/numa/node_mem_param.py | 86 repos/numa/node_memory.py| 101 +++ 3 files changed, 195 insertions(+) create mode 100644 repos/numa/node_mem_param.py create mode 100644 repos/numa/node_memory.py diff --git a/cases/numa_param.conf b/cases/numa_param.conf index 64268a3..515fb1f 100644 --- a/cases/numa_param.conf +++ b/cases/numa_param.conf @@ -1,3 +1,11 @@ +numa:node_memory + +numa:node_mem_param +shm_pages_to_scan +200 +shm_sleep_millisecs +20 + domain:install_linux_cdrom guestname $defaultname diff --git a/repos/numa/node_mem_param.py b/repos/numa/node_mem_param.py new file mode 100644 index 000..ba6f8f4 --- /dev/null +++ b/repos/numa/node_mem_param.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python +# Test tuning host node memory parameters + +import libvirt +from libvirt import libvirtError + +from src import sharedmod + +required_params = () +optional_params = {shm_pages_to_scan: 100, + shm_sleep_millisecs: 20, + shm_merge_across_nodes: 1 + } + +KSM_PATH = /sys/kernel/mm/ksm/ + +def node_mem_param(params): +test set host node memory parameters + +logger = params['logger'] +shm_pages_to_scan = params.get('shm_pages_to_scan') +shm_sleep_millisecs = params.get('shm_sleep_millisecs') +shm_merge_across_nodes = params.get('shm_merge_across_nodes') + +if not shm_pages_to_scan \ +and not shm_sleep_millisecs \ +and not shm_merge_across_nodes: +logger.error(given param is none) +return 1 + +param_dict = {} +tmp = ('shm_pages_to_scan', 'shm_sleep_millisecs', 'shm_merge_across_nodes') +tmp1 = ('pages_to_scan', 'sleep_millisecs', 'merge_across_nodes') +for i in tmp: +if eval(i): +param_dict[i] = int(eval(i)) + +logger.info(the given param dict is: %s % 
param_dict) + +conn = sharedmod.libvirtobj['conn'] + +try: +logger.info(get host node memory parameters) +mem_pre = conn.getMemoryParameters(0) +logger.info(host node memory parameters is: %s % mem_pre) + +logger.info(set host node memory parameters with given param %s % +param_dict) +conn.setMemoryParameters(param_dict, 0) +logger.info(set host node memory parameters done) + +logger.info(get host node memory parameters) +mem_pos = conn.getMemoryParameters(0) +logger.info(host node memory parameters is: %s % mem_pos) + +for i in tmp: +if eval(i): +if not mem_pos[i] == param_dict[i]: +logger.error(%s is not set as expected % i) + +logger.info(node memory parameters is set as expected) + +logger.info(check tuning detail under %s % KSM_PATH) + +mem_tmp = {} +for i in tmp1: +str_tmp = 'shm_%s' % i +if eval(str_tmp): +path = %s%s % (KSM_PATH, i) +f = open(path) +ret = int(f.read().split('\n')[0]) +f.close() +logger.info(%s value is: %s % (path, ret)) +mem_tmp[str_tmp] = ret + +if mem_tmp == param_dict: +logger.info(tuning detail under %s is expected % KSM_PATH) +else: +logger.error(check with tuning detail under %s failed % KSM_PATH) +return 1 + +except libvirtError, e: +logger.error(libvirt call failed: + str(e)) +return 1 + +return 0 diff --git a/repos/numa/node_memory.py b/repos/numa/node_memory.py new file mode 100644 index 000..47d3b4a --- /dev/null +++ b/repos/numa/node_memory.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python +# Test get host node memory info, including host free +# memory, node free memory and node memory stats. 
+ +import math + +import libvirt +from libvirt import libvirtError + +from src import sharedmod +from utils import utils + +required_params = () +optional_params = {} + +CMD = numastat -m + +def node_memory(params): +test get host node memory info + +logger = params['logger'] + +cmd = lscpu|grep 'NUMA node(s)' +ret, output = utils.exec_cmd(cmd, shell=True) +node_num = int(output[0].split(' ')[-1]) +logger.info(host total nodes number is: %s % node_num) + +conn = sharedmod.libvirtobj['conn'] + +try: +logger.info(get host total free memory) +mem = conn.getFreeMemory()/1048576 +logger.info(host free memory total is: %s KiB % mem) +ret, out = utils.exec_cmd(CMD, shell=True
[libvirt] [test-API][PATCH v2] Add 7 memory API related cases
v1: add 7 new cases using domain memory related API add 1 conf for domain memory testing v2: fix the file not close problem and a typo 7 new cases are: memory_params_config: test set memory params with config flag memory_params_live: test set memory params with live flag memory_peek: test memory peek memory_stats: test get memory stats set_maxmem_config: test set maximum memory with config flag set_memory_config: test set current memory with config flag set_memory_live: test set current memory with live flag memory hotplug is not supported yet, so live set max memory case is not added. Signed-off-by: Wayne Sun g...@redhat.com --- cases/domain_memory_test.conf| 99 +++ repos/domain/memory_params_config.py | 96 ++ repos/domain/memory_params_live.py | 112 +++ repos/domain/memory_peek.py | 48 +++ repos/domain/memory_stats.py | 65 repos/domain/set_maxmem_config.py| 59 ++ repos/domain/set_memory_config.py| 94 + repos/domain/set_memory_live.py | 86 +++ 8 files changed, 659 insertions(+) create mode 100644 cases/domain_memory_test.conf create mode 100644 repos/domain/memory_params_config.py create mode 100644 repos/domain/memory_params_live.py create mode 100644 repos/domain/memory_peek.py create mode 100644 repos/domain/memory_stats.py create mode 100644 repos/domain/set_maxmem_config.py create mode 100644 repos/domain/set_memory_config.py create mode 100644 repos/domain/set_memory_live.py diff --git a/cases/domain_memory_test.conf b/cases/domain_memory_test.conf new file mode 100644 index 000..90879ab --- /dev/null +++ b/cases/domain_memory_test.conf @@ -0,0 +1,99 @@ +domain:install_linux_cdrom +guestname +$defaultname +guestos +$defaultos +guestarch +$defaultarch +vcpu +$defaultvcpu +memory +$defaultmem +hddriver +$defaulthd +nicdriver +$defaultnic +macaddr +54:52:00:4a:c1:22 + +domain:balloon_memory +guestname +$defaultname +memorypair +1024,2048 + +domain:destroy +guestname +$defaultname + +domain:memory_params_config +guestname +$defaultname +hard_limit +0 +soft_limit 
+9007199254740991 +swap_hard_limit +-1 + +domain:set_maxmem_config +guestname +$defaultname +memory +16777216 + +domain:set_memory_config +guestname +$defaultname +memory +1048576 +maxmem +4194304 + +domain:start +guestname +$defaultname + +domain:memory_stats +guestname +$defaultname + +domain:memory_peek +guestname +$defaultname + +domain:memory_params_live +guestname +$defaultname +hard_limit +25417224 +soft_limit +9007199254740900 +swap_hard_limit +-1 + +domain:set_memory_live +guestname +$defaultname +memory +2097152 +username +$username +password +$password + +domain:set_memory_config +guestname +$defaultname +memory +4194304 + +domain:destroy +guestname +$defaultname + +domain:undefine +guestname +$defaultname + +options cleanup=enable diff --git a/repos/domain/memory_params_config.py b/repos/domain/memory_params_config.py new file mode 100644 index 000..af9781b --- /dev/null +++ b/repos/domain/memory_params_config.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python +# Test set domain memory parameters with flag +# VIR_DOMAIN_AFFECT_CONFIG + +from xml.dom import minidom + +import libvirt +from libvirt import libvirtError + +from src import sharedmod + +required_params = ('guestname', 'hard_limit', 'soft_limit', 'swap_hard_limit', ) +optional_params = {} + +UNLIMITED = 9007199254740991 + +def get_memory_config(domobj, param_dict): +get domain config memory parameters + +new_dict = {} +try: +guestxml = domobj.XMLDesc(2) +logger.debug(domain %s xml is :\n%s %(domobj.name(), guestxml)) +xml = minidom.parseString(guestxml) +logger.info(get domain memory parameters in config xml) +for i in param_dict.keys(): +if xml.getElementsByTagName(i): +limit_element = xml.getElementsByTagName(i)[0] +limit = int(limit_element.childNodes[0].data) +logger.info(%s in config xml is: %s % (i, limit)) +new_dict[i] = limit +else: +logger.info(%s is not in config xml % i) +new_dict[i] = 0 + +except libvirtError, e: +logger.error(libvirt call failed: + str(e)) +return False + +return new_dict 
+ +def memory_params_config(params): +set domain memory parameters with config flag and check
[libvirt] [test-API][PATCH] Add 7 memory API related cases
add 7 new cases using domain memory related API add 1 conf for domain memory testing 7 new cases are: memory_params_config: test set memory params with config flag memory_params_live: test set memory params with live flag memory_peek: test memory peek memory_stats: test get memory stats set_maxmem_config: test set maximum memory with config flag set_memory_config: test set current memory with config flag set_memory_live: test set current memory with live flag memory hotplug is not supported yet, so live set max memory case is not added. Signed-off-by: Wayne Sun g...@redhat.com --- cases/domain_memory_test.conf| 99 +++ repos/domain/memory_params_config.py | 96 ++ repos/domain/memory_params_live.py | 109 +++ repos/domain/memory_peek.py | 48 +++ repos/domain/memory_stats.py | 65 + repos/domain/set_maxmem_config.py| 59 +++ repos/domain/set_memory_config.py| 94 ++ repos/domain/set_memory_live.py | 86 +++ 8 files changed, 656 insertions(+) create mode 100644 cases/domain_memory_test.conf create mode 100644 repos/domain/memory_params_config.py create mode 100644 repos/domain/memory_params_live.py create mode 100644 repos/domain/memory_peek.py create mode 100644 repos/domain/memory_stats.py create mode 100644 repos/domain/set_maxmem_config.py create mode 100644 repos/domain/set_memory_config.py create mode 100644 repos/domain/set_memory_live.py diff --git a/cases/domain_memory_test.conf b/cases/domain_memory_test.conf new file mode 100644 index 000..90879ab --- /dev/null +++ b/cases/domain_memory_test.conf @@ -0,0 +1,99 @@ +domain:install_linux_cdrom +guestname +$defaultname +guestos +$defaultos +guestarch +$defaultarch +vcpu +$defaultvcpu +memory +$defaultmem +hddriver +$defaulthd +nicdriver +$defaultnic +macaddr +54:52:00:4a:c1:22 + +domain:balloon_memory +guestname +$defaultname +memorypair +1024,2048 + +domain:destroy +guestname +$defaultname + +domain:memory_params_config +guestname +$defaultname +hard_limit +0 +soft_limit +9007199254740991 +swap_hard_limit +-1 + 
+domain:set_maxmem_config +guestname +$defaultname +memory +16777216 + +domain:set_memory_config +guestname +$defaultname +memory +1048576 +maxmem +4194304 + +domain:start +guestname +$defaultname + +domain:memory_stats +guestname +$defaultname + +domain:memory_peek +guestname +$defaultname + +domain:memory_params_live +guestname +$defaultname +hard_limit +25417224 +soft_limit +9007199254740900 +swap_hard_limit +-1 + +domain:set_memory_live +guestname +$defaultname +memory +2097152 +username +$username +password +$password + +domain:set_memory_config +guestname +$defaultname +memory +4194304 + +domain:destroy +guestname +$defaultname + +domain:undefine +guestname +$defaultname + +options cleanup=enable diff --git a/repos/domain/memory_params_config.py b/repos/domain/memory_params_config.py new file mode 100644 index 000..af9781b --- /dev/null +++ b/repos/domain/memory_params_config.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python +# Test set domain memory parameters with flag +# VIR_DOMAIN_AFFECT_CONFIG + +from xml.dom import minidom + +import libvirt +from libvirt import libvirtError + +from src import sharedmod + +required_params = ('guestname', 'hard_limit', 'soft_limit', 'swap_hard_limit', ) +optional_params = {} + +UNLIMITED = 9007199254740991 + +def get_memory_config(domobj, param_dict): +get domain config memory parameters + +new_dict = {} +try: +guestxml = domobj.XMLDesc(2) +logger.debug(domain %s xml is :\n%s %(domobj.name(), guestxml)) +xml = minidom.parseString(guestxml) +logger.info(get domain memory parameters in config xml) +for i in param_dict.keys(): +if xml.getElementsByTagName(i): +limit_element = xml.getElementsByTagName(i)[0] +limit = int(limit_element.childNodes[0].data) +logger.info(%s in config xml is: %s % (i, limit)) +new_dict[i] = limit +else: +logger.info(%s is not in config xml % i) +new_dict[i] = 0 + +except libvirtError, e: +logger.error(libvirt call failed: + str(e)) +return False + +return new_dict + +def memory_params_config(params): +set 
domain memory parameters with config flag and check + +global logger +logger = params['logger
[libvirt] [test-API][PATCH] Fix the screenshot case
Fix missing sharemod and nonexistent filename Signed-off-by: Wayne Sun g...@redhat.com --- repos/domain/screenshot.py |5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/repos/domain/screenshot.py b/repos/domain/screenshot.py index c0db17f..8284fb4 100644 --- a/repos/domain/screenshot.py +++ b/repos/domain/screenshot.py @@ -6,6 +6,7 @@ import os import mimetypes import libvirt +from src import sharedmod required_params = ('guestname', 'filename',) optional_params = {'screen' : 0} @@ -30,9 +31,9 @@ def screenshot(params): ext = mimetypes.guess_extension(mime) or '.ppm' last_filename = params['filename'] + ext -f = file(filename, 'w') +f = file(last_filename, 'w') -logger.debug('Saving screenshot into %s' % filename) +logger.debug('Saving screenshot into %s' % last_filename) st.recvAll(saver, f) logger.debug('Mimetype of the file is %s' % mime) -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH] Add storage cases of pool list, pool lookup and volume lookup
Add 3 simple storage realted API cases * pool_list_active is only for testing with flags: libvirt.VIR_CONNECT_LIST_STORAGE_POOLS_ACTIVE * pool_lookup is for testing lookup pool object by name, UUID, UUIDString and volume object * vol_lookup is for testing lookup volume object by name, key and path * add cases into storage_dir.conf Signed-off-by: Wayne Sun g...@redhat.com --- cases/storage_dir.conf| 14 +++ repos/storage/pool_list_active.py | 50 + repos/storage/pool_lookup.py | 74 + repos/storage/vol_lookup.py | 62 +++ 4 files changed, 200 insertions(+), 0 deletions(-) create mode 100644 repos/storage/pool_list_active.py create mode 100644 repos/storage/pool_lookup.py create mode 100644 repos/storage/vol_lookup.py diff --git a/cases/storage_dir.conf b/cases/storage_dir.conf index 38b349d..2bdb7d6 100644 --- a/cases/storage_dir.conf +++ b/cases/storage_dir.conf @@ -2,14 +2,22 @@ storage:define_dir_pool poolname $defaultpoolname +storage:pool_lookup +poolname +$defaultpoolname + storage:build_dir_pool poolname $defaultpoolname +storage:pool_list_active + storage:activate_pool poolname $defaultpoolname +storage:pool_list_active + storage:create_dir_volume poolname $defaultpoolname @@ -20,6 +28,12 @@ storage:create_dir_volume capacity $defaultvolumesize +storage:vol_lookup +poolname +$defaultpoolname +volname +$defaultvolumename + storage:vol_clone poolname $defaultpoolname diff --git a/repos/storage/pool_list_active.py b/repos/storage/pool_list_active.py new file mode 100644 index 000..3e390c8 --- /dev/null +++ b/repos/storage/pool_list_active.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python +# list active storage pool testing with flag: +# libvirt.VIR_CONNECT_LIST_STORAGE_POOLS_ACTIVE + +import libvirt +from libvirt import libvirtError + +from src import sharedmod + +required_params = () +optional_params = { + } + +def pool_list_active(params): +list active storage pool testing + +global logger +logger = params['logger'] +namelist = [] + +conn = sharedmod.libvirtobj['conn'] 
+try: +pool_num = conn.numOfStoragePools() +logger.info(number of active storage pools is %s % pool_num) + +flag = libvirt.VIR_CONNECT_LIST_STORAGE_POOLS_ACTIVE +poolobj_list = conn.listAllStoragePools(flag) +if not len(poolobj_list) == pool_num: +logger.error(active pool object number mismatched) +return 1 + +for i in poolobj_list: +pool_name = i.name() +namelist.append(pool_name) + +logger.info(active pool name list is %s % namelist) + +active_namelist = conn.listStoragePools() +if namelist == active_namelist: +logger.info(active pool name list matched) +else: +logger.error(active pool name list mismatched) +return 1 + +except libvirtError, e: +logger.error(libvirt call failed: + str(e)) +return 1 + +logger.info(list active storage pool succeed) +return 0 diff --git a/repos/storage/pool_lookup.py b/repos/storage/pool_lookup.py new file mode 100644 index 000..89d7252 --- /dev/null +++ b/repos/storage/pool_lookup.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# storage pool lookup testing + +import libvirt +from libvirt import libvirtError + +from src import sharedmod + +required_params = ('poolname',) +optional_params = { + 'volname': None + } + +def pool_lookup(params): +storage pool lookup testing + +global logger +logger = params['logger'] +poolname = params['poolname'] +volname = params.get('volname') +retval = 0 + +logger.info(the poolname is %s % poolname) +if volname: +logger.info(the given volume name is %s % volname) + +conn = sharedmod.libvirtobj['conn'] +try: +logger.info(lookup the pool object by name: %s % poolname) +poolobj = conn.storagePoolLookupByName(poolname) +if poolobj.name() == poolname: +logger.info(pool object lookup by name succeed) +else: +logger.error(pool object lookup by name failed) +retval += 1 + +uuid = poolobj.UUID() +logger.info(lookup the pool object by UUID: %s % uuid) +poolobj_uuid = conn.storagePoolLookupByUUID(uuid) +if poolobj_uuid.name() == poolname: +logger.info(pool object lookup by UUID succeed) +else: +logger.error(pool 
object lookup by UUID failed) +retval += 1 + +uuidstr = poolobj.UUIDString() +logger.info(lookup the pool object by UUID string: %s % uuidstr) +poolobj_uuidstr = conn.storagePoolLookupByUUIDString(uuidstr
[libvirt] [test-API][PATCH] Add the volume clone case
This is for volume clone testing by using createXMLFrom API * add the vol_clone case under storage it is general for all pool types * add cases into confs: cases/storage_dir.conf cases/storage_logical.conf cases/storage_netfs.conf * add variable 'defaultvolclonename' in global.cfg Signed-off-by: Wayne Sun g...@redhat.com --- cases/storage_dir.conf | 14 +++ cases/storage_logical.conf | 14 +++ cases/storage_netfs.conf | 14 +++ global.cfg |2 + repos/storage/vol_clone.py | 81 5 files changed, 125 insertions(+), 0 deletions(-) create mode 100644 repos/storage/vol_clone.py diff --git a/cases/storage_dir.conf b/cases/storage_dir.conf index dcac700..38b349d 100644 --- a/cases/storage_dir.conf +++ b/cases/storage_dir.conf @@ -20,6 +20,20 @@ storage:create_dir_volume capacity $defaultvolumesize +storage:vol_clone +poolname +$defaultpoolname +volname +$defaultvolumename +clonevolname +$defaultvolclonename + +storage:delete_dir_volume +poolname +$defaultpoolname +volname +$defaultvolclonename + storage:delete_dir_volume poolname $defaultpoolname diff --git a/cases/storage_logical.conf b/cases/storage_logical.conf index 3334abd..d374dfa 100644 --- a/cases/storage_logical.conf +++ b/cases/storage_logical.conf @@ -22,6 +22,20 @@ storage:create_logical_volume capacity $defaultvolumesize +storage:vol_clone +poolname +$defaultpoolname +volname +$defaultvolumename +clonevolname +$defaultvolclonename + +storage:delete_logical_volume +poolname +$defaultpoolname +volname +$defaultvolclonename + storage:delete_logical_volume poolname $defaultpoolname diff --git a/cases/storage_netfs.conf b/cases/storage_netfs.conf index e764813..f486ff4 100644 --- a/cases/storage_netfs.conf +++ b/cases/storage_netfs.conf @@ -24,6 +24,20 @@ storage:create_netfs_volume capacity $defaultvolumesize +storage:vol_clone +poolname +$defaultpoolname +volname +$defaultvolumename +clonevolname +$defaultvolclonename + +storage:delete_netfs_volume +poolname +$defaultpoolname +volname +$defaultvolclonename + 
storage:delete_netfs_volume poolname $defaultpoolname diff --git a/global.cfg b/global.cfg index 9e28614..182acbd 100644 --- a/global.cfg +++ b/global.cfg @@ -156,6 +156,8 @@ defaultpoolname = test_api_pool defaultpoolpath = /var/lib/libvirt/images/dir_pool # default volume name for creating new volume defaultvolumename = test_api_volume +# default clone volume name for clone a volume +defaultvolclonename = test_clone_volume # default volume type for creating a new volume defaultvolumetype = raw # default volume capacity for creating a new volume diff --git a/repos/storage/vol_clone.py b/repos/storage/vol_clone.py new file mode 100644 index 000..abf5644 --- /dev/null +++ b/repos/storage/vol_clone.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python +# volume clone testing + +import os +from xml.dom import minidom + +import libvirt +from libvirt import libvirtError + +from src import sharedmod + +required_params = ('poolname', 'volname', 'clonevolname',) +optional_params = {} + +def prepare_clone_xml(xmlstr, volname): +prepare clone xmldesc by replace name element + with clone souce volume xml + +doc = minidom.parseString(xmlstr) +oldname = doc.getElementsByTagName(name)[0] + +newname = doc.createElement('name') +newnameval = doc.createTextNode(volname) +newname.appendChild(newnameval) + +volume = doc.getElementsByTagName('volume')[0] + +volume.replaceChild(newname, oldname) +newxmlstr = doc.toxml() + +return newxmlstr + +def vol_clone(params): +volume clone testing + +global logger +logger = params['logger'] +poolname = params['poolname'] +volname = params['volname'] +clonevolname = params['clonevolname'] + +logger.info(the poolname is %s, volname is %s % (poolname, volname)) +logger.info(the clone volume name is %s % clonevolname) + +conn = sharedmod.libvirtobj['conn'] +try: +poolobj = conn.storagePoolLookupByName(poolname) +old_vol = poolobj.storageVolLookupByName(volname) + +xmlstr = old_vol.XMLDesc(0) +newxmlstr = prepare_clone_xml(xmlstr, clonevolname) 
+logger.debug(volume xml:\n%s % newxmlstr) + +logger.info(clone volume %s from source volume %s % +(clonevolname, volname)) + +old_volnum = poolobj.numOfVolumes() + +new_vol = poolobj.createXMLFrom(newxmlstr, old_vol, 0) +poolobj.refresh(0) + +new_volnum = poolobj.numOfVolumes() + +logger.debug(new cloned volume path is: %s % new_vol.path()) +if os.access(new_vol.path(), os.R_OK): +logger.info(cloned volume path
[libvirt] [test-API][PATCH] Update activate pool case
* delete the unused function * fix column length and some other nits Signed-off-by: Wayne Sun g...@redhat.com --- repos/storage/activate_pool.py | 45 +-- 1 files changed, 20 insertions(+), 25 deletions(-) diff --git a/repos/storage/activate_pool.py b/repos/storage/activate_pool.py index 064f356..d3adef8 100644 --- a/repos/storage/activate_pool.py +++ b/repos/storage/activate_pool.py @@ -1,8 +1,5 @@ #!/usr/bin/env python -import os -import re -import sys import time import libvirt @@ -13,42 +10,40 @@ from src import sharedmod required_params = ('poolname',) optional_params = {} -def display_pool_info(stg, logger): -Display current storage pool information -logger.debug(current defined storage pool: %s % \ - stg.defstorage_pool_list()) -logger.debug(current active storage pool: %s % stg.storage_pool_list()) - def activate_pool(params): -Undefine a storage pool that's been defined and inactive +activate a storage pool that's been defined + and inactive + logger = params['logger'] poolname = params['poolname'] conn = sharedmod.libvirtobj['conn'] -pool_names = conn.listDefinedStoragePools() -pool_names += conn.listStoragePools() +try: +pool_names = conn.listDefinedStoragePools() +pool_names += conn.listStoragePools() -if poolname in pool_names: -poolobj = conn.storagePoolLookupByName(poolname) -else: -logger.error(%s not found\n % poolname); -return 1 +if poolname in pool_names: +poolobj = conn.storagePoolLookupByName(poolname) +else: +logger.error(%s not found\n % poolname); +return 1 -if poolobj.isActive(): -logger.error(%s is active already % poolname) -return 1 +if poolobj.isActive(): +logger.error(%s is active already % poolname) +return 1 -try: poolobj.create(0) time.sleep(5) if poolobj.isActive(): -logger.info(activating %s storage pool is SUCCESSFUL!!! % poolname) +logger.info(activating %s storage pool is SUCCESSFUL!!! % +poolname) else: -logger.info(activating %s storage pool is UNSUCCESSFUL!!! 
% poolname) +logger.info(activating %s storage pool is UNSUCCESSFUL!!! % +poolname) return 1 + except libvirtError, e: -logger.error(API error message: %s, error code is %s \ - % (e.message, e.get_error_code())) +logger.error(libvirt call failed: + str(e)) return 1 return 0 -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH] Add find storage pool sources cases
Add cases for testing findStoragePoolSources API * add 3 cases for storage type 'netfs', 'iscsi' and 'logical' * add 2 xmls for 'netfs' and 'iscsi' find 'logical' storage pool sources did not require xml start with source tag and xml could be empty * add test conf for find storage pool sources Signed-off-by: Wayne Sun g...@redhat.com --- cases/find_storage_pool_sources.conf | 31 +++ repos/storage/find_iscsi_pool_sources.py | 72 repos/storage/find_logical_pool_sources.py | 82 repos/storage/find_netfs_pool_sources.py | 71 repos/storage/xmls/iscsi_pool_source.xml |3 + repos/storage/xmls/netfs_pool_source.xml |4 ++ 6 files changed, 263 insertions(+), 0 deletions(-) create mode 100644 cases/find_storage_pool_sources.conf create mode 100644 repos/storage/find_iscsi_pool_sources.py create mode 100644 repos/storage/find_logical_pool_sources.py create mode 100644 repos/storage/find_netfs_pool_sources.py create mode 100644 repos/storage/xmls/iscsi_pool_source.xml create mode 100644 repos/storage/xmls/netfs_pool_source.xml diff --git a/cases/find_storage_pool_sources.conf b/cases/find_storage_pool_sources.conf new file mode 100644 index 000..d2e86db --- /dev/null +++ b/cases/find_storage_pool_sources.conf @@ -0,0 +1,31 @@ +storage:find_iscsi_pool_sources +sourcehost +$iscsi_server + +storage:find_netfs_pool_sources +sourcehost +$nfs_server + +storage:define_logical_pool +poolname +$defaultpoolname +sourcename +$defaultpoolname +sourcepath +$defaultpartition + +storage:build_logical_pool +poolname +$defaultpoolname + +storage:find_logical_pool_sources +sourcepath +$defaultpartition + +storage:delete_logical_pool +poolname +$defaultpoolname + +storage:undefine_pool +poolname +$defaultpoolname diff --git a/repos/storage/find_iscsi_pool_sources.py b/repos/storage/find_iscsi_pool_sources.py new file mode 100644 index 000..4b758d2 --- /dev/null +++ b/repos/storage/find_iscsi_pool_sources.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# Test finding storage pool source of 'iscsi' type 
+ +from xml.dom import minidom + +import libvirt +from libvirt import libvirtError + +from src import sharedmod +from utils import utils + +required_params = ('sourcehost',) +optional_params = {'xml' : 'xmls/iscsi_pool_source.xml', + } + +def check_pool_sources(host, xmlstr): +check the iscsi sources with command: + iscsiadm --mode discovery --type sendtargets --portal + +source_val = [] + +doc = minidom.parseString(xmlstr) +for diskTag in doc.getElementsByTagName(source): +device_element = diskTag.getElementsByTagName(device)[0] +attr = device_element.getAttributeNode('path') +path_val = attr.nodeValue + +source_val.append(path_val) + +logger.debug(pool source info list is: %s % source_val) + +cmd = iscsiadm --mode discovery --type sendtargets --portal %s:3260,1 |\ + awk -F' ' '{print $2}' % host +ret, path_list = utils.exec_cmd(cmd, shell=True) + +logger.debug(iscsiadm command output list is: %s % path_list) + +if source_val == path_list: +logger.info(source list matched with iscsiadm command output) +return 0 +else: +logger.error(source list did not match with iscsiadm command output) +return 1 + +def find_iscsi_pool_sources(params): +Find iscsi type storage pool sources from xml +global logger +logger = params['logger'] +sourcehost = params['sourcehost'] +xmlstr = params['xml'] + +conn = sharedmod.libvirtobj['conn'] +try: + +logger.debug(storage source spec xml:\n%s % xmlstr) + +logger.info(find pool sources of iscsi type) +source_xml = conn.findStoragePoolSources('iscsi', xmlstr, 0) +logger.info(pool sources xml description is:\n %s % source_xml) + +ret = check_pool_sources(sourcehost, source_xml) +if ret: +logger.error(pool sources check failed) +return 1 +else: +logger.info(pool sources check succeed) + +except libvirtError, e: +logger.error(libvirt call failed: + str(e)) +return 1 + +return 0 diff --git a/repos/storage/find_logical_pool_sources.py b/repos/storage/find_logical_pool_sources.py new file mode 100644 index 000..255d879 --- /dev/null +++ 
b/repos/storage/find_logical_pool_sources.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python +# Test finding storage pool source of 'logical' type + +from xml.dom import minidom + +import libvirt +from libvirt import libvirtError + +from src import sharedmod +from utils import utils + +required_params = ('sourcepath',) +optional_params = {'xml' : 'xmls/logical_pool.xml', + } + +def check_pool_sources(xmlstr): +check the logical sources
[libvirt] [test-API][PATCH] Add volume resize case with delta flag
Only storage backend for RBD (RADOS Block Device), FS and directory have the resizeVol function, so only testing dir volume here. Flags 'allocate' and 'shrik' are with bug: https://bugzilla.redhat.com/show_bug.cgi?id=804516 they are not supported yet, so leave the case for later. * using volume resize API with flag VIR_STORAGE_VOL_RESIZE_DELTA * using volume info API to get volume info and check * add dir volume resize conf Signed-off-by: Wayne Sun g...@redhat.com --- cases/storage_dir_vol_resize_delta.conf | 47 +++ repos/storage/vol_resize_delta.py | 75 +++ 2 files changed, 122 insertions(+), 0 deletions(-) create mode 100644 cases/storage_dir_vol_resize_delta.conf create mode 100644 repos/storage/vol_resize_delta.py diff --git a/cases/storage_dir_vol_resize_delta.conf b/cases/storage_dir_vol_resize_delta.conf new file mode 100644 index 000..58e15bf --- /dev/null +++ b/cases/storage_dir_vol_resize_delta.conf @@ -0,0 +1,47 @@ +storage:create_dir_pool +poolname +$defaultpoolname + +storage:create_dir_volume +poolname +$defaultpoolname +volname +$defaultvolumename +volformat +$defaultvolumetype +capacity +$defaultvolumesize + +storage:vol_resize_delta +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +1M + +storage:vol_resize_delta +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +2G + +storage:vol_resize_delta +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +4096K + +storage:delete_dir_volume +poolname +$defaultpoolname +volname +$defaultvolumename + +storage:destroy_pool +poolname +$defaultpoolname diff --git a/repos/storage/vol_resize_delta.py b/repos/storage/vol_resize_delta.py new file mode 100644 index 000..a87941e --- /dev/null +++ b/repos/storage/vol_resize_delta.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python +# volume resize testing with delta flags, libvirt storage +# driver only support dir now + +import libvirt +from libvirt import libvirtError + +from src import sharedmod +from utils import utils + 
+required_params = ('poolname', 'volname', 'capacity',) +optional_params = {} + +def vol_resize_delta(params): +test volume resize with delta flags + +global logger +logger = params['logger'] +poolname = params['poolname'] +volname = params['volname'] +capacity = params['capacity'] + +logger.info(the poolname is %s, volname is %s % +(poolname, volname)) + +logger.info(the capacity given is %s % capacity) +out = utils.get_capacity_suffix_size(capacity) +capacity_val = out['capacity_byte'] +logger.debug(the capacity to byte is %s % capacity_val) + +conn = sharedmod.libvirtobj['conn'] +try: +poolobj = conn.storagePoolLookupByName(poolname) +vol = poolobj.storageVolLookupByName(volname) + +logger.info(get volume info before resize) +out = vol.info() +pre_capacity = out[1] +pre_allocation = out[2] +logger.info(volume capacity is %s bytes, allocation is %s bytes % +(pre_capacity, pre_allocation)) + +flag = libvirt.VIR_STORAGE_VOL_RESIZE_DELTA +logger.info(resize %s with capacity %s in pool %s using flag: %s +% (volname, capacity, poolname, flag)) + +vol.resize(capacity_val, flag) + +logger.info(get volume info after resize) +out = vol.info() +post_capacity = out[1] +post_allocation = out[2] +logger.info(volume capacity is %s bytes, allocation is %s bytes % +(post_capacity, post_allocation)) + +logger.info(check resize effect) +if post_capacity - pre_capacity == capacity_val: +logger.info(increased size is expected) +else: +logger.error(increase size not equal to set, resize failed) +return 1 + +if pre_allocation == post_allocation: +logger.info(allocation is expected) +else: +logger.error(allocation changed, resize failed) +return 1 + +logger.info(resize succeed) + +except libvirtError, e: +logger.error(libvirt call failed: + str(e)) +return 1 + +return 0 -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH v2] Add volume upload and download cases
The cases only cover dir volume testing. v1: * test download storage volumes using storage download API. * test upload storage volumes using storage upload API. For upload case, only raw volume format is supported, other format will fail. The offset and length value should be chosen from 0 and 1048576, because upload size is set as 1M. * both case use blocking stream. * sample conf is added. v2: * move digest function to utils * rename cases with prefix 'dir_' to emphasise that they are only for dir vol testing Signed-off-by: Wayne Sun g...@redhat.com --- cases/storage_dir_vol_upload_download.conf | 127 ++ repos/storage/dir_vol_download.py | 131 +++ repos/storage/dir_vol_upload.py| 158 utils/utils.py | 24 4 files changed, 440 insertions(+), 0 deletions(-) create mode 100644 cases/storage_dir_vol_upload_download.conf create mode 100644 repos/storage/dir_vol_download.py create mode 100644 repos/storage/dir_vol_upload.py diff --git a/cases/storage_dir_vol_upload_download.conf b/cases/storage_dir_vol_upload_download.conf new file mode 100644 index 000..fd22720 --- /dev/null +++ b/cases/storage_dir_vol_upload_download.conf @@ -0,0 +1,127 @@ +storage:create_dir_pool +poolname +$defaultpoolname + +storage:dir_vol_upload +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +10M +volformat +raw +offset +0 +length +0 +clean + +storage:dir_vol_upload +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +10M +volformat +raw +offset +1048576 +length +0 +clean + +storage:dir_vol_upload +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +10M +volformat +raw +offset +0 +length +1048576 +clean + +storage:dir_vol_upload +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +10M +volformat +raw +offset +1048576 +length +1048576 +clean + +storage:dir_vol_download +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +50M +volformat +raw +offset +0 +length +0 +clean + +storage:dir_vol_download +poolname 
+$defaultpoolname +volname +$defaultvolumename +capacity +50M +volformat +qcow2 +offset +1048576 +length +0 +clean + +storage:dir_vol_download +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +50M +volformat +qed +offset +0 +length +1048576 +clean + +storage:dir_vol_download +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +50M +volformat +raw +offset +1048576 +length +1048576 +clean + +storage:destroy_pool +poolname +$defaultpoolname diff --git a/repos/storage/dir_vol_download.py b/repos/storage/dir_vol_download.py new file mode 100644 index 000..ddf293b --- /dev/null +++ b/repos/storage/dir_vol_download.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python +# dir storage volume download testing + +import os +import string +from xml.dom import minidom + +import libvirt +from libvirt import libvirtError + +from src import sharedmod +from utils import utils + +required_params = ('poolname', 'volname', 'volformat', 'capacity', 'offset', + 'length',) +optional_params = {'xml' : 'xmls/dir_volume.xml', + } + +def get_pool_path(poolobj): + get pool xml description + +poolxml = poolobj.XMLDesc(0) + +logger.debug(the xml description of pool is %s % poolxml) + +doc = minidom.parseString(poolxml) +path_element = doc.getElementsByTagName('path')[0] +textnode = path_element.childNodes[0] +path_value = textnode.data + +return path_value + +def write_file(path, capacity): +write test data to file + +logger.info(write %s data into file %s % (capacity, path)) +out = utils.get_capacity_suffix_size(capacity) +f = open(path, 'w') +datastr = ''.join(string.lowercase + string.uppercase + + string.digits + '.' 
+ '\n') +repeat = out['capacity_byte'] / 64 +data = ''.join(repeat * datastr) +f.write(data) +f.close() + +def handler(stream, data, file_): +return file_.write(data) + +def dir_vol_download(params): +test volume download and check + +global logger +logger = params['logger'] +poolname = params['poolname'] +volname = params['volname'] +volformat = params['volformat'] +offset = int(params['offset']) +length = int(params['length
[libvirt] [test-API][PATCH] Add dir volume wipe cases
* add dir volume wipe and wipe pattern cases * wipe case compare wiped volume with zero volume file with same capacity * wipe pattern cases support algorithms in: zero|nnsa|dod|bsi|gutmann|schneier|pfitzner7|pfitzner33|random, Besides zero, other algorithms are patterns supported by scrub, some algorithm might fail due to scrub version. * the check method in wipe pattern case for each algorithm is the same, only to make sure digest before and after wipe is different. Signed-off-by: Wayne Sun g...@redhat.com --- cases/storage_dir_vol_wipe.conf | 132 repos/storage/dir_vol_wipe.py | 136 + repos/storage/dir_vol_wipe_pattern.py | 123 + 3 files changed, 391 insertions(+), 0 deletions(-) create mode 100644 cases/storage_dir_vol_wipe.conf create mode 100644 repos/storage/dir_vol_wipe.py create mode 100644 repos/storage/dir_vol_wipe_pattern.py diff --git a/cases/storage_dir_vol_wipe.conf b/cases/storage_dir_vol_wipe.conf new file mode 100644 index 000..aa39415 --- /dev/null +++ b/cases/storage_dir_vol_wipe.conf @@ -0,0 +1,132 @@ +storage:create_dir_pool +poolname +$defaultpoolname + +storage:dir_vol_wipe +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +2M +volformat +raw +clean + +storage:dir_vol_wipe +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +50M +volformat +qcow2 +clean + +storage:dir_vol_wipe +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +30M +volformat +qed +clean + +storage:dir_vol_wipe_pattern +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +2M +volformat +raw +algorithm +zero +clean + +storage:dir_vol_wipe_pattern +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +50M +volformat +qcow2 +algorithm +nnsa +clean + +storage:dir_vol_wipe_pattern +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +30M +volformat +qed +algorithm +pfitzner7 +clean + +storage:dir_vol_wipe_pattern +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +1K +volformat 
+raw +algorithm +random +clean + +storage:dir_vol_wipe_pattern +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +1K +volformat +raw +algorithm +dod +clean + +storage:dir_vol_wipe_pattern +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +1K +volformat +raw +algorithm +bsi +clean + +storage:dir_vol_wipe_pattern +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +1K +volformat +raw +algorithm +gutmann +clean + + +storage:destroy_pool +poolname +$defaultpoolname diff --git a/repos/storage/dir_vol_wipe.py b/repos/storage/dir_vol_wipe.py new file mode 100644 index 000..c020b43 --- /dev/null +++ b/repos/storage/dir_vol_wipe.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python +# volume wipe testing + +import os +import string +from xml.dom import minidom + +import libvirt +from libvirt import libvirtError + +from src import sharedmod +from utils import utils + +required_params = ('poolname', 'volname', 'volformat', 'capacity',) +optional_params = {'xml' : 'xmls/dir_volume.xml', + } + +def get_pool_path(poolobj): + get pool xml description + +poolxml = poolobj.XMLDesc(0) + +logger.debug(the xml description of pool is %s % poolxml) + +doc = minidom.parseString(poolxml) +path_element = doc.getElementsByTagName('path')[0] +textnode = path_element.childNodes[0] +path_value = textnode.data + +return path_value + +def write_file(path, capacity): +write test data to file + +logger.info(write %s data into file %s % (capacity, path)) +out = utils.get_capacity_suffix_size(capacity) +f = open(path, 'w') +datastr = ''.join(string.lowercase + string.uppercase + + string.digits + '.' 
+ '\n') +repeat = out['capacity_byte'] / 64 +data = ''.join(repeat * datastr) +f.write(data) +f.close() + +def dir_vol_wipe(params): +test volume download and check + +global logger +logger = params['logger'] +poolname = params['poolname'] +volname = params['volname'] +volformat = params['volformat'] +capacity = params['capacity'] +xmlstr = params['xml'] + +logger.info(the poolname is %s, volname is %s, volformat is %s % +(poolname, volname, volformat)) + +conn
[libvirt] [test-API][PATCH] Add logical volume download and upload cases
This is for logical volume download and upload testing. * using download and upload API under class virStream. they are functions act as same with download/upload APIs under class virStorageVol, just different entrance. * using logical volume specified xml to create volume. no need to provide volume format. * check method is the same with dir vol download/upload cases. Signed-off-by: Wayne Sun g...@redhat.com --- cases/storage_logical_vol_upload_download.conf | 179 repos/storage/logical_vol_download.py | 125 + repos/storage/logical_vol_upload.py| 152 3 files changed, 456 insertions(+), 0 deletions(-) create mode 100644 cases/storage_logical_vol_upload_download.conf create mode 100644 repos/storage/logical_vol_download.py create mode 100644 repos/storage/logical_vol_upload.py diff --git a/cases/storage_logical_vol_upload_download.conf b/cases/storage_logical_vol_upload_download.conf new file mode 100644 index 000..51b640e --- /dev/null +++ b/cases/storage_logical_vol_upload_download.conf @@ -0,0 +1,179 @@ +storage:define_logical_pool +poolname +$defaultpoolname +sourcename +$defaultpoolname +sourcepath +$defaultpartition + +storage:build_logical_pool +poolname +$defaultpoolname + +storage:activate_pool +poolname +$defaultpoolname + +storage:logical_vol_download +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +50 +offset +0 +length +0 +clean + +storage:delete_logical_volume +poolname +$defaultpoolname +volname +$defaultvolumename + +storage:logical_vol_download +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +50 +offset +0 +length +1048576 +clean + +storage:delete_logical_volume +poolname +$defaultpoolname +volname +$defaultvolumename + +storage:logical_vol_download +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +50 +offset +1048576 +length +0 +clean + +storage:delete_logical_volume +poolname +$defaultpoolname +volname +$defaultvolumename + +storage:logical_vol_download +poolname +$defaultpoolname +volname 
+$defaultvolumename +capacity +50 +offset +1048576 +length +1048576 +clean + +storage:delete_logical_volume +poolname +$defaultpoolname +volname +$defaultvolumename + +storage:logical_vol_upload +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +50 +offset +0 +length +0 +clean + +storage:delete_logical_volume +poolname +$defaultpoolname +volname +$defaultvolumename + +storage:logical_vol_upload +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +50 +offset +0 +length +1048576 +clean + +storage:delete_logical_volume +poolname +$defaultpoolname +volname +$defaultvolumename + +storage:logical_vol_upload +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +50 +offset +1048576 +length +0 +clean + +storage:delete_logical_volume +poolname +$defaultpoolname +volname +$defaultvolumename + +storage:logical_vol_upload +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +50 +offset +1048576 +length +1048576 +clean + +storage:delete_logical_volume +poolname +$defaultpoolname +volname +$defaultvolumename + +storage:destroy_pool +poolname +$defaultpoolname + +storage:delete_logical_pool +poolname +$defaultpoolname + +storage:undefine_pool +poolname +$defaultpoolname diff --git a/repos/storage/logical_vol_download.py b/repos/storage/logical_vol_download.py new file mode 100644 index 000..9797f36 --- /dev/null +++ b/repos/storage/logical_vol_download.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python +# logical storage volume download testing + +import os +import string +from xml.dom import minidom + +import libvirt +from libvirt import libvirtError + +from src import sharedmod +from utils import utils + +required_params = ('poolname', 'volname', 'capacity', 'offset', 'length',) +optional_params = {'xml' : 'xmls/logical_volume.xml', + } + +def get_pool_path(poolobj): + get pool xml description + +poolxml = poolobj.XMLDesc(0) + +logger.debug(the xml description of pool is %s % poolxml) + +doc = minidom.parseString(poolxml) 
+path_element = doc.getElementsByTagName('path')[0] +textnode
[libvirt] [test-API][PATCH] Add volume upload and download cases
* test download storage volumes using storage download API. * test upload storage volumes using storage upload API. For upload case, only raw volume format is supported, other format will fail. The offset and length value should be chosen from 0 and 1048576, because upload size is set as 1M. * both case use blocking stream. * sample conf is added. Signed-off-by: Wayne Sun g...@redhat.com --- cases/storage_vol_upload_download.conf | 127 ++ repos/storage/vol_download.py | 157 +++ repos/storage/vol_upload.py| 183 3 files changed, 467 insertions(+), 0 deletions(-) create mode 100644 cases/storage_vol_upload_download.conf create mode 100644 repos/storage/vol_download.py create mode 100644 repos/storage/vol_upload.py diff --git a/cases/storage_vol_upload_download.conf b/cases/storage_vol_upload_download.conf new file mode 100644 index 000..b393814 --- /dev/null +++ b/cases/storage_vol_upload_download.conf @@ -0,0 +1,127 @@ +storage:create_dir_pool +poolname +$defaultpoolname + +storage:vol_upload +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +10M +volformat +raw +offset +0 +length +0 +clean + +storage:vol_upload +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +10M +volformat +raw +offset +1048576 +length +0 +clean + +storage:vol_upload +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +10M +volformat +raw +offset +0 +length +1048576 +clean + +storage:vol_upload +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +10M +volformat +raw +offset +1048576 +length +1048576 +clean + +storage:vol_download +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +50M +volformat +raw +offset +0 +length +0 +clean + +storage:vol_download +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +50M +volformat +qcow2 +offset +1048576 +length +0 +clean + +storage:vol_download +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +50M +volformat +qed +offset +0 +length +1048576 
+clean + +storage:vol_download +poolname +$defaultpoolname +volname +$defaultvolumename +capacity +50M +volformat +raw +offset +1048576 +length +1048576 +clean + +storage:destroy_pool +poolname +$defaultpoolname diff --git a/repos/storage/vol_download.py b/repos/storage/vol_download.py new file mode 100644 index 000..839bc8a --- /dev/null +++ b/repos/storage/vol_download.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python +# storage volume download testing + +import os +import string +import hashlib +from xml.dom import minidom + +import libvirt +from libvirt import libvirtError + +from src import sharedmod +from utils import utils + +required_params = ('poolname', 'volname', 'volformat', 'capacity', 'offset', + 'length',) +optional_params = {'xml' : 'xmls/dir_volume.xml', + } + +def get_pool_path(poolobj): + get pool xml description + +poolxml = poolobj.XMLDesc(0) + +logger.debug(the xml description of pool is %s % poolxml) + +doc = minidom.parseString(poolxml) +path_element = doc.getElementsByTagName('path')[0] +textnode = path_element.childNodes[0] +path_value = textnode.data + +return path_value + +def write_file(path, capacity): +write test data to file + +logger.info(write %s data into file %s % (capacity, path)) +out = utils.get_capacity_suffix_size(capacity) +f = open(path, 'w') +datastr = ''.join(string.lowercase + string.uppercase + + string.digits + '.' 
+ '\n') +repeat = out['capacity_byte'] / 64 +data = ''.join(repeat * datastr) +f.write(data) +f.close() + +def digest(path, offset, length): +read data from file with length bytes, begin at offset + and return md5 hexdigest + +f = open(path, 'r') +f.seek(offset) +m = hashlib.md5() +done = 0 + +while True: +want = 1024 +if length and length - done want: +want = length - done +outstr = f.read(want) +got = len(outstr) +if got == 0: +break +done += got +m.update(outstr) + +logger.debug(total %s bytes data is readed % done) + +f.close() +return m.hexdigest() + +def handler(stream, data, file_): +return file_.write(data) + +def
[libvirt] [test-API][PATCH] set_vcpus_*: Add check point with vcpusFlags API
* add check with vcpusFlags API, it return current or max vcpu base on given flags Signed-off-by: Wayne Sun g...@redhat.com --- repos/setVcpus/set_vcpus_config.py | 28 repos/setVcpus/set_vcpus_live.py |8 2 files changed, 32 insertions(+), 4 deletions(-) diff --git a/repos/setVcpus/set_vcpus_config.py b/repos/setVcpus/set_vcpus_config.py index 289dad1..3bb3984 100644 --- a/repos/setVcpus/set_vcpus_config.py +++ b/repos/setVcpus/set_vcpus_config.py @@ -63,19 +63,39 @@ def set_vcpus_config(params): try: domobj = conn.lookupByName(guestname) if vcpu: +flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG logger.info(the given vcpu number is %s % vcpu) logger.info(set domain vcpu as %s with flag: %s % -(vcpu, libvirt.VIR_DOMAIN_AFFECT_CONFIG)) -domobj.setVcpusFlags(int(vcpu), libvirt.VIR_DOMAIN_AFFECT_CONFIG) +(vcpu, flags)) +domobj.setVcpusFlags(int(vcpu), flags) logger.info(set domain vcpu succeed) +logger.info(check with vcpusFlags api) +ret = domobj.vcpusFlags(flags) +logger.info(vcpusFlags return current vcpu is: %s % ret) +if ret == int(vcpu): +logger.info(vcpusFlags check succeed) +else: +logger.error(vcpusFlags check failed) +return 1 + if maxvcpu: +flags = libvirt.VIR_DOMAIN_VCPU_MAXIMUM logger.info(the given max vcpu number is %s % maxvcpu) logger.info(set domain maximum vcpu as %s with flag: %s % -(maxvcpu, libvirt.VIR_DOMAIN_VCPU_MAXIMUM)) -domobj.setVcpusFlags(int(maxvcpu), libvirt.VIR_DOMAIN_VCPU_MAXIMUM) +(maxvcpu, flags)) +domobj.setVcpusFlags(int(maxvcpu), flags) logger.info(set domain vcpu succeed) +logger.info(check with vcpusFlags api) +ret = domobj.vcpusFlags(flags) +logger.info(vcpusFlags return max vcpu is: %s % ret) +if ret == int(maxvcpu): +logger.info(vcpusFlags check succeed) +else: +logger.error(vcpusFlags check failed) +return 1 + except libvirtError, e: logger.error(libvirt call failed: + str(e)) return 1 diff --git a/repos/setVcpus/set_vcpus_live.py b/repos/setVcpus/set_vcpus_live.py index 35a2976..fdd8f8a 100644 --- a/repos/setVcpus/set_vcpus_live.py 
+++ b/repos/setVcpus/set_vcpus_live.py @@ -82,6 +82,14 @@ def set_vcpus_live(params): logger.info(set domain vcpu as %s with flag: %s % (vcpu, libvirt.VIR_DOMAIN_VCPU_LIVE)) domobj.setVcpusFlags(vcpu, libvirt.VIR_DOMAIN_VCPU_LIVE) +logger.info(check with vcpusFlags api) +ret = domobj.vcpusFlags(libvirt.VIR_DOMAIN_VCPU_LIVE) +logger.info(vcpusFlags return current vcpu is: %s % ret) +if ret == vcpu: +logger.info(vcpusFlags check succeed) +else: +logger.error(vcpusFlags check failed) +return 1 except libvirtError, e: logger.error(libvirt call failed: + str(e)) return 1 -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH v2] Add 2 vcpupin cases covering config and live flags
v1: add 2 vcpupin cases * use pinVcpuFlags to pin domain vcpu to host cpu * 2 cases cover config and live flags * cpulist with '^', '-' and ',' is supported to give multiple host cpus * vcpus and vcpuPinInfo are used as part of the checking * a sample conf is added v2: move format cpulist functions to utils * the format cpulist functions could be reused for cases need parse param with '-', '^' and ','. Signed-off-by: Wayne Sun g...@redhat.com --- cases/vcpupin.conf | 67 +++ repos/setVcpus/vcpupin_config.py | 109 ++ repos/setVcpus/vcpupin_live.py | 101 +++ utils/utils.py | 71 4 files changed, 348 insertions(+), 0 deletions(-) create mode 100644 cases/vcpupin.conf create mode 100644 repos/setVcpus/vcpupin_config.py create mode 100644 repos/setVcpus/vcpupin_live.py diff --git a/cases/vcpupin.conf b/cases/vcpupin.conf new file mode 100644 index 000..880247f --- /dev/null +++ b/cases/vcpupin.conf @@ -0,0 +1,67 @@ +domain:install_linux_cdrom +guestname +$defaultname +guestos +$defaultos +guestarch +$defaultarch +vcpu +4 +memory +$defaultmem +hddriver +$defaulthd +nicdriver +$defaultnic +imageformat +qcow2 + +setVcpus:vcpupin_live +guestname +$defaultname +vcpu +0 +cpulist +2,4-6,^4 + +setVcpus:vcpupin_live +guestname +$defaultname +vcpu +1 +cpulist +3 + +domain:destroy +guestname +$defaultname + +setVcpus:vcpupin_config +guestname +$defaultname +vcpu +2 +cpulist +0-8,^1 + +setVcpus:vcpupin_config +guestname +$defaultname +vcpu +3 +cpulist +^2,0-8 + +domain:start +guestname +$defaultname + +domain:destroy +guestname +$defaultname + +domain:undefine +guestname +$defaultname + +options cleanup=enable diff --git a/repos/setVcpus/vcpupin_config.py b/repos/setVcpus/vcpupin_config.py new file mode 100644 index 000..80df659 --- /dev/null +++ b/repos/setVcpus/vcpupin_config.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python +# Test domain vcpu pin with flag VIR_DOMAIN_AFFECT_CONFIG, check +# domain config xml with vcpupin configuration. 
+ +import re +from xml.dom import minidom + +import libvirt +from libvirt import libvirtError + +from src import sharedmod +from utils import utils + +required_params = ('guestname', 'vcpu', 'cpulist',) +optional_params = {} + +def vcpupin_check(domobj, vcpu, cpumap): +check domain config xml with vcpupin element + +guestxml = domobj.XMLDesc(2) +logger.debug(domain %s xml :\n%s %(domobj.name(), guestxml)) + +doc = minidom.parseString(guestxml) +vcpupin = doc.getElementsByTagName('vcpupin') +if not vcpupin: +logger.error(no vcpupin element in domain xml) +return 1 + +for i in range(len(vcpupin)): +if vcpupin[i].hasAttribute('vcpu') and \ + vcpupin[i].hasAttribute('cpuset'): +vcpu_attr = vcpupin[i].getAttributeNode('vcpu') +cpu_attr = vcpupin[i].getAttributeNode('cpuset') +if int(vcpu_attr.nodeValue) == vcpu: +cpulist = cpu_attr.nodeValue +if cpulist == '': +cpumap_tmp = () +for i in range(maxcpu): +cpumap_tmp += (False,) +else: +cpumap_tmp = utils.param_to_tuple(cpulist, maxcpu) + +if cpumap_tmp == cpumap: +logger.info(cpuset is as expected in domain xml) +return 0 +else: +logger.error(cpuset is not as expected in domain xml) +return 1 + +if i == len(vcpupin) - 1: +logger.error(the vcpupin element with given vcpu is not found) +return 1 + +def vcpupin_config(params): +pin domain vcpu to host cpu with config flag + +global logger +logger = params['logger'] +params.pop('logger') +guestname = params['guestname'] +vcpu = int(params['vcpu']) +cpulist = params['cpulist'] + +logger.info(the name of virtual machine is %s % guestname) +logger.info(the given vcpu is %s % vcpu) +logger.info(the given cpulist is %s % cpulist) + +global maxcpu +maxcpu = utils.get_host_cpus() +logger.info(%s physical cpu on host % maxcpu) + +conn = sharedmod.libvirtobj['conn'] + +try: +domobj = conn.lookupByName(guestname) +cpumap = utils.param_to_tuple(cpulist, maxcpu) + +if not cpumap: +logger.error(cpulist: Invalid format) +return 1 + +logger.debug(cpumap for vcpu pin is:) 
+logger.debug(cpumap) + +logger.info(pin domain vcpu %s to host cpulist %s with flag: %s
[libvirt] [test-API][PATCH] Add two numa parameters cases
* cover live and config flags. * config case use numaParameters API to check and confirm with check domain config xml. * live update numa parameters is with problem now, setNumaParameters API is with bug 857312 on live domain. Also use numaParameters API to check after set, then check domain pid status with allowed memory list to confirm the nodeset. * The mode check in live case is marked as TODO for later. Signed-off-by: Wayne Sun g...@redhat.com --- cases/numa_param.conf | 51 + repos/numa/numa_param_config.py | 119 +++ repos/numa/numa_param_live.py | 104 ++ 3 files changed, 274 insertions(+), 0 deletions(-) create mode 100644 cases/numa_param.conf create mode 100644 repos/numa/__init__.py create mode 100644 repos/numa/numa_param_config.py create mode 100644 repos/numa/numa_param_live.py diff --git a/cases/numa_param.conf b/cases/numa_param.conf new file mode 100644 index 000..64268a3 --- /dev/null +++ b/cases/numa_param.conf @@ -0,0 +1,51 @@ +domain:install_linux_cdrom +guestname +$defaultname +guestos +$defaultos +guestarch +$defaultarch +vcpu +4 +memory +$defaultmem +hddriver +$defaulthd +nicdriver +$defaultnic +imageformat +qcow2 + +numa:numa_param_live +guestname +$defaultname +nodeset +0 +mode +0 + +domain:destroy +guestname +$defaultname + +numa:numa_param_config +guestname +$defaultname +nodeset +0-1 +mode +2 + +domain:start +guestname +$defaultname + +domain:destroy +guestname +$defaultname + +domain:undefine +guestname +$defaultname + +options cleanup=enable diff --git a/repos/numa/__init__.py b/repos/numa/__init__.py new file mode 100644 index 000..e69de29 diff --git a/repos/numa/numa_param_config.py b/repos/numa/numa_param_config.py new file mode 100644 index 000..52b21dd --- /dev/null +++ b/repos/numa/numa_param_config.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python +# Test set domain numa parameters with flag +# VIR_DOMAIN_AFFECT_CONFIG and check + +from xml.dom import minidom + +import libvirt +from libvirt import libvirtError + +from src import 
sharedmod +from utils import utils + +required_params = ('guestname', 'nodeset', 'mode') +optional_params = {} + +def check_numa_params(domobj, mode, node_tuple): +dump domain config xml description to check numa params + +guestxml = domobj.XMLDesc(2) +logger.debug(domain %s xml is :\n%s %(domobj.name(), guestxml)) +xml = minidom.parseString(guestxml) +numatune = xml.getElementsByTagName('numatune')[0] +mem_element = numatune.getElementsByTagName('memory')[0] + +if mem_element.hasAttribute('mode') and \ + mem_element.hasAttribute('nodeset'): +attr = mem_element.getAttributeNode('mode') +mode_val = attr.nodeValue +logger.info(memory mode in config xml is: %s % mode_val) +if mode_val == 'strict': +mode_num = 0 +elif mode_val == 'preferred': +mode_num = 1 +elif mode_val == 'interleave': +mode_num = 2 +else: +logger.error(mode value is invalid) +return 1 + +attr = mem_element.getAttributeNode('nodeset') +nodeset_val = attr.nodeValue +logger.info(nodeset in config xml is: %s % nodeset_val) +else: +logger.error(no 'mode' and 'nodeset' atrribute for element memory) +return 1 + +ret = utils.param_to_tuple(nodeset_val, node_num) +logger.debug(nudeset in config xml to tuple is:) +logger.debug(ret) +if not ret: +logger.error(fail to parse nodeset to tuple) +return 1 + +if mode_num == mode and ret == node_tuple: +return 0 +else: +return 1 + +def numa_param_config(params): +set domain numa parameters with config flag and check + +global logger +logger = params['logger'] +params.pop('logger') +guestname = params['guestname'] +nodeset = params['nodeset'] +mode = int(params['mode']) + +logger.info(the name of virtual machine is %s % guestname) +logger.info(the given node number is: %s % nodeset) +logger.info(the given mode is: %s % mode) + +global node_num +cmd = lscpu|grep 'NUMA node(s)' +ret, output = utils.exec_cmd(cmd, shell=True) +node_num = int(output[0].split(' ')[-1]) +node_tuple = utils.param_to_tuple(nodeset, node_num) +logger.debug(nodeset to tuple is:) 
+logger.debug(node_tuple) + +param = {'numa_nodeset': nodeset, 'numa_mode': mode} +logger.info(numa param dict for set is: %s % param) + +conn = sharedmod.libvirtobj['conn'] + +try: +domobj = conn.lookupByName(guestname) +logger.info(set domain numa parameters with flag: %s
[libvirt] [test-API][PATCH] Add 2 vcpupin cases covering config and live flags
* use pinVcpuFlags to pin domain vcpu to host cpu * 2 cases cover config and live flags * cpulist with '^', '-' and ',' is supported to give multiple host cpus * vcpus and vcpuPinInfo are used as part of the checking * a sample conf is added Signed-off-by: Wayne Sun g...@redhat.com --- cases/vcpupin.conf | 67 +++ repos/setVcpus/vcpupin_config.py | 174 ++ repos/setVcpus/vcpupin_live.py | 166 3 files changed, 407 insertions(+), 0 deletions(-) create mode 100644 cases/vcpupin.conf create mode 100644 repos/setVcpus/vcpupin_config.py create mode 100644 repos/setVcpus/vcpupin_live.py diff --git a/cases/vcpupin.conf b/cases/vcpupin.conf new file mode 100644 index 000..880247f --- /dev/null +++ b/cases/vcpupin.conf @@ -0,0 +1,67 @@ +domain:install_linux_cdrom +guestname +$defaultname +guestos +$defaultos +guestarch +$defaultarch +vcpu +4 +memory +$defaultmem +hddriver +$defaulthd +nicdriver +$defaultnic +imageformat +qcow2 + +setVcpus:vcpupin_live +guestname +$defaultname +vcpu +0 +cpulist +2,4-6,^4 + +setVcpus:vcpupin_live +guestname +$defaultname +vcpu +1 +cpulist +3 + +domain:destroy +guestname +$defaultname + +setVcpus:vcpupin_config +guestname +$defaultname +vcpu +2 +cpulist +0-8,^1 + +setVcpus:vcpupin_config +guestname +$defaultname +vcpu +3 +cpulist +^2,0-8 + +domain:start +guestname +$defaultname + +domain:destroy +guestname +$defaultname + +domain:undefine +guestname +$defaultname + +options cleanup=enable diff --git a/repos/setVcpus/vcpupin_config.py b/repos/setVcpus/vcpupin_config.py new file mode 100644 index 000..12d9598 --- /dev/null +++ b/repos/setVcpus/vcpupin_config.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python +# Test domain vcpu pin with flag VIR_DOMAIN_AFFECT_CONFIG, check +# domain config xml with vcpupin configuration. 
+ +import re +from xml.dom import minidom + +import libvirt +from libvirt import libvirtError + +from src import sharedmod +from utils import utils + +required_params = ('guestname', 'vcpu', 'cpulist',) +optional_params = {} + +def vcpupin_check(domobj, vcpu, cpumap): +check domain config xml with vcpupin element + +guestxml = domobj.XMLDesc(2) +logger.debug(domain %s xml :\n%s %(domobj.name(), guestxml)) + +doc = minidom.parseString(guestxml) +vcpupin = doc.getElementsByTagName('vcpupin') +if not vcpupin: +logger.error(no vcpupin element in domain xml) +return 1 + +for i in range(len(vcpupin)): +if vcpupin[i].hasAttribute('vcpu') and \ + vcpupin[i].hasAttribute('cpuset'): +vcpu_attr = vcpupin[i].getAttributeNode('vcpu') +cpu_attr = vcpupin[i].getAttributeNode('cpuset') +if int(vcpu_attr.nodeValue) == vcpu: +cpulist = cpu_attr.nodeValue +if cpulist == '': +cpumap_tmp = () +for i in range(maxcpu): +cpumap_tmp += (False,) +else: +cpumap_tmp = get_cpumap(cpulist) + +if cpumap_tmp == cpumap: +logger.info(cpuset is as expected in domain xml) +return 0 +else: +logger.error(cpuset is not as expected in domain xml) +return 1 + +if i == len(vcpupin) - 1: +logger.error(the vcpupin element with given vcpu is not found) +return 1 + +def format_cpumap(cpulist, cpumap_test): + format cpumap base on cpulist + +cpumap = () + +try: +if re.match('\^', cpulist): +unuse = int(re.split('\^', cpulist)[1]) +for i in range(maxcpu): +if i == unuse: +cpumap += (False,) +else: +cpumap += (cpumap_test[i],) + +elif '-' in cpulist: +cpu_list = re.split('-', cpulist) +if not len(cpu_list) == 2: +return False +if not int(cpu_list[1]) maxcpu: +logger.error(cpulist: out of host cpu range) +return False +if int(cpu_list[1]) int(cpu_list[0]): +return False + +for i in range(maxcpu): +if i in range(int(cpu_list[0]), int(cpu_list[1])+1): +cpumap += (True,) +else: +cpumap += (cpumap_test[i],) + +else: +for i in range(maxcpu): +if i == int(cpulist): +cpumap += (True,) +else: +cpumap += (cpumap_test[i],) 
+ +return cpumap
[libvirt] [test-API][PATCH v4] Add test case of set vcpus with flags
v2: break down the case to small cases with separate flags * Use setVcpusFlags API to set domain vcpus with flags * 3 cases added, each only deal with one set flag value as in config, live or maximum * cases are independent on domain states, API will report error if not suitable for certain states * the sample conf is only one scenario of hotplug domain vcpus v3: merge config and maximum case to config * maximum flag can only work when domain is shutoff, merge it to config case to simplify code v4: make both vcpu and maxvcpu as optional param in config case * depend on the given params to select flags and setting Signed-off-by: Wayne Sun g...@redhat.com --- cases/set_vcpus_flags.conf | 67 repos/setVcpus/set_vcpus_config.py | 99 repos/setVcpus/set_vcpus_live.py | 96 ++ 3 files changed, 262 insertions(+), 0 deletions(-) create mode 100644 cases/set_vcpus_flags.conf create mode 100644 repos/setVcpus/__init__.py create mode 100644 repos/setVcpus/set_vcpus_config.py create mode 100644 repos/setVcpus/set_vcpus_live.py diff --git a/cases/set_vcpus_flags.conf b/cases/set_vcpus_flags.conf new file mode 100644 index 000..6cf595f --- /dev/null +++ b/cases/set_vcpus_flags.conf @@ -0,0 +1,67 @@ +domain:install_linux_cdrom +guestname +$defaultname +guestos +$defaultos +guestarch +$defaultarch +vcpu +$defaultvcpu +memory +$defaultmem +hddriver +$defaulthd +nicdriver +$defaultnic +imageformat +qcow2 + +domain:destroy +guestname +$defaultname + +setVcpus:set_vcpus_config +guestname +$defaultname +vcpu +1 +maxvcpu +8 + +domain:start +guestname +$defaultname + +setVcpus:set_vcpus_live +guestname +$defaultname +vcpu +3 +username +$username +password +$password + +setVcpus:set_vcpus_config +guestname +$defaultname +vcpu +5 + +domain:destroy +guestname +$defaultname + +domain:start +guestname +$defaultname + +domain:destroy +guestname +$defaultname + +domain:undefine +guestname +$defaultname + +options cleanup=enable diff --git a/repos/setVcpus/__init__.py b/repos/setVcpus/__init__.py 
new file mode 100644 index 000..e69de29 diff --git a/repos/setVcpus/set_vcpus_config.py b/repos/setVcpus/set_vcpus_config.py new file mode 100644 index 000..289dad1 --- /dev/null +++ b/repos/setVcpus/set_vcpus_config.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python +# Test set domain vcpu with flag VIR_DOMAIN_AFFECT_CONFIG or +# VIR_DOMAIN_VCPU_MAXIMUM, depend on which optional param is +# given. + +from xml.dom import minidom + +import libvirt +from libvirt import libvirtError + +from src import sharedmod + +required_params = ('guestname', ) +optional_params = {'vcpu': 1, + 'maxvcpu': 8, + } + +def get_vcpu_number(domobj): +dump domain config xml description to get vcpu number, return + current vcpu and maximum vcpu number + +try: +guestxml = domobj.XMLDesc(2) +logger.debug(domain %s xml is :\n%s %(domobj.name(), guestxml)) +xml = minidom.parseString(guestxml) +vcpu = xml.getElementsByTagName('vcpu')[0] +maxvcpu = int(vcpu.childNodes[0].data) +logger.info(domain max vcpu number is: %s % maxvcpu) + +if vcpu.hasAttribute('current'): +attr = vcpu.getAttributeNode('current') +current = int(attr.nodeValue) +else: +logger.info(no 'current' atrribute for element vcpu) +current = int(vcpu.childNodes[0].data) + +logger.info(domain current vcpu number is: %s % current) + +except libvirtError, e: +logger.error(libvirt call failed: + str(e)) +return False + +return current, maxvcpu + +def set_vcpus_config(params): +set domain vcpu with config flag and check, also set and check + max vcpu with maximum flag if optional param maxvcpu is given + +global logger +logger = params['logger'] +params.pop('logger') +guestname = params['guestname'] +vcpu = params.get('vcpu', None) +maxvcpu = params.get('maxvcpu', None) + +logger.info(the name of virtual machine is %s % guestname) +if vcpu == None and maxvcpu == None: +logger.error(at least one of vcpu or maxvcpu should be provided) +return 1 + +conn = sharedmod.libvirtobj['conn'] + +try: +domobj = conn.lookupByName(guestname) +if vcpu: 
+logger.info(the given vcpu number is %s % vcpu) +logger.info(set domain vcpu as %s with flag: %s % +(vcpu, libvirt.VIR_DOMAIN_AFFECT_CONFIG)) +domobj.setVcpusFlags(int(vcpu), libvirt.VIR_DOMAIN_AFFECT_CONFIG) +logger.info(set
[libvirt] [test-API][PATCH v3] Add test case of set vcpus with flags
v2: break down the case to small cases with separate flags * Use setVcpusFlags API to set domain vcpus with flags * 3 cases added, each only deal with one set flag value as in config, live or maximum * cases are independent on domain states, API will report error if not suitable for certain states * the sample conf is only one scenario of hotplug domain vcpus v3: merge config and maximum case to config * maximum flag can only work when domain is shutoff, merge it to config case to simplify code Signed-off-by: Wayne Sun g...@redhat.com --- cases/set_vcpus_flags.conf | 67 + repos/setVcpus/set_vcpus_config.py | 93 ++ repos/setVcpus/set_vcpus_live.py | 96 3 files changed, 256 insertions(+), 0 deletions(-) create mode 100644 cases/set_vcpus_flags.conf create mode 100644 repos/setVcpus/__init__.py create mode 100644 repos/setVcpus/set_vcpus_config.py create mode 100644 repos/setVcpus/set_vcpus_live.py diff --git a/cases/set_vcpus_flags.conf b/cases/set_vcpus_flags.conf new file mode 100644 index 000..6cf595f --- /dev/null +++ b/cases/set_vcpus_flags.conf @@ -0,0 +1,67 @@ +domain:install_linux_cdrom +guestname +$defaultname +guestos +$defaultos +guestarch +$defaultarch +vcpu +$defaultvcpu +memory +$defaultmem +hddriver +$defaulthd +nicdriver +$defaultnic +imageformat +qcow2 + +domain:destroy +guestname +$defaultname + +setVcpus:set_vcpus_config +guestname +$defaultname +vcpu +1 +maxvcpu +8 + +domain:start +guestname +$defaultname + +setVcpus:set_vcpus_live +guestname +$defaultname +vcpu +3 +username +$username +password +$password + +setVcpus:set_vcpus_config +guestname +$defaultname +vcpu +5 + +domain:destroy +guestname +$defaultname + +domain:start +guestname +$defaultname + +domain:destroy +guestname +$defaultname + +domain:undefine +guestname +$defaultname + +options cleanup=enable diff --git a/repos/setVcpus/__init__.py b/repos/setVcpus/__init__.py new file mode 100644 index 000..e69de29 diff --git a/repos/setVcpus/set_vcpus_config.py 
b/repos/setVcpus/set_vcpus_config.py new file mode 100644 index 000..08eb53f --- /dev/null +++ b/repos/setVcpus/set_vcpus_config.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python +# Test set domain vcpu with flag VIR_DOMAIN_AFFECT_CONFIG, also set +# and check max vcpu with flag VIR_DOMAIN_VCPU_MAXIMUM if maxvcpu +# param is given + +from xml.dom import minidom + +import libvirt +from libvirt import libvirtError + +from src import sharedmod + +required_params = ('guestname', 'vcpu', ) +optional_params = {'maxvcpu': 8, + } + +def get_vcpu_number(domobj): +dump domain config xml description to get vcpu number, return + current vcpu and maximum vcpu number + +try: +guestxml = domobj.XMLDesc(2) +logger.debug(domain %s xml is :\n%s %(domobj.name(), guestxml)) +xml = minidom.parseString(guestxml) +vcpu = xml.getElementsByTagName('vcpu')[0] +maxvcpu = int(vcpu.childNodes[0].data) +logger.info(domain max vcpu number is: %s % maxvcpu) + +if vcpu.hasAttribute('current'): +attr = vcpu.getAttributeNode('current') +current = int(attr.nodeValue) +else: +logger.info(no 'current' atrribute for element vcpu) +current = int(vcpu.childNodes[0].data) + +logger.info(domain current vcpu number is: %s % current) + +except libvirtError, e: +logger.error(libvirt call failed: + str(e)) +return False + +return current, maxvcpu + +def set_vcpus_config(params): +set domain vcpu with config flag and check, also set and check + max vcpu with maximum flag if optional param maxvcpu is given + +global logger +logger = params['logger'] +params.pop('logger') +guestname = params['guestname'] +vcpu = int(params['vcpu']) +maxvcpu = params.get('maxvcpu', None) + +logger.info(the name of virtual machine is %s % guestname) +logger.info(the given vcpu number is %s % vcpu) + +conn = sharedmod.libvirtobj['conn'] + +try: +domobj = conn.lookupByName(guestname) +logger.info(set domain vcpu as %s with flag: %s % +(vcpu, libvirt.VIR_DOMAIN_AFFECT_CONFIG)) +domobj.setVcpusFlags(vcpu, libvirt.VIR_DOMAIN_AFFECT_CONFIG) 
+logger.info(set domain vcpu succeed) + +if maxvcpu: +logger.info(the given max vcpu number is %s % maxvcpu) +logger.info(set domain maximum vcpu as %s with flag: %s % +(maxvcpu, libvirt.VIR_DOMAIN_VCPU_MAXIMUM)) +domobj.setVcpusFlags(int(maxvcpu
[libvirt] [test-API][PATCH v2] Add test case of set vcpus with flags
v2: break down the case to small cases with separate flags * Use setVcpusFlags API to set domain vcpus with flags * 3 cases added, each only deal with one set flag value as in config, live or maximum * cases are independent on domain states, API will report error if not suitable for certain states * the sample conf is only one scenario of hotplug domain vcpus Signed-off-by: Wayne Sun g...@redhat.com --- cases/set_vcpus_flags.conf | 64 +++ repos/setVcpus/set_vcpus_config.py | 69 + repos/setVcpus/set_vcpus_live.py| 96 +++ repos/setVcpus/set_vcpus_maximum.py | 62 ++ 4 files changed, 291 insertions(+), 0 deletions(-) create mode 100644 cases/set_vcpus_flags.conf create mode 100644 repos/setVcpus/__init__.py create mode 100644 repos/setVcpus/set_vcpus_config.py create mode 100644 repos/setVcpus/set_vcpus_live.py create mode 100644 repos/setVcpus/set_vcpus_maximum.py diff --git a/cases/set_vcpus_flags.conf b/cases/set_vcpus_flags.conf new file mode 100644 index 000..d346735 --- /dev/null +++ b/cases/set_vcpus_flags.conf @@ -0,0 +1,64 @@ +domain:install_linux_cdrom +guestname +$defaultname +guestos +$defaultos +guestarch +$defaultarch +vcpu +$defaultvcpu +memory +$defaultmem +hddriver +$defaulthd +nicdriver +$defaultnic +imageformat +qcow2 + + +domain:destroy +guestname +$defaultname + +setVcpus:set_vcpus_maximum +guestname +$defaultname +vcpu +4 + +setVcpus:set_vcpus_config +guestname +$defaultname +vcpu +1 + +domain:start +guestname +$defaultname + +setVcpus:set_vcpus_live +guestname +$defaultname +vcpu +3 +username +$username +password +$password + +setVcpus:set_vcpus_config +guestname +$defaultname +vcpu +2 + +domain:destroy +guestname +$defaultname + +domain:undefine +guestname +$defaultname + +options cleanup=enable diff --git a/repos/setVcpus/__init__.py b/repos/setVcpus/__init__.py new file mode 100644 index 000..e69de29 diff --git a/repos/setVcpus/set_vcpus_config.py b/repos/setVcpus/set_vcpus_config.py new file mode 100644 index 000..2b8f5e7 --- /dev/null +++ 
b/repos/setVcpus/set_vcpus_config.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python +# Test set domain vcpu with flag VIR_DOMAIN_AFFECT_CONFIG. Check +# domain config xml to get 'current' vcpu number. + +from xml.dom import minidom + +import libvirt +from libvirt import libvirtError + +from src import sharedmod + +required_params = ('guestname', 'vcpu', ) +optional_params = {} + +def get_current_vcpu(domobj): +dump domain config xml description to get current vcpu number + +try: +guestxml = domobj.XMLDesc(2) +logger.debug(domain %s xml is :\n%s %(domobj.name(), guestxml)) +xml = minidom.parseString(guestxml) +vcpu = xml.getElementsByTagName('vcpu')[0] + +if vcpu.hasAttribute('current'): +attr = vcpu.getAttributeNode('current') +current = int(attr.nodeValue) +else: +logger.info(no 'current' atrribute for element vcpu) +current = int(vcpu.childNodes[0].data) + +logger.info(domain current vcpu number is: %s % current) + +except libvirtError, e: +logger.error(libvirt call failed: + str(e)) +return False + +return current + +def set_vcpus_config(params): +set domain vcpu with config flag and check + +global logger +logger = params['logger'] +params.pop('logger') +guestname = params['guestname'] +vcpu = int(params['vcpu']) + +logger.info(the name of virtual machine is %s % guestname) +logger.info(the given vcpu number is %s % vcpu) + +conn = sharedmod.libvirtobj['conn'] + +try: +domobj = conn.lookupByName(guestname) +logger.info(set domain vcpu as %s with flag: %s % +(vcpu, libvirt.VIR_DOMAIN_AFFECT_CONFIG)) +domobj.setVcpusFlags(vcpu, libvirt.VIR_DOMAIN_AFFECT_CONFIG) +except libvirtError, e: +logger.error(libvirt call failed: + str(e)) +return 1 + +logger.info(check domain config xml to get current vcpu) +ret = get_current_vcpu(domobj) +if ret == vcpu: +logger.info(domain current vcpu is equal as set) +return 0 +else: +logger.error(domain current vcpu is not equal as set) +return 1 diff --git a/repos/setVcpus/set_vcpus_live.py b/repos/setVcpus/set_vcpus_live.py new file mode 
100644 index 000..35a2976 --- /dev/null +++ b/repos/setVcpus/set_vcpus_live.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python +# Test set domain vcpu with flag VIR_DOMAIN_VCPU_LIVE. Check +# domain xml and inside domain to get current vcpu number. The +# live flag only work on running domain
Re: [libvirt] [test-API][PATCH] balloon_memory: add time break before dump xml
On 11/27/2012 04:36 PM, Guannan Ren wrote: On 11/27/2012 03:02 PM, Wayne Sun wrote: the xml dumped after setMemory is not accurate due to time delay, so take 10 sec sleep before dump to show the right xml info Why it is necessary to sleep for a while, I don't think adding a sleep is a good way to solve problem. Please try to figure out the root cause. Guannan DB recently change the code of get domain memory balloon value as in patch: http://www.redhat.com/archives/libvir-list/2012-May/msg00871.html If QEMU supports the BALLOON_EVENT QMP event, then we can avoid invoking 'query-balloon' when returning XML or the domain info. And in qemu patch: http://lists.gnu.org/archive/html/qemu-devel/2012-05/msg02833.html DB also emphasis in description: It is important to note that events are only discarded when they are obsoleted by a newer event. So an application is guarenteed to see the final balloon event, with at worst a 1 second delay. So when after do memory change, for dump domain xml to get balloon value, it will be controlled in 1 second delay. When i check with QMP with query-events commands on qemu-kvm-rhev-0.12.1.2-2.337.el6.x86_64: { execute: query-events} {error: {class: CommandNotFound, desc: The command query-events has not been found, data: {name: query-events}}} and this also in my log, so my qemu did not support this BALLOON_EVENT and using query-balloon to get current memory, and indeed 'query-balloon' is spotted in my log. This back to # vim src/qemu/qemu_driver.c +5330 /* Don't delay if someone's using the monitor, just use * existing most recent data instead */ so most recent data will be returned with query-balloon, that means if we query too fast, the result will not be accurate. I think add little time sleep is reasonable here for now, but after qemu support BALLOON_EVENT, then the sleep will be no use. So, what do you think? 
Wayne Sun 2012-11-27 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH] balloon_memory: add time break before dump xml
the xml dumped after setMemory is not accurate due to time delay, so take 10 sec sleep before dump to show the right xml info Signed-off-by: Wayne Sun g...@redhat.com --- repos/domain/balloon_memory.py |2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/repos/domain/balloon_memory.py b/repos/domain/balloon_memory.py index 7051a0a..5bf023a 100644 --- a/repos/domain/balloon_memory.py +++ b/repos/domain/balloon_memory.py @@ -209,6 +209,7 @@ def balloon_memory(params): logger.debug(dump the xml description of guest virtual machine %s % domname) +time.sleep(10) dom_xml = domobj.XMLDesc(0) logger.debug(the xml definination is %s % dom_xml) @@ -240,6 +241,7 @@ def balloon_memory(params): logger.debug(dump the xml description of \ guest virtual machine %s % domname) +time.sleep(10) dom_xml = domobj.XMLDesc(0) logger.debug(the xml definination is %s % dom_xml) -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH] Add test case of set vcpus with flags
Use setVcpusFlags API to set domain vcpu with flags, domain could be active or not. Flags could be '0', 'live', 'config', 'maximum' and their combinations, use '|' between flags for combinations. A sample conf file also added. Signed-off-by: Wayne Sun g...@redhat.com --- cases/set_vcpus_flags.conf | 56 repos/domain/set_vcpus_flags.py | 283 +++ 2 files changed, 339 insertions(+), 0 deletions(-) create mode 100644 cases/set_vcpus_flags.conf create mode 100644 repos/domain/set_vcpus_flags.py diff --git a/cases/set_vcpus_flags.conf b/cases/set_vcpus_flags.conf new file mode 100644 index 000..7da13a2 --- /dev/null +++ b/cases/set_vcpus_flags.conf @@ -0,0 +1,56 @@ +domain:install_linux_cdrom +guestname +$defaultname +guestos +$defaultos +guestarch +$defaultarch +vcpu +$defaultvcpu +memory +$defaultmem +hddriver +$defaulthd +nicdriver +$defaultnic +imageformat +qcow2 + +domain:set_vcpus_flags +guestname +$defaultname +vcpu +4 +flags +0|config|live +username +$username +password +$password + +domain:destroy +guestname +$defaultname + +domain:set_vcpus_flags +guestname +$defaultname +vcpu +5 +flags +0|config + +domain:set_vcpus_flags +guestname +$defaultname +vcpu +6 +flags +maximum + +domain:undefine +guestname +$defaultname + +options cleanup=enable + diff --git a/repos/domain/set_vcpus_flags.py b/repos/domain/set_vcpus_flags.py new file mode 100644 index 000..ca614cc --- /dev/null +++ b/repos/domain/set_vcpus_flags.py @@ -0,0 +1,283 @@ +#!/usr/bin/env python +# Test set domain vcpu with flags. Flags could be 0, live, config, +# maximum and their combinations, use '|' for combinations. If +# domain is active, username and password should be provided, else +# not. 
+ +import time +import commands +from xml.dom import minidom + +import libvirt +from libvirt import libvirtError + +from src import sharedmod +from utils import utils + +required_params = ('guestname', 'vcpu', 'flags', ) +optional_params = { + 'username': 'root', + 'password': '', + } + +def check_domain_running(conn, guestname): + check if the domain exists, may or may not be active +guest_names = [] +ids = conn.listDomainsID() +for id in ids: +obj = conn.lookupByID(id) +guest_names.append(obj.name()) + +if guestname not in guest_names: +logger.info(%s is not running % guestname) +return 1 +else: +return 0 + +def redefine_vcpu_number(domobj, guestname, vcpu): +dump domain xml description to change the vcpu number, + then, define the domain again + +guestxml = domobj.XMLDesc(0) +logger.debug('''original guest %s xml :\n%s''' %(guestname, guestxml)) + +doc = minidom.parseString(guestxml) + +newvcpu = doc.createElement('vcpu') +newvcpuval = doc.createTextNode(str(vcpu)) +newvcpu.appendChild(newvcpuval) +newvcpu.setAttribute('current', '1') + +domain = doc.getElementsByTagName('domain')[0] +oldvcpu = doc.getElementsByTagName('vcpu')[0] + +domain.replaceChild(newvcpu, oldvcpu) + +return doc.toxml() + +def check_current_vcpu(domobj, state, flag, username, password): +dump domain xml description to get current vcpu number and + check vcpu in domain if domain is active + +if len(flag) == 1 and flag[0] == '0': +if state: +flag.append('config') +else: +flag.append('live') + +if len(flag) == 1 and flag[0] == 'maximum': +if state: +flag.append('config') +else: +logger.error('maximum' on live domain is not supported') +return False + +if 'live' in flag: +if 'maximum' in flag: +logger.error('live' with 'maximum' is not supported') +return False + +guestxml = domobj.XMLDesc(1) +logger.debug(domain %s xml is :\n%s %(domobj.name(), guestxml)) + +xml = minidom.parseString(guestxml) +vcpu = xml.getElementsByTagName('vcpu')[0] +if vcpu.hasAttribute('current'): +attr = 
vcpu.getAttributeNode('current') +current_vcpu = int(attr.nodeValue) +else: +logger.info(no 'current' attribute in vcpu element) +return False + +if not state: +if password == '' and username == 'root': +logger.error(check will fail with empty root password) +return False + +logger.info(check cpu number in domain) +ip = utils.mac_to_ip(mac, 180) + +cmd = cat /proc/cpuinfo | grep processor | wc -l +ret, output = utils.remote_exec_pexpect(ip, username, password, cmd
[libvirt] [test-API][PATCH] Fix a problem in cpu_affinity
int() with base 16 will cause problem when cpu number bigger than 10, so change it as default with base 10. Signed-off-by: Wayne Sun g...@redhat.com --- repos/domain/cpu_affinity.py |2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/repos/domain/cpu_affinity.py b/repos/domain/cpu_affinity.py index ee585e6..e710968 100644 --- a/repos/domain/cpu_affinity.py +++ b/repos/domain/cpu_affinity.py @@ -151,7 +151,7 @@ def vcpu_affinity_check(domain_name, vcpu, expected_pinned_cpu, hypervisor): task_list = output.split('\n')[1:] vcpu_task = task_list[int(vcpu)] -actual_pinned_cpu = int(vcpu_task.split('\t')[1], 16) +actual_pinned_cpu = int(vcpu_task.split('\t')[1]) elif 'el5' in host_kernel_version: cmd_get_task_list = grep Cpus_allowed /proc/%s/task/*/status % pid status, output = commands.getstatusoutput(cmd_get_task_list) -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH] Add cpu hotplug test case
Test hotplug domain CPU, loop increase cpu to max then decrease to min. Now qemu did not support decrease plug of cpu, so decrease part will fail, leave it in the case to keep integrity. Signed-off-by: Wayne Sun g...@redhat.com --- cases/cpu_hotplug.conf | 38 +++ repos/domain/cpu_hotplug.py | 257 +++ 2 files changed, 295 insertions(+), 0 deletions(-) create mode 100644 cases/cpu_hotplug.conf create mode 100644 repos/domain/cpu_hotplug.py diff --git a/cases/cpu_hotplug.conf b/cases/cpu_hotplug.conf new file mode 100644 index 000..7ac0612 --- /dev/null +++ b/cases/cpu_hotplug.conf @@ -0,0 +1,38 @@ +domain:install_linux_cdrom +guestname +$defaultname +guestos +$defaultos +guestarch +$defaultarch +vcpu +$defaultvcpu +memory +$defaultmem +hddriver +$defaulthd +nicdriver +$defaultnic +macaddr +54:52:00:45:c3:8a + +domain:cpu_hotplug +guestname +$defaultname +vcpu +4 +username +$username +password +$password + +domain:destroy +guestname +$defaultname + +domain:undefine +guestname +$defaultname + +options cleanup=enable + diff --git a/repos/domain/cpu_hotplug.py b/repos/domain/cpu_hotplug.py new file mode 100644 index 000..3626db7 --- /dev/null +++ b/repos/domain/cpu_hotplug.py @@ -0,0 +1,257 @@ +#!/usr/bin/env python +# Test hotplug domain CPU, loop increase cpu to max then decrease +# to min + +import time +import commands +from xml.dom import minidom + +import libvirt +from libvirt import libvirtError + +from src import sharedmod +from utils import utils + +required_params = ('guestname', 'vcpu', 'username', 'password') +optional_params = {} + +def check_domain_running(conn, guestname): + check if the domain exists, may or may not be active +guest_names = [] +ids = conn.listDomainsID() +for id in ids: +obj = conn.lookupByID(id) +guest_names.append(obj.name()) + +if guestname not in guest_names: +logger.error(%s doesn't exist or not running % guestname) +return 1 +else: +return 0 + +def redefine_vcpu_number(domobj, guestname, vcpu): +dump domain xml description to change 
the vcpu number, + then, define the domain again + +guestxml = domobj.XMLDesc(0) +logger.debug('''original guest %s xml :\n%s''' %(guestname, guestxml)) + +doc = minidom.parseString(guestxml) + +newvcpu = doc.createElement('vcpu') +newvcpuval = doc.createTextNode(str(vcpu)) +newvcpu.appendChild(newvcpuval) +newvcpu.setAttribute('current', '1') + +domain = doc.getElementsByTagName('domain')[0] +oldvcpu = doc.getElementsByTagName('vcpu')[0] + +domain.replaceChild(newvcpu, oldvcpu) + +return doc.toxml() + +def check_current_vcpu(domobj, username, password): +dump domain xml description to get current vcpu number + +guestxml = domobj.XMLDesc(1) +logger.debug(domain %s xml is :\n%s %(domobj.name(), guestxml)) + +xml = minidom.parseString(guestxml) +vcpu = xml.getElementsByTagName('vcpu')[0] +if vcpu.hasAttribute('current'): +attr = vcpu.getAttributeNode('current') +current_vcpu = int(attr.nodeValue) +else: +logger.info(domain did not have 'current' attribute in vcpu element) +current_vcpu = int(vcpu.childNodes[0].data) + +logger.info(check cpu number in domain) +ip = utils.mac_to_ip(mac, 180) + +cmd = cat /proc/cpuinfo | grep processor | wc -l +ret, output = utils.remote_exec_pexpect(ip, username, password, cmd) +if not ret: +logger.info(cpu number in domain is %s % output) +if int(output) == current_vcpu: +logger.info(cpu number in domain is equal to current vcpu value) +return current_vcpu +else: +logger.error(current vcpu is not equal as check in domain) +return False +else: +logger.error(check in domain fail) +return False + + +def set_vcpus(domobj, guestname, vcpu, username, password): +set the value of virtual machine to vcpu offline , then boot up + the virtual machine + +timeout = 60 +logger.info('destroy domain') + +try: +domobj.destroy() +except libvirtError, e: +logger.error(API error message: %s, error code is %s \ +% (e.message, e.get_error_code())) +logger.error(fail to destroy domain) +return 1 + +newguestxml = redefine_vcpu_number(domobj, guestname, 
vcpu) +logger.debug('''new guest %s xml :\n%s''' %(guestname, newguestxml)) + +logger.info(undefine the original guest) +try: +domobj.undefine() +except libvirtError, e: +logger.error(API error message: %s, error code is %s \ + % (e.message, e.get_error_code())) +logger.error(fail to undefine guest % % guestname
[libvirt] [test-API][PATCH v2] utils: Update remote_exec_pexpect function and sync cases
After delete duplicate remote_exec_pexect function, the left function with same name causes some problem with cases, so update the function and sync all cases using it. Signed-off-by: Wayne Sun g...@redhat.com --- repos/domain/cpu_topology.py |2 +- repos/snapshot/file_flag.py |8 repos/snapshot/flag_check.py | 12 ++-- utils/utils.py | 28 ++-- 4 files changed, 25 insertions(+), 25 deletions(-) diff --git a/repos/domain/cpu_topology.py b/repos/domain/cpu_topology.py index 5f4ef52..cb071c9 100644 --- a/repos/domain/cpu_topology.py +++ b/repos/domain/cpu_topology.py @@ -136,7 +136,7 @@ def cpu_topology_chk(ip, username, password, int = 0 actual_thread = actual_core = actual_socket = '' -for item in output.strip().split('\r'): +for item in output.split('\r'): if int == 5: actual_thread = item.split()[-1] logger.info(the actual thread in the guest is %s % actual_thread) diff --git a/repos/snapshot/file_flag.py b/repos/snapshot/file_flag.py index e18975e..001f13c 100644 --- a/repos/snapshot/file_flag.py +++ b/repos/snapshot/file_flag.py @@ -34,12 +34,12 @@ def check_domain_running(conn, guestname, logger): def make_flag(ipaddr, username, password, logger): enter guest OS, create a file in /tmp folder -ret = utils.remote_exec_pexpect(ipaddr, username, password, MAKE_FLAG) -if ret == TIMEOUT!!!: +ret, out = utils.remote_exec_pexpect(ipaddr, username, password, MAKE_FLAG) +if ret: logger.error(connecting to guest OS timeout) return False -elif ret != '': -logger.error(failed to make flag in guest OS, %s % ret) +elif out != '': +logger.error(failed to make flag in guest OS, %s % out) return False else: logger.info(flag %s is created in /tmp folder % FLAG_FILE) diff --git a/repos/snapshot/flag_check.py b/repos/snapshot/flag_check.py index 41314d8..f0ddecf 100644 --- a/repos/snapshot/flag_check.py +++ b/repos/snapshot/flag_check.py @@ -68,20 +68,20 @@ def flag_check(params): logger.info(vm %s failed to get ip address % guestname) return 1 -ret = utils.remote_exec_pexpect(ipaddr, 
username, password, FLAG_CHECK) -if ret == TIMEOUT!!!: +ret, out = utils.remote_exec_pexpect(ipaddr, username, password, FLAG_CHECK) +if ret: logger.error(connecting to guest OS timeout) return 1 -elif ret == FLAG_FILE and expected_result == exist: +elif out == FLAG_FILE and expected_result == exist: logger.info(checking flag %s in guest OS succeeded % FLAG_FILE) return 0 -elif ret == FLAG_FILE and expected_result == 'noexist': +elif out == FLAG_FILE and expected_result == 'noexist': logger.error(flag %s still exist, FAILED. % FLAG_FILE) return 1 -elif ret != None and expected_result == exist: +elif out != None and expected_result == exist: logger.error(no flag %s exists in the guest %s % (FLAG_FILE,guestname)) return 1 -elif ret != None and expected_result == 'noexist': +elif out != None and expected_result == 'noexist': logger.info(flag %s is not present, checking succeeded % FLAG_FILE) return 0 diff --git a/utils/utils.py b/utils/utils.py index 27ddbc2..e242847 100644 --- a/utils/utils.py +++ b/utils/utils.py @@ -427,10 +427,10 @@ def remote_exec_pexpect(hostname, username, password, cmd): child.sendline(password) elif index == 2: child.close() -return 0, child.before +return 0, string.strip(child.before) elif index == 3: child.close() -return 1, +return 1, Timeout return 0 @@ -531,8 +531,8 @@ def get_remote_memory(hostname, username, password): i = 0 while i 3: i += 1 -memsize = \ -int(remote_exec_pexpect(hostname, username, password, cmd)) * 1024 +ret, out = remote_exec_pexpect(hostname, username, password, cmd) +memsize = int(out) * 1024 if memsize == -1: continue else: @@ -595,10 +595,10 @@ def libvirt_version(latest_ver = ''): def create_dir(hostname, username, password): Create new dir cmd = mkdir /tmp/test -mkdir_ret = remote_exec_pexpect(hostname, username, password, cmd) +ret, mkdir_ret = remote_exec_pexpect(hostname, username, password, cmd) if mkdir_ret == '': cmd = ls -d /tmp/test -check_str = remote_exec_pexpect(hostname, username, +ret, check_str = 
remote_exec_pexpect(hostname, username, password, cmd) if check_str == /tmp/test: return 0 @@ -613,11 +613,11 @@ def write_file(hostname, username, password): Simple test for writting file on specified host test_string = 'hello word testing' cmd = echo '%s'/tmp/test/test.log
[libvirt] [test-API][PATCH] Fix a typo in commit b111531
Signed-off-by: Wayne Sun g...@redhat.com --- src/env_inspect.py |2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/src/env_inspect.py b/src/env_inspect.py index cf036f7..222ffb1 100644 --- a/src/env_inspect.py +++ b/src/env_inspect.py @@ -77,7 +77,7 @@ def sharemod_init(env_parser, logger): username = env_parser.get_value('variables', 'username') password = env_parser.get_value('variables', 'password') conn = utils.get_conn(uri, username, password) -if not conn +if not conn: return 1 # initialize conn object in sharedmod -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH] Fix problems in creating netfs pool and logical volume
create_netfs_pool: In rhel7 the source nfs mount dir shows in /proc/mounts will end with a '/' which in rhel6 and rhel5 didn't. So the check with exact match will fail on rhel7. Change with '/*' will accept with both '/' exist or not. create_logical_volume: When grep lvcreate command in /etc/lvm/backup/logical_pool for create logical volume, rhel6 and rhel7 command is slightly differnet with rhel5, so make the change to fit for all version. Signed-off-by: Wayne Sun g...@redhat.com --- repos/storage/create_logical_volume.py |6 +++--- repos/storage/create_netfs_pool.py |2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/repos/storage/create_logical_volume.py b/repos/storage/create_logical_volume.py index 098c148..9fd1305 100644 --- a/repos/storage/create_logical_volume.py +++ b/repos/storage/create_logical_volume.py @@ -54,9 +54,9 @@ def check_volume_create(poolobj, poolname, volname, size): logger.debug(%s file path: %s % (poolname, path)) if os.access(path, os.R_OK): logger.debug(execute grep lvcreate %s command % path) -stat, ret = commands.getstatusoutput(grep \ -'lvcreate --name %s -L %sK /dev/%s' %s\ - % (volname, size, poolname, path)) +stat, ret = commands.getstatusoutput( +grep 'lvcreate --name %s -L %sK .*%s' %s +% (volname, size, poolname, path)) if stat == 0 and volname in poolobj.listVolumes(): logger.debug(ret) return True diff --git a/repos/storage/create_netfs_pool.py b/repos/storage/create_netfs_pool.py index 9fb7b69..4603788 100644 --- a/repos/storage/create_netfs_pool.py +++ b/repos/storage/create_netfs_pool.py @@ -44,7 +44,7 @@ def check_pool_create_OS(conn, poolname, logger): (src_host, src_path, dest_path) ) fd = open(/proc/mounts,r) mount = src_host + : + src_path -pat = mount + \s+ + dest_path +pat = mount + /*\s+ + dest_path found = 0 for line in fd: if re.match(pat, line): -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH] utils: Update remote_exec_pexpect function and sync cases
After delete duplicate remote_exec_pexect function, the left function with same name causes some problem with cases, so update the function and sync all cases using it. Signed-off-by: Wayne Sun g...@redhat.com --- repos/domain/cpu_topology.py|8 +++--- repos/domain/define.py |7 +++-- repos/remoteAccess/tcp_setup.py | 36 +- repos/remoteAccess/tls_setup.py | 41 --- utils/utils.py | 19 - 5 files changed, 56 insertions(+), 55 deletions(-) diff --git a/repos/domain/cpu_topology.py b/repos/domain/cpu_topology.py index 5f4ef52..14fe67d 100644 --- a/repos/domain/cpu_topology.py +++ b/repos/domain/cpu_topology.py @@ -126,17 +126,17 @@ def cpu_topology_chk(ip, username, password, lscpu = lscpu # sleep for 5 seconds time.sleep(40) -ret, output = utils.remote_exec_pexpect(ip, username, password, lscpu) +ret = utils.remote_exec_pexpect(ip, username, password, lscpu) logger.debug(lscpu:) -logger.debug(output) -if ret: +logger.debug(ret) +if ret == TIMEOUT!!!: logger.error(failed to run lscpu on guest OS) return 1 int = 0 actual_thread = actual_core = actual_socket = '' -for item in output.strip().split('\r'): +for item in ret.split('\r'): if int == 5: actual_thread = item.split()[-1] logger.info(the actual thread in the guest is %s % actual_thread) diff --git a/repos/domain/define.py b/repos/domain/define.py index a70ddad..2b127a7 100644 --- a/repos/domain/define.py +++ b/repos/domain/define.py @@ -44,9 +44,10 @@ def check_define_domain(guestname, virt_type, hostname, username, \ if hostname: cmd = ls %s % path -ret, output = utils.remote_exec_pexpect(hostname, username, \ - password, cmd) -if ret: +ret = utils.remote_exec_pexpect(hostname, username, password, cmd) +cmd = grep 'No such file' %s % ret +ret = utils.exec_cmd(cmd) +if ret == 0: logger.error(guest %s xml file doesn't exsits % guestname) return False else: diff --git a/repos/remoteAccess/tcp_setup.py b/repos/remoteAccess/tcp_setup.py index ddbe333..7a963e7 100644 --- a/repos/remoteAccess/tcp_setup.py +++ 
b/repos/remoteAccess/tcp_setup.py @@ -26,9 +26,9 @@ def sasl_user_add(target_machine, username, password, logger): execute saslpasswd2 to add sasl user logger.info(add sasl user on server side) saslpasswd2_add = echo %s | %s -a libvirt %s % (password, SASLPASSWD2, username) -ret, output = utils.remote_exec_pexpect(target_machine, username, +ret = utils.remote_exec_pexpect(target_machine, username, password, saslpasswd2_add) -if ret: +if ret == TIMEOUT!!!: logger.error(failed to add sasl user) return 1 @@ -40,18 +40,18 @@ def tcp_libvirtd_set(target_machine, username, password, logger.info(setting libvirtd.conf on libvirt server) # open libvirtd --listen option listen_open_cmd = echo 'LIBVIRTD_ARGS=\--listen\' %s % SYSCONFIG_LIBVIRTD -ret, output = utils.remote_exec_pexpect(target_machine, username, +ret = utils.remote_exec_pexpect(target_machine, username, password, listen_open_cmd) -if ret: +if ret == TIMEOUT!!!: logger.error(failed to uncomment --listen in %s % SYSCONFIG_LIBVIRTD) return 1 # set listen_tls logger.info(set listen_tls to 0 in %s % LIBVIRTD_CONF) listen_tls_disable = echo \listen_tls = 0\ %s % LIBVIRTD_CONF -ret, output = utils.remote_exec_pexpect(target_machine, username, +ret = utils.remote_exec_pexpect(target_machine, username, password, listen_tls_disable) -if ret: +if ret == TIMEOUT!!!: logger.error(failed to set listen_tls to 0 in %s % LIBVIRTD_CONF) return 1 @@ -59,27 +59,27 @@ def tcp_libvirtd_set(target_machine, username, password, if listen_tcp == 'enable': logger.info(enable listen_tcp = 1 in %s % LIBVIRTD_CONF) listen_tcp_set = echo 'listen_tcp = 1' %s % LIBVIRTD_CONF -ret, output = utils.remote_exec_pexpect(target_machine, username, +ret = utils.remote_exec_pexpect(target_machine, username, password, listen_tcp_set) -if ret: +if ret == TIMEOUT!!!: logger.error(failed to set listen_tcp in %s % LIBVIRTD_CONF) return 1 # set auth_tcp logger.info(set auth_tcp to \%s\ in %s % (auth_tcp, LIBVIRTD_CONF)) auth_tcp_set = echo 'auth_tcp = \%s\' 
%s % (auth_tcp, LIBVIRTD_CONF) -ret, output = utils.remote_exec_pexpect(target_machine, username, +ret = utils.remote_exec_pexpect(target_machine, username
[libvirt] [test-API][PATCH 2/3] Modify function name to avoid keyword 'check' in framework
Modfiy the function name to avoid generating check case and doing params check before the case run. Signed-off-by: Wayne Sun g...@redhat.com --- repos/domain/cpu_topology.py |4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/repos/domain/cpu_topology.py b/repos/domain/cpu_topology.py index f0f081c..5f4ef52 100644 --- a/repos/domain/cpu_topology.py +++ b/repos/domain/cpu_topology.py @@ -120,7 +120,7 @@ def guest_start(domobj, guestname, logger): return 0, ip -def cpu_topology_check(ip, username, password, +def cpu_topology_chk(ip, username, password, sockets, cores, threads, logger): login the guest, run lscpu command to check the result lscpu = lscpu @@ -194,7 +194,7 @@ def cpu_topology(params): if ret: return 1 -if cpu_topology_check(ip, username, password, +if cpu_topology_chk(ip, username, password, sockets, cores, threads, logger): return 1 -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH 3/3] Target path should not contain line breaks in pool xml
The xml for define and create pool is with line switch in target path. For aa.xml: pool type=netfs namenetfs_pool/name source host name=192.168.0.121/ dir path=/dir/ format type=nfs/ /source target path /tmp/netfs /path /target /pool virsh pool-create aa.xml error: Failed to create pool from aa.xml error: cannot open path ' /tmp/netfs ': No such file or directory Signed-off-by: Wayne Sun g...@redhat.com --- repos/storage/xmls/netfs_pool.xml |4 +--- 1 files changed, 1 insertions(+), 3 deletions(-) diff --git a/repos/storage/xmls/netfs_pool.xml b/repos/storage/xmls/netfs_pool.xml index 309a652..d8b88c2 100644 --- a/repos/storage/xmls/netfs_pool.xml +++ b/repos/storage/xmls/netfs_pool.xml @@ -6,8 +6,6 @@ format type=nfs/ /source target -path - TARGETPATH -/path +pathTARGETPATH/path /target /pool -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH 1/3] Delete the duplicate function
Signed-off-by: Wayne Sun g...@redhat.com --- utils/utils.py | 18 -- 1 files changed, 0 insertions(+), 18 deletions(-) diff --git a/utils/utils.py b/utils/utils.py index eade10d..b174a58 100644 --- a/utils/utils.py +++ b/utils/utils.py @@ -514,24 +514,6 @@ def remote_exec(hostname, username, password, cmd): subproc_flag = 0 return -1 -def remote_exec_pexpect(hostname, username, password, cmd): -Remote exec function via pexpect -user_hostname = %s@%s % (username, hostname) -child = pexpect.spawn(/usr/bin/ssh, [user_hostname, cmd], - timeout = 60, maxread = 2000, logfile = None) -#child.logfile = sys.stdout -while True: -index = child.expect(['(yes\/no)', 'password:', pexpect.EOF, - pexpect.TIMEOUT]) -if index == 0: -child.sendline(yes) -elif index == 1: -child.sendline(password) -elif index == 2: -return string.strip(child.before) -elif index == 3: -return TIMEOUT!!! - def get_remote_vcpus(hostname, username, password): Get cpu number of specified host cmd = cat /proc/cpuinfo | grep processor | wc -l -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH 1/2] New get_conn function in utils
The get_conn function return connection object from libvirt module. This function could be used by both framework and testcases. The patch includes: * get_conn in utils/utils.py * sync env_inspect.py using the new function Signed-off-by: Wayne Sun g...@redhat.com --- src/env_inspect.py | 22 ++ utils/utils.py | 27 +++ 2 files changed, 29 insertions(+), 20 deletions(-) diff --git a/src/env_inspect.py b/src/env_inspect.py index b260ff8..2c1a701 100644 --- a/src/env_inspect.py +++ b/src/env_inspect.py @@ -20,6 +20,7 @@ import commands import libvirt import sharedmod +from utils import utils def check_libvirt(logger): virsh = 'virsh -v' @@ -68,20 +69,6 @@ def hostinfo(logger): return 1 return 0 -def request_credentials(credentials, user_data): -for credential in credentials: -if credential[0] == libvirt.VIR_CRED_AUTHNAME: -credential[4] = user_data[0] - -if len(credential[4]) == 0: -credential[4] = credential[3] -elif credential[0] == libvirt.VIR_CRED_PASSPHRASE: -credential[4] = user_data[1] -else: -return -1 - -return 0 - def sharemod_init(env_parser, logger): get connection object from libvirt module initialize sharemod for use by testcases @@ -89,12 +76,7 @@ def sharemod_init(env_parser, logger): uri = env_parser.get_value('variables', 'defaulturi') username = env_parser.get_value('variables', 'username') password = env_parser.get_value('variables', 'password') -user_data = [username, password] -auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE], request_credentials, user_data] -conn = libvirt.openAuth(uri, auth, 0) -if not conn: -logger.error(Failed to setup libvirt connection); -return 1 +conn = utils.get_conn(uri, username, password) # initialize conn object in sharedmod sharedmod.libvirtobj.clear() diff --git a/utils/utils.py b/utils/utils.py index be87cdc..eade10d 100644 --- a/utils/utils.py +++ b/utils/utils.py @@ -29,6 +29,7 @@ import struct import pexpect import string import subprocess +import libvirt from xml.dom import minidom from urlparse 
import urlparse @@ -57,6 +58,32 @@ def get_uri(ip): uri = qemu+ssh://%s/system % ip return uri +def request_credentials(credentials, user_data): +for credential in credentials: +if credential[0] == libvirt.VIR_CRED_AUTHNAME: +credential[4] = user_data[0] + +if len(credential[4]) == 0: +credential[4] = credential[3] +elif credential[0] == libvirt.VIR_CRED_PASSPHRASE: +credential[4] = user_data[1] +else: +return -1 + +return 0 + +def get_conn(uri='', username='', password=''): + get connection object from libvirt module + +user_data = [username, password] +auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE], request_credentials, user_data] +conn = libvirt.openAuth(uri, auth, 0) +if not conn: +logger.error(Failed to setup libvirt connection); +sys.exit(1) +else: +return conn + def parse_uri(uri): # This is a simple parser for uri return urlparse(uri) -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH 2/2] Update cases with libvirtd restart problem
Restart libvirtd during test will break conn from framework. Update cases with: * Add notes in case description * Using get_conn to reconnect in cases Signed-off-by: Wayne Sun g...@redhat.com --- repos/domain/ownership_test.py |6 -- repos/libvirtd/qemu_hang.py |1 - repos/libvirtd/restart.py |3 +++ repos/libvirtd/upstart.py |3 +++ repos/sVirt/domain_nfs_start.py | 11 --- 5 files changed, 18 insertions(+), 6 deletions(-) diff --git a/repos/domain/ownership_test.py b/repos/domain/ownership_test.py index b479708..33b57e2 100644 --- a/repos/domain/ownership_test.py +++ b/repos/domain/ownership_test.py @@ -3,6 +3,9 @@ # check the ownership of saved domain file. Test could # be on local or root_squash nfs. The default owner of # the saved domain file is qemu:qemu in this case. +# +# NOTES: Libvirtd will be restarted during test, better run this +# case alone. import os import re @@ -11,7 +14,6 @@ import sys import libvirt from libvirt import libvirtError -from src import sharedmod from utils import utils required_params = ('guestname', 'dynamic_ownership', 'use_nfs',) @@ -180,7 +182,7 @@ def ownership_test(params): logger.error(failed to prepare the environment) return 1 -conn = sharedmod.libvirtobj['conn'] +conn = utils.get_conn() # save domain to the file logger.info(save domain %s to the file %s % (guestname, SAVE_FILE)) diff --git a/repos/libvirtd/qemu_hang.py b/repos/libvirtd/qemu_hang.py index 7a58f50..9127ed6 100644 --- a/repos/libvirtd/qemu_hang.py +++ b/repos/libvirtd/qemu_hang.py @@ -17,7 +17,6 @@ required_params = ('guestname',) optional_params = {} VIRSH_LIST = virsh list --all -RESTART_CMD = service libvirtd restart def check_domain_running(conn, guestname, logger): check if the domain exists, may or may not be active diff --git a/repos/libvirtd/restart.py b/repos/libvirtd/restart.py index 803fa2e..e66f30a 100644 --- a/repos/libvirtd/restart.py +++ b/repos/libvirtd/restart.py @@ -2,6 +2,9 @@ # Restart libvirtd testing. 
A running guest is required in # this test. During libvirtd restart, the guest remains # running and not affected by libvirtd restart. +# +# NOTES: Libvirtd will be restarted during test, better run this +# case alone. import os import re diff --git a/repos/libvirtd/upstart.py b/repos/libvirtd/upstart.py index 13cb349..c57ba1c 100644 --- a/repos/libvirtd/upstart.py +++ b/repos/libvirtd/upstart.py @@ -1,5 +1,8 @@ #!/usr/bin/env python # Upstart libvirtd testing +# +# NOTES: Libvirtd will be restarted during test, better run this +# case alone. import os import re diff --git a/repos/sVirt/domain_nfs_start.py b/repos/sVirt/domain_nfs_start.py index 88d349c..5ce9a7a 100644 --- a/repos/sVirt/domain_nfs_start.py +++ b/repos/sVirt/domain_nfs_start.py @@ -5,6 +5,9 @@ # check whether the guest can be started or not. The nfs could # be root_squash or no_root_squash. SElinux should be enabled # and enforcing on host. +# +# NOTES: Libvirtd will be restarted during test, better run this +# case alone. 
import os import re @@ -12,7 +15,6 @@ import sys import libvirt from libvirt import libvirtError - from src import sharedmod from utils import utils from shutil import copy @@ -171,6 +173,9 @@ def domain_nfs_start(params): logger.error(failed to prepare the environment) return 1 +# reconnect libvirt +conn = utils.get_conn() + domobj = conn.lookupByName(guestname) logger.info(begin to test start domain from nfs storage) @@ -283,7 +288,7 @@ def domain_nfs_start(params): logger.error(Error: fail to get domain %s state % guestname) return 1 -if state != shutoff: +if state != libvirt.VIR_DOMAIN_SHUTOFF: logger.info(shut down the domain %s % guestname) try: domobj.destroy() @@ -407,7 +412,7 @@ def domain_nfs_start_clean(params): # Connect to local hypervisor connection URI -conn = sharedmod.libvirtobj['conn'] +conn = utils.get_conn() domobj = conn.lookupByName(guestname) if domobj.info()[0] != libvirt.VIR_DOMAIN_SHUTOFF: -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [test-API][PATCH 1/2] New get_conn function in utils
On 08/16/2012 06:07 PM, Osier Yang wrote: On 2012年08月16日 17:00, Wayne Sun wrote: The get_conn function return connection object from libvirt module. This function could be used by both framework and testcases. The patch includes: * get_conn in utils/utils.py * sync env_inspect.py using the new function Signed-off-by: Wayne Sung...@redhat.com --- src/env_inspect.py | 22 ++ utils/utils.py | 27 +++ 2 files changed, 29 insertions(+), 20 deletions(-) diff --git a/src/env_inspect.py b/src/env_inspect.py index b260ff8..2c1a701 100644 --- a/src/env_inspect.py +++ b/src/env_inspect.py @@ -20,6 +20,7 @@ import commands import libvirt import sharedmod +from utils import utils def check_libvirt(logger): virsh = 'virsh -v' @@ -68,20 +69,6 @@ def hostinfo(logger): return 1 return 0 -def request_credentials(credentials, user_data): -for credential in credentials: -if credential[0] == libvirt.VIR_CRED_AUTHNAME: -credential[4] = user_data[0] - -if len(credential[4]) == 0: -credential[4] = credential[3] -elif credential[0] == libvirt.VIR_CRED_PASSPHRASE: -credential[4] = user_data[1] -else: -return -1 - -return 0 - def sharemod_init(env_parser, logger): get connection object from libvirt module initialize sharemod for use by testcases @@ -89,12 +76,7 @@ def sharemod_init(env_parser, logger): uri = env_parser.get_value('variables', 'defaulturi') username = env_parser.get_value('variables', 'username') password = env_parser.get_value('variables', 'password') -user_data = [username, password] -auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE], request_credentials, user_data] -conn = libvirt.openAuth(uri, auth, 0) -if not conn: -logger.error(Failed to setup libvirt connection); -return 1 +conn = utils.get_conn(uri, username, password) # initialize conn object in sharedmod sharedmod.libvirtobj.clear() diff --git a/utils/utils.py b/utils/utils.py index be87cdc..eade10d 100644 --- a/utils/utils.py +++ b/utils/utils.py @@ -29,6 +29,7 @@ import struct import pexpect import 
string import subprocess +import libvirt from xml.dom import minidom from urlparse import urlparse @@ -57,6 +58,32 @@ def get_uri(ip): uri = qemu+ssh://%s/system % ip return uri +def request_credentials(credentials, user_data): +for credential in credentials: +if credential[0] == libvirt.VIR_CRED_AUTHNAME: +credential[4] = user_data[0] + +if len(credential[4]) == 0: +credential[4] = credential[3] +elif credential[0] == libvirt.VIR_CRED_PASSPHRASE: +credential[4] = user_data[1] +else: +return -1 + +return 0 + +def get_conn(uri='', username='', password=''): + get connection object from libvirt module + +user_data = [username, password] +auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE], request_credentials, user_data] +conn = libvirt.openAuth(uri, auth, 0) +if not conn: +logger.error(Failed to setup libvirt connection); +sys.exit(1) +else: +return conn Isn't there a shared 'conn' in sharemod.py? Yes, but it will be broke when restart libvirtd and some case do need restart libvirtd. In those cases need a to get a new 'conn', so add this function for this. The 'conn' in sharemod.py is from sharemod_init in env_inspect.py, extract the get connection method from there to utils for benefit of the framework and testcases. Wayne Sun + def parse_uri(uri): # This is a simple parser for uri return urlparse(uri) -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [test-API][PATCH 1/2] Reconnect libvirt after libvirtd restart
On 08/07/2012 09:15 PM, Guannan Ren wrote: On 08/06/2012 03:46 PM, Wayne Sun wrote: In domain_nfs_start case, libvirtd will be restarted during test, which broke existing connection. User need re-init connection in test case, for this: * Using sharedmod data dictionary to store Envparser class in generator. * Do not clear data dictionary in env_inspect, user can update it or framework release it at last. * Using sharemod_init in env_inspect to re-init conn in domain_nfs_start. For this case, it's better not to use the connection object offered by framework. The case could create its own connection through case options. Yes, the first thought is to create the connection in the case, but since multiple cases have this problem and the get connection code is reusable, so i directly using the sharemod_init function in env_inspect. This is not right, it's better create a shared get_conn function in utils for both framework and testcase. Then, env_inspect could use it and also testcases have the reconnect problem. I'll send v2 for this, thx! Wayne Sun 2012-08-08 Signed-off-by: Wayne Sun g...@redhat.com --- repos/sVirt/domain_nfs_start.py | 11 +-- src/env_inspect.py |1 - src/generator.py|2 ++ 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/repos/sVirt/domain_nfs_start.py b/repos/sVirt/domain_nfs_start.py index 88d349c..4d48d97 100644 --- a/repos/sVirt/domain_nfs_start.py +++ b/repos/sVirt/domain_nfs_start.py @@ -12,7 +12,7 @@ import sys import libvirt from libvirt import libvirtError - +from src import env_inspect The env_inspect is framework module. It is not recommanded to use directly in testcase. 
from src import sharedmod from utils import utils from shutil import copy @@ -163,6 +163,8 @@ def domain_nfs_start(params): logger.error(Error: fail to get domain %s xml % guestname) return 1 +conn.close() + # set env logger.info(prepare the environment) ret = prepare_env(dynamic_ownership, virt_use_nfs, guestname, \ @@ -171,6 +173,11 @@ def domain_nfs_start(params): logger.error(failed to prepare the environment) return 1 +# reconnect libvirt +env = sharedmod.data['env'] +env_inspect.sharemod_init(env, logger) +conn = sharedmod.libvirtobj['conn'] you could create own connection rather than use the connection object offerred by framework. + domobj = conn.lookupByName(guestname) logger.info(begin to test start domain from nfs storage) @@ -283,7 +290,7 @@ def domain_nfs_start(params): logger.error(Error: fail to get domain %s state % guestname) return 1 -if state != shutoff: +if state != libvirt.VIR_DOMAIN_SHUTOFF: logger.info(shut down the domain %s % guestname) try: domobj.destroy() diff --git a/src/env_inspect.py b/src/env_inspect.py index b260ff8..a6dc4b1 100644 --- a/src/env_inspect.py +++ b/src/env_inspect.py @@ -98,7 +98,6 @@ def sharemod_init(env_parser, logger): # initialize conn object in sharedmod sharedmod.libvirtobj.clear() -sharedmod.data.clear() sharedmod.libvirtobj['conn'] = conn return 0 diff --git a/src/generator.py b/src/generator.py index 0cdc9de..f01f2fb 100644 --- a/src/generator.py +++ b/src/generator.py @@ -30,6 +30,7 @@ from testcasexml import xml_file_to_str import env_parser import env_inspect import format +import sharedmod class FuncGen(object): To generate a callable testcase @@ -56,6 +57,7 @@ class FuncGen(object): self.__case_info_save(activity, testrunid) self.env = env_parser.Envparser(global.cfg) +sharedmod.data['env'] = self.env mapper_obj = mapper.Mapper(activity) case_list = mapper_obj.module_casename_func_map() -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH v2] Reconnect libvirt after libvirtd restart
In domain_nfs_start case, libvirtd will be restarted during test, which broke existing connection. User need re-init connection in test case, for this: * New get_conn function in utils for get libvirt connection * sharemod_init in env_inspect use get_conn to get libvirt connection * In case domain_nfs_start, use get_conn to get new connectin after libvirtd restarted. Signed-off-by: Wayne Sun g...@redhat.com --- repos/sVirt/domain_nfs_start.py |9 +++-- src/env_inspect.py | 22 ++ utils/utils.py | 29 - 3 files changed, 37 insertions(+), 23 deletions(-) diff --git a/repos/sVirt/domain_nfs_start.py b/repos/sVirt/domain_nfs_start.py index 88d349c..5475945 100644 --- a/repos/sVirt/domain_nfs_start.py +++ b/repos/sVirt/domain_nfs_start.py @@ -12,7 +12,6 @@ import sys import libvirt from libvirt import libvirtError - from src import sharedmod from utils import utils from shutil import copy @@ -163,6 +162,8 @@ def domain_nfs_start(params): logger.error(Error: fail to get domain %s xml % guestname) return 1 +conn.close() + # set env logger.info(prepare the environment) ret = prepare_env(dynamic_ownership, virt_use_nfs, guestname, \ @@ -171,6 +172,10 @@ def domain_nfs_start(params): logger.error(failed to prepare the environment) return 1 +# reconnect libvirt +conn = utils.get_conn() +sharedmod.libvirtobj['conn'] = conn + domobj = conn.lookupByName(guestname) logger.info(begin to test start domain from nfs storage) @@ -283,7 +288,7 @@ def domain_nfs_start(params): logger.error(Error: fail to get domain %s state % guestname) return 1 -if state != shutoff: +if state != libvirt.VIR_DOMAIN_SHUTOFF: logger.info(shut down the domain %s % guestname) try: domobj.destroy() diff --git a/src/env_inspect.py b/src/env_inspect.py index b260ff8..2c1a701 100644 --- a/src/env_inspect.py +++ b/src/env_inspect.py @@ -20,6 +20,7 @@ import commands import libvirt import sharedmod +from utils import utils def check_libvirt(logger): virsh = 'virsh -v' @@ -68,20 +69,6 @@ def hostinfo(logger): return 
1 return 0 -def request_credentials(credentials, user_data): -for credential in credentials: -if credential[0] == libvirt.VIR_CRED_AUTHNAME: -credential[4] = user_data[0] - -if len(credential[4]) == 0: -credential[4] = credential[3] -elif credential[0] == libvirt.VIR_CRED_PASSPHRASE: -credential[4] = user_data[1] -else: -return -1 - -return 0 - def sharemod_init(env_parser, logger): get connection object from libvirt module initialize sharemod for use by testcases @@ -89,12 +76,7 @@ def sharemod_init(env_parser, logger): uri = env_parser.get_value('variables', 'defaulturi') username = env_parser.get_value('variables', 'username') password = env_parser.get_value('variables', 'password') -user_data = [username, password] -auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE], request_credentials, user_data] -conn = libvirt.openAuth(uri, auth, 0) -if not conn: -logger.error(Failed to setup libvirt connection); -return 1 +conn = utils.get_conn(uri, username, password) # initialize conn object in sharedmod sharedmod.libvirtobj.clear() diff --git a/utils/utils.py b/utils/utils.py index be87cdc..9167c29 100644 --- a/utils/utils.py +++ b/utils/utils.py @@ -29,6 +29,7 @@ import struct import pexpect import string import subprocess +import libvirt from xml.dom import minidom from urlparse import urlparse @@ -57,6 +58,32 @@ def get_uri(ip): uri = qemu+ssh://%s/system % ip return uri +def request_credentials(credentials, user_data): +for credential in credentials: +if credential[0] == libvirt.VIR_CRED_AUTHNAME: +credential[4] = user_data[0] + +if len(credential[4]) == 0: +credential[4] = credential[3] +elif credential[0] == libvirt.VIR_CRED_PASSPHRASE: +credential[4] = user_data[1] +else: +return -1 + +return 0 + +def get_conn(uri=None, username='', password=''): + get connection object from libvirt module + +user_data = [username, password] +auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE], request_credentials, user_data] +conn = 
libvirt.openAuth(uri, auth, 0) +if not conn: +logger.error(Failed to setup libvirt connection); +sys.exit(1) +else: +return conn + def parse_uri(uri): # This is a simple parser for uri return urlparse(uri) @@ -501,7 +528,7 @@ def remote_exec_pexpect(hostname, username, password, cmd): elif index == 1: child.sendline(password) elif index
[libvirt] [test-API][PATCH] Delete the unused util param
The util is undefined and cause case run fail, it's with no use and should be deleted. Signed-off-by: Wayne Sun g...@redhat.com --- repos/interface/create.py |4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/repos/interface/create.py b/repos/interface/create.py index 50d92d2..f5ef308 100644 --- a/repos/interface/create.py +++ b/repos/interface/create.py @@ -25,7 +25,7 @@ def display_current_interface(conn): logger.debug(current defined host interface list: %s \ % conn.listDefinedInterfaces()) -def check_create_interface(ifacename, util): +def check_create_interface(ifacename): Check creating interface result, it will can ping itself if create interface is successful. @@ -67,7 +67,7 @@ def create(params): ifaceobj.create(0) logger.info(create host interface %s % ifacename) display_current_interface(conn) -if check_create_interface(ifacename, util): +if check_create_interface(ifacename): logger.info(create host interface %s is successful % ifacename) else: logger.error(fail to check create interface) -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH 1/2] Reconnect libvirt after libvirtd restart
In domain_nfs_start case, libvirtd will be restarted during test, which broke existing connection. User need re-init connection in test case, for this: * Using sharedmod data dictionary to store Envparser class in generator. * Do not clear data dictionary in env_inspect, user can update it or framework release it at last. * Using sharemod_init in env_inspect to re-init conn in domain_nfs_start. Signed-off-by: Wayne Sun g...@redhat.com --- repos/sVirt/domain_nfs_start.py | 11 +-- src/env_inspect.py |1 - src/generator.py|2 ++ 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/repos/sVirt/domain_nfs_start.py b/repos/sVirt/domain_nfs_start.py index 88d349c..4d48d97 100644 --- a/repos/sVirt/domain_nfs_start.py +++ b/repos/sVirt/domain_nfs_start.py @@ -12,7 +12,7 @@ import sys import libvirt from libvirt import libvirtError - +from src import env_inspect from src import sharedmod from utils import utils from shutil import copy @@ -163,6 +163,8 @@ def domain_nfs_start(params): logger.error(Error: fail to get domain %s xml % guestname) return 1 +conn.close() + # set env logger.info(prepare the environment) ret = prepare_env(dynamic_ownership, virt_use_nfs, guestname, \ @@ -171,6 +173,11 @@ def domain_nfs_start(params): logger.error(failed to prepare the environment) return 1 +# reconnect libvirt +env = sharedmod.data['env'] +env_inspect.sharemod_init(env, logger) +conn = sharedmod.libvirtobj['conn'] + domobj = conn.lookupByName(guestname) logger.info(begin to test start domain from nfs storage) @@ -283,7 +290,7 @@ def domain_nfs_start(params): logger.error(Error: fail to get domain %s state % guestname) return 1 -if state != shutoff: +if state != libvirt.VIR_DOMAIN_SHUTOFF: logger.info(shut down the domain %s % guestname) try: domobj.destroy() diff --git a/src/env_inspect.py b/src/env_inspect.py index b260ff8..a6dc4b1 100644 --- a/src/env_inspect.py +++ b/src/env_inspect.py @@ -98,7 +98,6 @@ def sharemod_init(env_parser, logger): # initialize conn object in 
sharedmod sharedmod.libvirtobj.clear() -sharedmod.data.clear() sharedmod.libvirtobj['conn'] = conn return 0 diff --git a/src/generator.py b/src/generator.py index 0cdc9de..f01f2fb 100644 --- a/src/generator.py +++ b/src/generator.py @@ -30,6 +30,7 @@ from testcasexml import xml_file_to_str import env_parser import env_inspect import format +import sharedmod class FuncGen(object): To generate a callable testcase @@ -56,6 +57,7 @@ class FuncGen(object): self.__case_info_save(activity, testrunid) self.env = env_parser.Envparser(global.cfg) +sharedmod.data['env'] = self.env mapper_obj = mapper.Mapper(activity) case_list = mapper_obj.module_casename_func_map() -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH 2/2] Replace env.conf with global.conf in env_parser
Only update the expection info when parsing. Signed-off-by: Wayne Sun g...@redhat.com --- src/env_parser.py | 22 +++--- 1 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/env_parser.py b/src/env_parser.py index f02af57..ddda727 100644 --- a/src/env_parser.py +++ b/src/env_parser.py @@ -30,7 +30,7 @@ class Envparser(object): self.cfg.read(configfile) else: raise exception.FileDoesNotExist( -env.conf is not a regular file or nonexist) +global.conf is not a regular file or nonexist) def has_section(self, section): if self.cfg.has_section(section): @@ -46,7 +46,7 @@ class Envparser(object): return False else: raise exception.SectionDoesNotExist( -In env.conf, the section %s is nonexist % section) +In global.conf, the section %s is nonexist % section) def sections_list(self): return self.cfg.sections() @@ -56,7 +56,7 @@ class Envparser(object): return self.cfg.options(section) else: raise exception.SectionDoesNotExist( -In env.conf, the section %s is nonexist % section) +In global.conf, the section %s is nonexist % section) def get_value(self, section, option): if self.has_section: @@ -64,17 +64,17 @@ class Envparser(object): return self.cfg.get(section, option) else: raise exception.OptionDoesNotExist( -In env.conf, the option %s is nonexist % option) +In global.conf, the option %s is nonexist % option) else: raise exception.SectionDoesNotExist( -In env.conf, the section %s is nonexist % section) +In global.conf, the section %s is nonexist % section) def get_items(self, section): if self.has_section: return self.cfg.items(section) else: raise exception.SectionDoesNotExist( -In env.conf, the section %s is nonexist % section) +In global.conf, the section %s is nonexist % section) def add_section(self, section): if self.has_section: @@ -91,10 +91,10 @@ class Envparser(object): return True else: raise exception.OptionDoesNotExist( -In env.conf, the option %s is nonexist % option) +In global.conf, the option %s is nonexist % option) else: raise 
exception.SectionDoesNotExist( -In env.conf, the section %s is nonexist % section) +In global.conf, the section %s is nonexist % section) def remove_section(self, section): if self.has_section: @@ -102,7 +102,7 @@ class Envparser(object): return True else: raise exception.SectionDoesNotExist( -In env.conf, the section %s is nonexist % section) +In global.conf, the section %s is nonexist % section) def set_value(self, section, option, value): if self.has_section: @@ -111,6 +111,6 @@ class Envparser(object): return True else: raise exception.OptionDoesNotExist( -In env.conf, the option %s is nonexist % option) +In global.conf, the option %s is nonexist % option) raise exception.SectionDoesNotExist( -In env.conf, the section %s is nonexist % section) +In global.conf, the section %s is nonexist % section) -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH] Fix xml parser problem when a node has both attribute and value
When xml node have both attribute and value at first level, the parser will broke. After fix, the node key will have a dictionary with both value and attr inside. For example, the xml node: capacity unit='bytes'536870912000/capacity will be parsed into: {u'capacity': {'attr': {u'unit': u'bytes'}, 'value': u'536870912000'}} Also when fetch the attribute key, should use a new param (attrkey) other than exist key in outside loop. Signed-off-by: Wayne Sun g...@redhat.com --- utils/xml_parser.py | 22 +- 1 files changed, 17 insertions(+), 5 deletions(-) diff --git a/utils/xml_parser.py b/utils/xml_parser.py index 04e7501..01b928f 100644 --- a/utils/xml_parser.py +++ b/utils/xml_parser.py @@ -88,15 +88,21 @@ class xml_parser(object): if thenode.attributes != None: tmpattr = dict() if thenode.attributes.length 0: -for key in thenode.attributes.keys(): +for attrkey in thenode.attributes.keys(): tmpattr.update( -{key:thenode.attributes.get(key).nodeValue}) + {attrkey:thenode.attributes.get(attrkey).nodeValue}) attrdic = { attr:tmpattr } if key in out: if out[key] == None: -out[key] = value if attrdic != None: -out[key].update(attrdic) +if value == None: +out[key] = attrdic +else: +valdic = { value:value } +valdic.update(attrdic) +out[key] = valdic +else: +out[key] = value elif type(out[key]) == list: if attrdic != None: newdict.update(attrdic) @@ -111,7 +117,13 @@ class xml_parser(object): else: out[key] = value if attrdic != None: -out[key].update(attrdic) +if value == None: +newdict[key] = attrdic +else: +valdic = { value:value } +valdic.update(attrdic) +newdict = valdic +out[key] = newdict self.parseintodict(thenode, level+1, out, key) return out -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH] Fix logical volume create problem
* capacity should be int type * delete unused capacity suffix convert * fix xml param name Signed-off-by: Wayne Sun g...@redhat.com --- repos/storage/create_logical_volume.py |6 ++ 1 files changed, 2 insertions(+), 4 deletions(-) diff --git a/repos/storage/create_logical_volume.py b/repos/storage/create_logical_volume.py index 170bbf5..098c148 100644 --- a/repos/storage/create_logical_volume.py +++ b/repos/storage/create_logical_volume.py @@ -73,10 +73,8 @@ def create_logical_volume(params): logger = params['logger'] poolname = params['poolname'] volname = params['volname'] -capacity = params['capacity'] -xmlstr = params['xmlstr'] - -dicts = utils.get_capacity_suffix_size(capacity) +capacity = int(params['capacity']) +xmlstr = params['xml'] conn = sharedmod.libvirtobj['conn'] pool_names = conn.listDefinedStoragePools() -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH 2/2] Define domain from xml should support image format option
* add imageformat as optional param in define * modify define xml for image format support Signed-off-by: Wayne Sun g...@redhat.com --- repos/domain/define.py |1 + repos/domain/xmls/kvm_guest_define.xml |2 +- 2 files changed, 2 insertions(+), 1 deletions(-) diff --git a/repos/domain/define.py b/repos/domain/define.py index 0a06dfd..a70ddad 100644 --- a/repos/domain/define.py +++ b/repos/domain/define.py @@ -16,6 +16,7 @@ from utils import utils required_params = ('guestname', 'diskpath',) optional_params = {'memory': 1048576, 'vcpu': 1, + 'imageformat' : 'raw', 'hddriver' : 'virtio', 'nicdriver': 'virtio', 'macaddr': '52:54:00:97:e4:28', diff --git a/repos/domain/xmls/kvm_guest_define.xml b/repos/domain/xmls/kvm_guest_define.xml index 4599b2e..6da0ad4 100644 --- a/repos/domain/xmls/kvm_guest_define.xml +++ b/repos/domain/xmls/kvm_guest_define.xml @@ -19,7 +19,7 @@ on_crashrestart/on_crash devices disk type='file' device='disk' - driver name='qemu' type='qcow2'/ + driver name='qemu' type='IMAGEFORMAT'/ source file='DISKPATH'/ target dev='vda' bus='HDDRIVER'/ /disk -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH 1/2] Fix the undefined local variable problem in remote_exec
* subproc_flag should be global when used between functions * print error msg when exception happened Signed-off-by: Wayne Sun g...@redhat.com --- utils/utils.py |5 + 1 files changed, 5 insertions(+), 0 deletions(-) diff --git a/utils/utils.py b/utils/utils.py index 455e9cf..be87cdc 100644 --- a/utils/utils.py +++ b/utils/utils.py @@ -32,6 +32,8 @@ import subprocess from xml.dom import minidom from urlparse import urlparse +subproc_flag = 0 + def get_hypervisor(): if commands.getoutput(lsmod | grep kvm): return 'kvm' @@ -439,10 +441,12 @@ def support_virt(self): return True def subproc(a, b): +global subproc_flag subproc_flag = 1 def remote_exec(hostname, username, password, cmd): Remote execution on specified host +global subproc_flag pid, fd = pty.fork() if pid == 0: try: @@ -479,6 +483,7 @@ def remote_exec(hostname, username, password, cmd): subproc_flag = 0 return ret except Exception, e: +print e subproc_flag = 0 return -1 -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH] Add 3 APIs in snapshotAPI
* new APIs get_parent(self, domname, snapname, flag = 0) children_num(self, domname, snapname, flag) children_names_list(self, domname, snapname, flag) the flag in children_num and children_names_list could be in 0-7, which is the OR operation result of (VIR_DOMAIN_SNAPSHOT_LIST_DESCENDANTS | VIR_DOMAIN_SNAPSHOT_LIST_METADATA | VIR_DOMAIN_SNAPSHOT_LIST_LEAVES) * deleted redundancy spaces and fixed a typo in function domain() * getConnect(), getDomain() and getName() API are not added in, current snapshotAPI requires user to offer connection obj, domain name and snapshot name. --- lib/snapshotAPI.py | 59 --- 1 files changed, 46 insertions(+), 13 deletions(-) diff --git a/lib/snapshotAPI.py b/lib/snapshotAPI.py index 43ccb89..d866b60 100644 --- a/lib/snapshotAPI.py +++ b/lib/snapshotAPI.py @@ -24,14 +24,14 @@ import libvirt import re import os -def append_path(path): +def append_path(path): Append root path of package if path in sys.path: pass else: sys.path.append(path) - -pwd = os.getcwd() + +pwd = os.getcwd() result = re.search('(.*)libvirt-test-API', pwd) append_path(result.group(0)) @@ -39,7 +39,7 @@ import exception class SnapshotAPI(object): def __init__(self, connection): -self.conn = connection +self.conn = connection def create(self, domname, xml_desc, flag = 0): try: @@ -57,7 +57,7 @@ class SnapshotAPI(object): except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() -raise exception.LibvirtAPI(message, code) +raise exception.LibvirtAPI(message, code) def snapshot_name_list(self, domname, flag = 0): try: @@ -66,7 +66,7 @@ class SnapshotAPI(object): except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() -raise exception.LibvirtAPI(message, code) +raise exception.LibvirtAPI(message, code) def snapshot_nums(self, domname, flag = 0): try: @@ -95,31 +95,64 @@ class SnapshotAPI(object): message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) - + def 
delete(self, domname, snapname, flag = 0): try: -snap = self.snapshot_lookup_by_name(domname, snapname, flag = 0) +snap = self.snapshot_lookup_by_name(domname, snapname) return snap.delete(flag) except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() -raise exception.LibvirtAPI(message, code) +raise exception.LibvirtAPI(message, code) def get_xml_desc(self, domname, snapname, flag = 0): try: -snap = self.snapshot_lookup_by_name(domname, snapname, flag = 0) +snap = self.snapshot_lookup_by_name(domname, snapname) return snap.getXMLDesc(flag) except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def domain(self, domname): +def domain(self, domname, snapname): try: -snap = self.snapshot_lookup_by_name(domname, snapname, flag = 0) +snap = self.snapshot_lookup_by_name(domname, snapname) return snap.domain() except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) - + +def get_parent(self, domname, snapname, flag = 0): +try: +snap = self.snapshot_lookup_by_name(domname, snapname) +return snap.getParent(flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def children_num(self, domname, snapname, flag): +try: +snap = self.snapshot_lookup_by_name(domname, snapname) +return snap.numChildren(flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def children_names_list(self, domname, snapname, flag): +try: +snap = self.snapshot_lookup_by_name(domname, snapname) +return snap.listChildrenNames(flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +# virDomainSnapshotListFlags +VIR_DOMAIN_SNAPSHOT_LIST_DESCENDANTS = 1 
+VIR_DOMAIN_SNAPSHOT_LIST_ROOTS
[libvirt] [test-API][PATCH] Add new APIs and fix problems in connectAPI
* add 8 new APIs get_sys_info(self, flag = 0) get_memory_stats(self, cellNum, flag = 0) get_cpu_stats(self, cpuNum, flag = 0) is_alive(self) change_begin(self, flag = 0) change_commit(self, flag = 0) change_rollback(self, flag = 0) suspend_for_duration(self, target, duration, flag = 0) * remove outdated ref(self, uri) function * fix typo in following functions migrate(self, domain, flags, dname, uri, bandwidth) networkLookupByUUID(self, uuid) numOfDefinedDomains(self) numOfDomains(self) nwfilterLookupByUUID(self, uuid) --- lib/connectAPI.py | 83 ++--- 1 files changed, 72 insertions(+), 11 deletions(-) diff --git a/lib/connectAPI.py b/lib/connectAPI.py index 9f2b728..87b0a59 100644 --- a/lib/connectAPI.py +++ b/lib/connectAPI.py @@ -105,6 +105,15 @@ class ConnectAPI(object): code = e.get_error_code() raise exception.LibvirtAPI(message, code) +def get_sys_info(self, flag = 0): +try: +sysinfo = self.conn.getSysinfo(flag) +return sysinfo +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + def get_type(self): try: type = self.conn.getType() @@ -123,19 +132,18 @@ class ConnectAPI(object): code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def ref(self, uri): +def get_cells_free_memory(self, startCell, maxCells): try: -refer = self.conn.ref() -return refer +cfreemem = self.conn.getCellsFreeMemory(startCell, maxCells) +return cfreemem except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def get_cells_free_memory(self, startCell, maxCells): +def get_memory_stats(self, cellNum, flag = 0): try: -cfreemem = self.conn.getCellsFreeMemory(startCell, maxCells) -return cfreemem +return self.conn.getMemoryStats(cellNum, flag) except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() @@ -158,6 +166,14 @@ class ConnectAPI(object): code = e.get_error_code() raise 
exception.LibvirtAPI(message, code) +def get_cpu_stats(self, cpuNum, flag = 0): +try: +return self.conn.getCPUStats(cpuNum, flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + def compare_cpu(self, xmlDesc, flag = 0): try: return self.conn.compareCPU(xmlDesc, flag) @@ -208,6 +224,14 @@ class ConnectAPI(object): code = e.get_error_code() raise exception.LibvirtAPI(message, code) +def is_alive(self): +try: +return self.conn.isAlive() +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + def isEncrypted(self): try: return self.conn.isEncrypted() @@ -307,7 +331,7 @@ class ConnectAPI(object): def migrate(self, domain, flags, dname, uri, bandwidth): try: -return self.migrate(self, domain, flags, dname, uri, bandwidth) +return self.conn.migrate(domain, flags, dname, uri, bandwidth) except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() @@ -315,7 +339,7 @@ class ConnectAPI(object): def networkLookupByUUID(self, uuid): try: -return self.networkLookupByUUID(self, uuid) +return self.conn.networkLookupByUUID(uuid) except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() @@ -323,7 +347,7 @@ class ConnectAPI(object): def numOfDefinedDomains(self): try: -return self.numOfDefinedDomains(self) +return self.conn.numOfDefinedDomains() except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() @@ -331,7 +355,7 @@ class ConnectAPI(object): def numOfDomains(self): try: -return self.numOfDomains(self) +return self.conn.numOfDomains() except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() @@ -339,7 +363,39 @@ class ConnectAPI(object): def nwfilterLookupByUUID(self, uuid): try: -return self.nwfilterLookupByUUID(self, uuid) +
[libvirt] [test-API][PATCH] Add 8 new APIs in domainAPI
* new APIs reset(self, domname, flag = 0) get_vcpu_pin_info(self, domname, flag) is_updated(self, domname) inject_NMI(self, domname, flag = 0) open_console(self, domname, dev_name, stream, flag = 0) open_graphics(self, domname, idx, fd, flag = 1) screenshot(self, domname, stream, screen, flag = 0) get_migrate_max_speed(self, domname, flag = 0) A bug is filed to migrateGetMaxSpeed() about parameters, it'll affect get_migrate_max_speed() function, but the parameters should remain the status quo. Change will be followed up anyway. open_graphics() will not work now for lack qemu support, missing add_client qemu command. for dev_name in open_console, pass the alias name of console, serial or parallel port. * deleted outdated function get_ref(self, domname) --- lib/domainAPI.py | 91 +- 1 files changed, 83 insertions(+), 8 deletions(-) diff --git a/lib/domainAPI.py b/lib/domainAPI.py index 91f2ba3..a8086ab 100644 --- a/lib/domainAPI.py +++ b/lib/domainAPI.py @@ -261,6 +261,16 @@ class DomainAPI(object): code = e.get_error_code() raise exception.LibvirtAPI(message, code) +def reset(self, domname, flag = 0): +try: +dom_obj = self.get_domain_by_name(domname) +retval = dom_obj.reset(flag) +return retval +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + def get_info(self, domname): try: dom_obj = self.get_domain_by_name(domname) @@ -335,36 +345,46 @@ class DomainAPI(object): code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def get_ref(self, domname): +def get_vcpus(self, domname): try: dom_obj = self.get_domain_by_name(domname) -ref = dom_obj.ref() -return ref +vcpu_info = dom_obj.vcpus() +return vcpu_info except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def get_vcpus(self, domname): +def set_pin_vcpu(self, domname, vcpu, cpumap): try: dom_obj = self.get_domain_by_name(domname) -vcpu_info 
= dom_obj.vcpus() -return vcpu_info +pin_vcpu = dom_obj.pinVcpu(vcpu, cpumap) +return pin_vcpu except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def set_pin_vcpu(self, domname, vcpu, cpumap): +def set_pin_vcpu_flags(self, domname, vcpu, cpumap, flag): try: dom_obj = self.get_domain_by_name(domname) -pin_vcpu = dom_obj.pinVcpu(vcpu, cpumap) +pin_vcpu = dom_obj.pinVcpuFlags(vcpu, cpumap, flag) return pin_vcpu except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) +def get_vcpu_pin_info(self, domname, flag): +try: +dom_obj = self.get_domain_by_name(domname) +pin_vcpu_info = dom_obj.vcpuPinInfo(flag) +return pin_vcpu_info +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + def get_uuid(self, domname): try: dom_obj = self.get_domain_by_name(domname) @@ -716,6 +736,15 @@ class DomainAPI(object): code = e.get_error_code() raise exception.LibvirtAPI(message, code) +def is_updated(self, domname): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.isUpdated() +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + def job_info(self, domname): try: dom_obj = self.get_domain_by_name(domname) @@ -878,6 +907,50 @@ class DomainAPI(object): code = e.get_error_code() raise exception.LibvirtAPI(message, code) +def inject_NMI(self, domname, flag = 0): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.injectNMI(flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def open_console(self, domname, dev_name, stream, flag = 0): +try: +dom_obj = self.get_domain_by_name(domname) +
[libvirt] [test-API][PATCH v2 2/2] Add and update functions in streamAPI
* accpet connection object and stream flag as parameter in __init__ remove connect function for connction is given in as parameter. * remove newStream() from each function create new stream object in __init__. function also need flags parameter. * Add 5 new functions screenshot(self, domain, screen, flags = 0) download(self, vol, offset, length, flags = 0) upload(self, vol, offset, length, flags = 0) recvAll(self, handler, opaque) sendAll(self, handler, opaque) for recvAll and sendAll, handler is a user defined function which write/read data to/from file. --- lib/streamAPI.py | 75 -- 1 files changed, 50 insertions(+), 25 deletions(-) diff --git a/lib/streamAPI.py b/lib/streamAPI.py index bc7d217..4da57ca 100644 --- a/lib/streamAPI.py +++ b/lib/streamAPI.py @@ -38,76 +38,101 @@ append_path(result.group(0)) import exception class StreamAPI(object): -def __init__(self, connection): +def __init__(self, connection, flag = 0): self.conn = connection +self.stream_obj = self.conn.newStream(flag) -def abort(self, flag = 0): +def abort(self): try: -stream_obj = newStream(flag) -return stream_obj.abort() +return self.stream_obj.abort() except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def connect(self, flag = 0): +def finish(self): try: -stream_obj = newStream(flag) -return stream_obj.connect() +return self.stream_obj.finish() except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def finish(self, flag = 0): +def recv(self, nbytes): try: -stream_obj = newStream(flag) -return stream_obj.finish() +return self.stream_obj.recv(nbytes) except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def recv(self, flag = 0, data, nbytes): +def send(self, data): try: -stream_obj = newStream(flag) -return stream_obj.recv(data, nbytes) +return 
self.stream_obj.send(data) except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def send(self, flag = 0, data, nbytes): +def eventAddCallback(self, cb, opaque): try: -stream_obj = newStream(flag) -return stream_obj.send(data, nbytes) +return self.stream_obj.eventAddCallback(cb, opaque) except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def eventAddCallback(self, flag = 0, cb, opaque): +def eventRemoveCallback(self): try: -stream_obj = newStream(flag) -return stream_obj.eventAddCallback(cb, opaque) +return self.stream_obj.eventRemoveCallback() except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def eventRemoveCallback(self, flag = 0): +def eventUpdateCallback(self, events): try: -stream_obj = newStream(flag) -return stream_obj.eventRemoveCallback() +return self.stream_obj.eventUpdateCallback(events) except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def eventUpdateCallback(self, flag = 0, events) +def screenshot(self, domain, screen, flags = 0): try: -stream_obj = newStream(flag) -return stream_obj.eventUpdateCallback(events) +return self.stream_obj.screenshot(domain, screen, flags) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def download(self, vol, offset, length, flags = 0): +try: +return self.stream_obj.download(vol, offset, length, flags) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def upload(self, vol, offset, length, flags = 0): +try: +return self.stream_obj.upload(vol, offset, length, flags) +except
[libvirt] [test-API][PATCH 2/2] Add and update functions in streamAPI
* only accpet stream object as parameter in __init__ * remove newStream() from each function stream object should be pass in as parameter, not create new at each function. function also need flags parameter. * Add 5 new functions screenshot(self, domain, screen, flags = 0) download(self, vol, offset, length, flags = 0) upload(self, vol, offset, length, flags = 0) recvAll(self, handler, opaque) sendAll(self, handler, opaque) for recvAll and sendAll, handler is a user defined function which write/read data to/from file. --- lib/streamAPI.py | 84 + 1 files changed, 58 insertions(+), 26 deletions(-) diff --git a/lib/streamAPI.py b/lib/streamAPI.py index bc7d217..a1b2d0d 100644 --- a/lib/streamAPI.py +++ b/lib/streamAPI.py @@ -38,76 +38,108 @@ append_path(result.group(0)) import exception class StreamAPI(object): -def __init__(self, connection): -self.conn = connection +def __init__(self, stream): +self.stream_obj = stream -def abort(self, flag = 0): +def abort(self): try: -stream_obj = newStream(flag) -return stream_obj.abort() +return self.stream_obj.abort() except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def connect(self, flag = 0): +def connect(self): try: -stream_obj = newStream(flag) -return stream_obj.connect() +return self.stream_obj.connect() except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def finish(self, flag = 0): +def finish(self): try: -stream_obj = newStream(flag) -return stream_obj.finish() +return self.stream_obj.finish() except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def recv(self, flag = 0, data, nbytes): +def recv(self, nbytes): try: -stream_obj = newStream(flag) -return stream_obj.recv(data, nbytes) +return self.stream_obj.recv(nbytes) except libvirt.libvirtError, e: message = e.get_error_message() 
code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def send(self, flag = 0, data, nbytes): +def send(self, data): try: -stream_obj = newStream(flag) -return stream_obj.send(data, nbytes) +return self.stream_obj.send(data) except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def eventAddCallback(self, flag = 0, cb, opaque): +def eventAddCallback(self, cb, opaque): try: -stream_obj = newStream(flag) -return stream_obj.eventAddCallback(cb, opaque) +return self.stream_obj.eventAddCallback(cb, opaque) except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def eventRemoveCallback(self, flag = 0): +def eventRemoveCallback(self): try: -stream_obj = newStream(flag) -return stream_obj.eventRemoveCallback() +return self.stream_obj.eventRemoveCallback() except libvirt.libvirtError, e: message = e.get_error_message() code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def eventUpdateCallback(self, flag = 0, events) +def eventUpdateCallback(self, events): try: -stream_obj = newStream(flag) -return stream_obj.eventUpdateCallback(events) +return self.stream_obj.eventUpdateCallback(events) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def screenshot(self, domain, screen, flags = 0): +try: +return self.stream_obj.screenshot(domain, screen, flags) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def download(self, vol, offset, length, flags = 0): +try: +return self.stream_obj.download(vol, offset, length, flags) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise
[libvirt] [test-API][PATCH 1/2] Add 2 new functions in storageAPI
* download(self, poolname, volname, stream, offset, length, flags = 0) * upload(self, poolname, volname, stream, offset, length, flags = 0) --- lib/storageAPI.py | 17 + 1 files changed, 17 insertions(+), 0 deletions(-) diff --git a/lib/storageAPI.py b/lib/storageAPI.py index 6c9d286..b0733f8 100644 --- a/lib/storageAPI.py +++ b/lib/storageAPI.py @@ -466,3 +466,20 @@ class StorageAPI(object): code = e.get_error_code() raise exception.LibvirtAPI(message, code) +def download(self, poolname, volname, stream, offset, length, flags = 0): +try: +volobj = self.get_volume_obj(poolname, volname) +return volobj.download(stream, offset, length, flags) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def upload(self, poolname, volname, stream, offset, length, flags = 0): +try: +volobj = self.get_volume_obj(poolname, volname) +return volobj.upload(stream, offset, length, flags) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH 2/2] Fix a problem with a logger instance
--- utils/Python/format.py |2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/utils/Python/format.py b/utils/Python/format.py index 5de8eb0..9c119dd 100644 --- a/utils/Python/format.py +++ b/utils/Python/format.py @@ -33,7 +33,7 @@ class Format(object): def print_string(self, msg, env_logger): Only print a simple string -env_logger(msg) +env_logger.info(msg) self.write_log('\n%s' %msg) def print_start(self, msg, env_logger): -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH 1/2] Fix compatibility problem with Python 2.4
* switch rpartition with rsplit * adjust the style --- generator.py | 15 +++ 1 files changed, 11 insertions(+), 4 deletions(-) diff --git a/generator.py b/generator.py index 9a2ed06..6108963 100644 --- a/generator.py +++ b/generator.py @@ -124,10 +124,14 @@ class FuncGen(object): for i in range(loop_number): case_ref_name = self.cases_ref_names[i] -pkg_casename = case_ref_name.rpartition(:)[0] -funcname = case_ref_name.rpartition(:)[-1] +pkg_casename = case_ref_name.rsplit(:, 1)[0] +funcname = case_ref_name.rsplit(:, 1)[-1] + +if _clean not in funcname: +cleanoper = 0 +else: +cleanoper = 1 -cleanoper = 0 if _clean not in funcname else 1 if not cleanoper: self.fmt.print_start(pkg_casename, env_logger) @@ -182,7 +186,10 @@ class FuncGen(object): if not cleanoper: self.fmt.print_end(pkg_casename, ret, env_logger) else: -self.fmt.print_string(21* + Done if clean_ret 1 else 21* + Fail, env_logger) +if clean_ret 1: +self.fmt.print_string(21* + Done, env_logger) +else: +self.fmt.print_string(21* + Fail, env_logger) end_time = time.strftime(%Y-%m-%d %H:%M:%S) -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH v2] Add and update functions in domainAPI
* add 9 new block related functions block_pull(self, domname, device, bandwidth = 0, flag = 0) block_resize(self, domname, device, size, flag = 0) block_job_abort(self, domname, device, flag = 0) block_job_set_speed(self, domname, device, bandwidth, flag = 0) get_block_job_info(self, domname, device, flag = 0) get_blkio_parameters(self, domname, flag) get_block_io_tune(self, domname, device, flag) set_blkio_parameters(self, domname, params, flag) set_block_io_tune(self, domname, device, params, flag) 2 bugs related to parameters for 4 functions(get_blkio_parameters, get_block_io_tune, set_blkio_parameters and set_block_io_tune), it is considered in this patch, it could be updated later anyway. * Add 2 memory parameters function get_memory_parameters(self, domname, flag) set_memory_parameters(self, domname, params, flag) For set_memory_parameters(), the 'params' arguments should be a dictionary contains selectable keys: hard_limit, soft_limit, swap_hard_limit. e.g. {'hard_limit':10240, 'soft_limit':10, 'swap_hard_limit':102400} * Fix problems of 2 functions memory_peek(self, domname) set_sched_params_flags(self, domname, params, flags) --- lib/domainAPI.py | 103 - 1 files changed, 101 insertions(+), 2 deletions(-) diff --git a/lib/domainAPI.py b/lib/domainAPI.py index a6efab7..91f2ba3 100644 --- a/lib/domainAPI.py +++ b/lib/domainAPI.py @@ -546,7 +546,7 @@ class DomainAPI(object): def set_sched_params_flags(self, domname, params, flags): try: dom_obj = self.get_domain_by_name(domname) -retval = dom_obj.setSchedulerParameters(params, flags) +retval = dom_obj.setSchedulerParametersFlags(params, flags) return retval except libvirt.libvirtError, e: message = e.get_error_message() @@ -581,6 +581,105 @@ class DomainAPI(object): code = e.get_error_code() raise exception.LibvirtAPI(message, code) +def block_pull(self, domname, device, bandwidth = 0, flag = 0): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blockPull(device, bandwidth, flag) +except 
libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def block_resize(self, domname, device, size, flag = 0): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blockResize(device, size, flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def block_job_abort(self, domname, device, flag = 0): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blockJobAbort(device, flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def block_job_set_speed(self, domname, device, bandwidth, flag = 0): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blockJobSetSpeed(device, bandwidth, flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def get_block_job_info(self, domname, device, flag = 0): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blockJobInfo(device, flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def get_blkio_parameters(self, domname, flag): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blkioParameters(flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def get_block_io_tune(self, domname, device, flag): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blockIoTune(device, params, flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def set_blkio_parameters(self, domname, params, flag): +try: +dom_obj = self.get_domain_by_name(domname) +return 
dom_obj.setBlkioParameters(params, flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code =
Re: [libvirt] [test-API][PATCH] Add and update functions in domainAPI
Osier, Thanks, v2 have been send out. Wayne Sun Best Regards! On 12/30/2011 03:51 PM, Osier Yang wrote: On 2011年12月30日 14:47, Wayne Sun wrote: * add 9 new block related functions block_pull(self, domname, path, bandwidth = 0, flag = 0) block_resize(self, domname, path, size, flag = 0) block_job_abort(self, domname, path, flag = 0) block_job_set_speed(self, domname, path, bandwidth, flag = 0) get_block_job_info(self, domname, path, flag = 0) get_blkio_parameters(self, domname, flag) get_block_io_tune(self, domname, path, flag) set_blkio_parameters(self, domname, params, flag) set_block_io_tune(self, domname, path, params, flag) 2 bugs related to parameters for 4 functions(get_blkio_parameters, get_block_io_tune, set_blkio_parameters and set_block_io_tune), it is considered in this patch, it could be updated later anyway. Okay, * Add 2 memory parameters function get_memory_parameters(self, domname, flag) set_memory_parameters(self, domname, params, flag) For set_memory_parameters(), the 'params' arguments should be a dictionary contains selectable keys: hard_limit, soft_limit, swap_hard_limit. e.g. {'hard_limit':10240, 'soft_limit':10, 'swap_hard_limit':102400} * update 2 functions with problem Are you going to introduce problems? Perhaps Fix problems of 2 functions. memory_stats(self, domname) It's memory_peek, as far as I get from your patch. 
set_sched_params_flags(self, domname, params, flags) --- lib/domainAPI.py | 103 - 1 files changed, 101 insertions(+), 2 deletions(-) diff --git a/lib/domainAPI.py b/lib/domainAPI.py index a6efab7..1f6ef49 100644 --- a/lib/domainAPI.py +++ b/lib/domainAPI.py @@ -546,7 +546,7 @@ class DomainAPI(object): def set_sched_params_flags(self, domname, params, flags): try: dom_obj = self.get_domain_by_name(domname) -retval = dom_obj.setSchedulerParameters(params, flags) +retval = dom_obj.setSchedulerParametersFlags(params, flags) return retval except libvirt.libvirtError, e: message = e.get_error_message() @@ -581,6 +581,105 @@ class DomainAPI(object): code = e.get_error_code() raise exception.LibvirtAPI(message, code) +def block_pull(self, domname, path, bandwidth = 0, flag = 0): I'd recommend device as the parameter name instead of path, as these functions accept both the device path and device target name now. +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blockPull(path, bandwidth, flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def block_resize(self, domname, path, size, flag = 0): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blockResize(path, size, flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def block_job_abort(self, domname, path, flag = 0): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blockJobAbort(path, flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def block_job_set_speed(self, domname, path, bandwidth, flag = 0): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blockJobSetSpeed(path, bandwidth, flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() 
+raise exception.LibvirtAPI(message, code) + +def get_block_job_info(self, domname, path, flag = 0): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blockJobInfo(path, flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def get_blkio_parameters(self, domname, flag): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blkioParameters(flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def get_block_io_tune(self, domname, path, flag): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blockIoTune(path, params, flag) +except libvirt.libvirtError, e
[libvirt] [test-API][PATCH] Add and update functions in domainAPI
* add 9 new block related functions block_pull(self, domname, path, bandwidth = 0, flag = 0) block_resize(self, domname, path, size, flag = 0) block_job_abort(self, domname, path, flag = 0) block_job_set_speed(self, domname, path, bandwidth, flag = 0) get_block_job_info(self, domname, path, flag = 0) get_blkio_parameters(self, domname, flag) get_block_io_tune(self, domname, path, flag) set_blkio_parameters(self, domname, params, flag) set_block_io_tune(self, domname, path, params, flag) 2 bugs related to parameters for 4 functions(get_blkio_parameters, get_block_io_tune, set_blkio_parameters and set_block_io_tune), it is considered in this patch, it could be updated later anyway. * Add 2 memory parameters function get_memory_parameters(self, domname, flag) set_memory_parameters(self, domname, params, flag) For set_memory_parameters(), the 'params' arguments should be a dictionary contains selectable keys: hard_limit, soft_limit, swap_hard_limit. e.g. {'hard_limit':10240, 'soft_limit':10, 'swap_hard_limit':102400} * update 2 functions with problem memory_stats(self, domname) set_sched_params_flags(self, domname, params, flags) --- lib/domainAPI.py | 103 - 1 files changed, 101 insertions(+), 2 deletions(-) diff --git a/lib/domainAPI.py b/lib/domainAPI.py index a6efab7..1f6ef49 100644 --- a/lib/domainAPI.py +++ b/lib/domainAPI.py @@ -546,7 +546,7 @@ class DomainAPI(object): def set_sched_params_flags(self, domname, params, flags): try: dom_obj = self.get_domain_by_name(domname) -retval = dom_obj.setSchedulerParameters(params, flags) +retval = dom_obj.setSchedulerParametersFlags(params, flags) return retval except libvirt.libvirtError, e: message = e.get_error_message() @@ -581,6 +581,105 @@ class DomainAPI(object): code = e.get_error_code() raise exception.LibvirtAPI(message, code) +def block_pull(self, domname, path, bandwidth = 0, flag = 0): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blockPull(path, bandwidth, flag) +except 
libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def block_resize(self, domname, path, size, flag = 0): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blockResize(path, size, flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def block_job_abort(self, domname, path, flag = 0): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blockJobAbort(path, flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def block_job_set_speed(self, domname, path, bandwidth, flag = 0): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blockJobSetSpeed(path, bandwidth, flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def get_block_job_info(self, domname, path, flag = 0): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blockJobInfo(path, flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def get_blkio_parameters(self, domname, flag): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blkioParameters(flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def get_block_io_tune(self, domname, path, flag): +try: +dom_obj = self.get_domain_by_name(domname) +return dom_obj.blockIoTune(path, params, flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +raise exception.LibvirtAPI(message, code) + +def set_blkio_parameters(self, domname, params, flag): +try: +dom_obj = self.get_domain_by_name(domname) +return 
dom_obj.setBlkioParameters(params, flag) +except libvirt.libvirtError, e: +message = e.get_error_message() +code = e.get_error_code() +
Re: [libvirt] [test-API][PATCH 2/2] Add test case set_cpu_shares.py for setting cpu scheduler info
On 12/13/2011 11:45 AM, Nan Zhang wrote: * repos/domain/set_cpu_shares.py: set the value of cpu_shares property of the guest. --- repos/domain/set_cpu_shares.py | 121 1 files changed, 121 insertions(+), 0 deletions(-) create mode 100644 repos/domain/set_cpu_shares.py diff --git a/repos/domain/set_cpu_shares.py b/repos/domain/set_cpu_shares.py new file mode 100644 index 000..809ccef --- /dev/null +++ b/repos/domain/set_cpu_shares.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python +Set the value of cpu_shares property of the guest + domain:set_cpu_shares + guestname + xxx + cpu_shares + integer value + flags + current|live|config + + +__author__ = 'Nan Zhang: nzh...@redhat.com' +__date__ = 'Tue Sep 27, 2011' +__version__ = '0.1.0' +__credits__ = 'Copyright (C) 2011 Red Hat, Inc.' +__all__ = ['check_params', 'check_cpu_shares', 'set_cpu_shares'] + +import os +import re +import sys +import time time module not used. +from xml.dom import minidom + + +def append_path(path): +Append root path of package +if path in sys.path: +pass +else: +sys.path.append(path) + +pwd = os.getcwd() +result = re.search('(.*)libvirt-test-API', pwd) +append_path(result.group(0)) + +from lib import connectAPI +from lib import domainAPI +from utils.Python import utils +from utils.Python import xmlbuilder +from exception import LibvirtAPI + +def check_params(params): +Verify inputing parameter dictionary +logger = params['logger'] +keys = ['guestname', 'flags'] +for key in keys: +if key not in params: +logger.error(%s is required %key) +return 1 +return 0 + +def check_cpu_shares(params, util, guestname, cpu_shares, flags): +Check the value of cpu_shares +logger = params['logger'] +cmd = cat /cgroup/cpu/libvirt/qemu/%s/cpu.shares % guestname +ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(fail to set the value of cpu_shares: %s % out[0]) +else: +logger.info(from cgroup, the value of cpu_shares is %s % out[0]) + +if flags == domainAPI.VIR_DOMAIN_AFFECT_CONFIG: +return 0 + +if 
cmp(int(out[0]), cpu_shares): +return 1 +else: +logger.info(the value of cpu_shares does match the original \ +cpu scheduler information.) +return 0 + +def set_cpu_shares(params): +Get the cpu scheduler information s/Get/Set in function info +# Initiate and check parameters +params_check_result = check_params(params) +if params_check_result: +return 1 +logger = params['logger'] +guestname = params['guestname'] +schedinfo = {} +schedinfo['cpu_shares'] = int(params['cpu_shares']) +cpu_shares = schedinfo['cpu_shares'] + +if params['flags'] == 'current': +flags = domainAPI.VIR_DOMAIN_AFFECT_CURRENT +elif params['flags'] == 'live': +flags = domainAPI.VIR_DOMAIN_AFFECT_LIVE +elif params['flags'] == 'config': +flags = domainAPI.VIR_DOMAIN_AFFECT_CONFIG +else: +logger.error(Invalid flag was specified.) +return 1 + +# Connect to local hypervisor connection URI +util = utils.Utils() +uri = util.get_uri('127.0.0.1') +conn = connectAPI.ConnectAPI() +virconn = conn.open(uri) + +domobj = domainAPI.DomainAPI(virconn) +try: +domobj.set_sched_params_flags(guestname, schedinfo, flags) +logger.debug(set the value of cpu_shares with %s % cpu_shares) +except LibvirtAPI, e: +logger.error(API error message: %s, error code is %s % + (e.response()['message'], e.response()['code'])) +return 1 + +check_result = check_cpu_shares(params, util, guestname, cpu_shares, flags) +if check_result: +logger.error(cpu_shares does not match.) +conn.close() +return 1 + +logger.info(success to set scheduler parameters.) +conn.close() +return 0 + +def set_cpu_shares_clean(): +Clean testing environment +pass Ack, left is fine. -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [test-API][PATCH 1/2] Add test case get_cpu_shares.py for cpu scheduler info testing
On 12/13/2011 11:45 AM, Nan Zhang wrote: * repos/domain/get_cpu_shares.py: get the value of cpu_shares property of the guest. --- lib/domainAPI.py |2 +- repos/domain/get_cpu_shares.py | 117 2 files changed, 118 insertions(+), 1 deletions(-) create mode 100644 repos/domain/get_cpu_shares.py diff --git a/lib/domainAPI.py b/lib/domainAPI.py index a6efab7..0058254 100644 --- a/lib/domainAPI.py +++ b/lib/domainAPI.py @@ -546,7 +546,7 @@ class DomainAPI(object): def set_sched_params_flags(self, domname, params, flags): try: dom_obj = self.get_domain_by_name(domname) -retval = dom_obj.setSchedulerParameters(params, flags) +retval = dom_obj.setSchedulerParametersFlags(params, flags) return retval except libvirt.libvirtError, e: message = e.get_error_message() diff --git a/repos/domain/get_cpu_shares.py b/repos/domain/get_cpu_shares.py new file mode 100644 index 000..5d26e82 --- /dev/null +++ b/repos/domain/get_cpu_shares.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python +Get the value of cpu_shares property of the guest + domain:get_cpu_shares + guestname + xxx + flags + current|live|config + + +__author__ = 'Nan Zhang: nzh...@redhat.com' +__date__ = 'Tue Sep 27, 2011' +__version__ = '0.1.0' +__credits__ = 'Copyright (C) 2011 Red Hat, Inc.' 
+__all__ = ['check_params', 'check_cpu_shares', 'get_cpu_shares'] + +import os +import re +import sys +import time time module is not used +from xml.dom import minidom + + +def append_path(path): +Append root path of package +if path in sys.path: +pass +else: +sys.path.append(path) + +pwd = os.getcwd() +result = re.search('(.*)libvirt-test-API', pwd) +append_path(result.group(0)) + +from lib import connectAPI +from lib import domainAPI +from utils.Python import utils +from utils.Python import xmlbuilder +from exception import LibvirtAPI + +def check_params(params): +Verify inputing parameter dictionary +logger = params['logger'] +keys = ['guestname', 'flags'] +for key in keys: +if key not in params: +logger.error(%s is required %key) +return 1 +return 0 + +def check_cpu_shares(params, util, guestname, cpu_shares, flags): +Check the value of cpu_shares +logger = params['logger'] +cmd = cat /cgroup/cpu/libvirt/qemu/%s/cpu.shares % guestname +ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(fail to get the value of cpu_shares: %s % out[0]) +else: +logger.info(from cgroup, the value of cpu_shares: %s % out[0]) + +if flags == domainAPI.VIR_DOMAIN_AFFECT_CONFIG: +return 0 + +if cmp(int(out[0]), cpu_shares): +return 1 +else: +logger.info(the value of cpu_shares does match the original \ +cpu scheduler information.) +return 0 + +def get_cpu_shares(params): +Get the cpu scheduler information +# Initiate and check parameters +params_check_result = check_params(params) +if params_check_result: +return 1 +logger = params['logger'] +guestname = params['guestname'] + +if params['flags'] == 'current': +flags = domainAPI.VIR_DOMAIN_AFFECT_CURRENT +elif params['flags'] == 'live': +flags = domainAPI.VIR_DOMAIN_AFFECT_LIVE +elif params['flags'] == 'config': +flags = domainAPI.VIR_DOMAIN_AFFECT_CONFIG +else: +logger.error(Invalid flag was specified.) 
+return 1 + +# Connect to local hypervisor connection URI +util = utils.Utils() +uri = util.get_uri('127.0.0.1') +conn = connectAPI.ConnectAPI() +virconn = conn.open(uri) + +domobj = domainAPI.DomainAPI(virconn) +try: +sched_info = domobj.get_sched_params_flags(guestname, flags) +cpu_shares = sched_info['cpu_shares'] +logger.debug(the value of cpu_shares is %s % cpu_shares) +except LibvirtAPI, e: +logger.error(API error message: %s, error code is %s % + (e.response()['message'], e.response()['code'])) +return 1 + +check_result = check_cpu_shares(params, util, guestname, cpu_shares, flags) +if check_result: +logger.error(cpu_shares does not match.) +conn.close() +return 1 + +logger.info(success to get scheduler parameters.) +conn.close() +return 0 + +def get_cpu_shares_clean(): +Clean testing environment +pass Ack, left is fine. -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH] Update clean part for cases
* add clean function for missing ones * clean the system configuration files which been touched --- repos/domain/balloon_memory.py |2 ++ repos/domain/cpu_topology.py |5 + repos/domain/ownership_test.py | 10 ++ repos/libvirtd/qemu_hang.py | 12 ++-- repos/remoteAccess/unix_perm_sasl.py | 10 ++ repos/sVirt/domain_nfs_start.py | 10 ++ repos/snapshot/delete.py | 18 +++--- repos/snapshot/file_flag.py | 28 +--- repos/snapshot/flag_check.py |4 +++- repos/snapshot/internal_create.py|4 +++- repos/snapshot/revert.py |3 +++ repos/snapshot/snapshot_list.py | 16 +++- 12 files changed, 67 insertions(+), 55 deletions(-) diff --git a/repos/domain/balloon_memory.py b/repos/domain/balloon_memory.py index 30f5edb..0a40591 100644 --- a/repos/domain/balloon_memory.py +++ b/repos/domain/balloon_memory.py @@ -299,6 +299,8 @@ def balloon_memory(params): logger.info(the actual size of memory is \ rounded to the value %s we expected % maxmem) +util.clean_ssh() + if count: return return_close(conn, logger, 1) else: diff --git a/repos/domain/cpu_topology.py b/repos/domain/cpu_topology.py index 7202559..adf238c 100644 --- a/repos/domain/cpu_topology.py +++ b/repos/domain/cpu_topology.py @@ -246,3 +246,8 @@ def cpu_topology(params): conn.close() return 0 + +def cpu_topology_clean(params): +clean testing enviorment +return 0 + diff --git a/repos/domain/ownership_test.py b/repos/domain/ownership_test.py index 74620f4..1eacbcf 100644 --- a/repos/domain/ownership_test.py +++ b/repos/domain/ownership_test.py @@ -307,6 +307,9 @@ def ownership_test_clean(params): for i in range(len(out)): logger.error(out[i]) +clean_nfs_conf = sed -i '$d' /etc/exports +util.exec_cmd(clean_nfs_conf, shell=True) + filepath = TEMP_FILE elif use_nfs == 'disable': filepath = SAVE_FILE @@ -314,3 +317,10 @@ def ownership_test_clean(params): if os.path.exists(filepath): os.remove(filepath) +clean_qemu_conf = sed -i '$d' %s % QEMU_CONF +util.exec_cmd(clean_qemu_conf, shell=True) + +cmd = service libvirtd restart 
+util.exec_cmd(cmd, shell=True) + +return 0 diff --git a/repos/libvirtd/qemu_hang.py b/repos/libvirtd/qemu_hang.py index c97e0b7..8b1ffda 100644 --- a/repos/libvirtd/qemu_hang.py +++ b/repos/libvirtd/qemu_hang.py @@ -135,8 +135,16 @@ def qemu_hang(params): return 0 -def qemu_hang_cleanup(params): +def qemu_hang_clean(params): clean testing environment -pass +logger = params['logger'] +guestname = params['guestname'] +util = utils.Utils() +ret = get_domain_pid(util, logger, guestname) +cmd = kill -CONT %s % ret[1] +ret = util.exec_cmd(cmd, shell=True) +if ret[0]: +logger.error(failed to resume qemu process of %s % guestname) +return 0 diff --git a/repos/remoteAccess/unix_perm_sasl.py b/repos/remoteAccess/unix_perm_sasl.py index 54fa108..5db758c 100644 --- a/repos/remoteAccess/unix_perm_sasl.py +++ b/repos/remoteAccess/unix_perm_sasl.py @@ -22,6 +22,7 @@ import sys import commands from pwd import getpwnam +from utils.Python import utils def append_path(path): Append root path of package @@ -214,6 +215,7 @@ def unix_perm_sasl(params): def unix_perm_sasl_clean(params): clean testing environment logger = params['logger'] +util = utils.Utils() auth_unix_ro = params['auth_unix_ro'] auth_unix_rw = params['auth_unix_rw'] @@ -241,3 +243,11 @@ def unix_perm_sasl_clean(params): if status: logger.error(failed to delete sasl user %s % TESTING_USER) +clean_libvirtd_conf = sed -i -e :a -e '$d;N;2,3ba' -e 'P;D' %s % \ + LIBVIRTD_CONF +util.exec_cmd(clean_libvirtd_conf, shell=True) + +cmd = service libvirtd restart +util.exec_cmd(cmd, shell=True) + +return 0 diff --git a/repos/sVirt/domain_nfs_start.py b/repos/sVirt/domain_nfs_start.py index 2479366..edaf2f2 100644 --- a/repos/sVirt/domain_nfs_start.py +++ b/repos/sVirt/domain_nfs_start.py @@ -483,3 +483,13 @@ def domain_nfs_start_clean(params): conn.close() +clean_nfs_conf = sed -i '$d' /etc/exports +util.exec_cmd(clean_nfs_conf, shell=True) + +clean_qemu_conf = sed -i '$d' %s % QEMU_CONF +util.exec_cmd(clean_qemu_conf, shell=True) 
+ +cmd = service libvirtd restart +util.exec_cmd(cmd, shell=True) + +return 0 diff --git a/repos/snapshot/delete.py b/repos/snapshot/delete.py index 8a7eef6..98f82a5 100644 --- a/repos/snapshot/delete.py +++ b/repos/snapshot/delete.py @@ -124,18 +124,6 @@ def delete(params): return 0 - - - - - - - - - - - - - - - +def delete_clean(params): + clean testing environment +
[libvirt] [test-API][PATCH 2/2] Add optional argument uuid in create and install module
--- repos/domain/create.py|6 -- repos/domain/install_image.py |5 +++-- repos/domain/install_linux_cdrom.py | 12 +++- repos/domain/install_linux_net.py | 13 - repos/domain/install_windows_cdrom.py | 12 +++- 5 files changed, 29 insertions(+), 19 deletions(-) diff --git a/repos/domain/create.py b/repos/domain/create.py index a06a2d3..b6e1e66 100644 --- a/repos/domain/create.py +++ b/repos/domain/create.py @@ -3,7 +3,8 @@ create domain from xml mandatory arguments:guesttype guestname - optional arguments: memory + optional arguments: uuid + memory vcpu disksize imagepath @@ -49,7 +50,8 @@ START_PAUSED = 1 def usage(): print '''usage: mandatory arguments:guesttype guestname - optional arguments: memory + optional arguments: uuid + memory vcpu disksize imagepath diff --git a/repos/domain/install_image.py b/repos/domain/install_image.py index 3fb8970..888e80f 100644 --- a/repos/domain/install_image.py +++ b/repos/domain/install_image.py @@ -5,7 +5,8 @@ guestname guestos guestarch - optional arguments: memory + optional arguments: uuid + memory vcpu imagepath imagetype @@ -54,7 +55,7 @@ def check_params(params): Checking the arguments required params_given = copy.deepcopy(params) mandatory_args = ['guestname', 'guesttype', 'guestos', 'guestarch'] -optional_args = ['memory', 'vcpu', 'imagepath', 'imagetype', +optional_args = ['uuid', 'memory', 'vcpu', 'imagepath', 'imagetype', 'hdmodel', 'nicmodel'] for arg in mandatory_args: diff --git a/repos/domain/install_linux_cdrom.py b/repos/domain/install_linux_cdrom.py index 8d21797..79dd41c 100644 --- a/repos/domain/install_linux_cdrom.py +++ b/repos/domain/install_linux_cdrom.py @@ -5,7 +5,8 @@ guestname guestos guestarch - optional arguments: memory + optional arguments: uuid + memory vcpu disksize imagetype @@ -68,7 +69,8 @@ def return_close(conn, logger, ret): def usage(): print '''usage: mandatory arguments:guesttype guestname - optional arguments: memory + optional arguments: uuid + memory vcpu disksize imagetype @@ -86,9 
+88,9 @@ def check_params(params): Checking the arguments required params_given = copy.deepcopy(params) mandatory_args = ['guestname', 'guesttype', 'guestos', 'guestarch'] -optional_args = ['memory', 'vcpu', 'disksize', 'imagepath', 'hdmodel', - 'nicmodel', 'macaddr', 'ifacetype', 'source', 'type', - 'volumepath', 'imagetype'] +optional_args = ['uuid', 'memory', 'vcpu', 'disksize', 'imagepath', + 'hdmodel', 'nicmodel', 'macaddr', 'ifacetype', 'source', + 'type', 'volumepath', 'imagetype'] for arg in mandatory_args: if arg not in params_given.keys(): diff --git a/repos/domain/install_linux_net.py b/repos/domain/install_linux_net.py index 1b0470e..0ef5036 100644 --- a/repos/domain/install_linux_net.py +++ b/repos/domain/install_linux_net.py @@ -6,7 +6,8 @@ guestos guestarch netmethod - optional arguments: memory + optional arguments: uuid + memory vcpu disksize imagetype @@ -70,7 +71,8 @@ def usage(): guestos guestarch netmethod - optional arguments: memory + optional arguments: uuid + memory vcpu disksize imagetype @@ -89,9 +91,10 @@ def check_params(params): mandatory_args = ['guestname', 'guesttype', 'guestos', 'guestarch','netmethod'] -optional_args = ['memory', 'vcpu', 'disksize', 'imagepath', - 'hdmodel', 'nicmodel', 'ifacetype', - 'imagetype', 'source', 'type'] +optional_args = ['uuid', 'memory', 'vcpu', 'disksize', + 'imagepath', 'hdmodel', 'nicmodel', + 'ifacetype', 'imagetype', 'source', + 'type'] for arg in
[libvirt] [test-API][PATCH 1/2] Update define module with remote define
* Add support for define domain on remote host Added optional argument target_machine, username and password. If they are provided, define domain on remote host, else define domain on local. * added new optional argument uuid it supports define domain with given static uuid if it's provided in conf file, else dynamic uuid will be created for domain. For remote define with static uuid, use following: domain:define guestname $defaultos guesttype $defaulthv uuid $static_uuid target_machine $target_machine username $target_user password $target_password For local define with static uuid, use following: domain:define guestname $defaultos guesttype $defaulthv uuid $static_uuid For local define with dynamic created uuid, use following: domain:define guestname $defaultos guesttype $defaulthv --- repos/domain/define.py | 141 1 files changed, 131 insertions(+), 10 deletions(-) diff --git a/repos/domain/define.py b/repos/domain/define.py index d965f63..8f0095a 100644 --- a/repos/domain/define.py +++ b/repos/domain/define.py @@ -3,7 +3,8 @@ define domain from xml mandatory arguments:guesttype guestname - optional arguments: memory + optional arguments: uuid + memory vcpu disksize fullimagepath @@ -24,6 +25,9 @@ __all__ = ['usage', 'check_define_domain', 'define'] import os import re import sys +import commands +import string +import pexpect def append_path(path): Append root path of package @@ -40,11 +44,16 @@ from lib import connectAPI from lib import domainAPI from utils.Python import utils from utils.Python import xmlbuilder +from exception import LibvirtAPI + +SSH_KEYGEN = ssh-keygen -t rsa +SSH_COPY_ID = ssh-copy-id def usage(): print '''usage: mandatory arguments:guesttype guestname - optional arguments: memory + optional arguments: uuid + memory vcpu disksize fullimagepath @@ -54,6 +63,9 @@ def usage(): macaddr ifacetype source + target_machine + username + password ''' def check_params(params): @@ -67,7 +79,61 @@ def check_params(params): return 1 return 0 -def 
check_define_domain(guestname, guesttype, logger): +def ssh_keygen(logger): +using pexpect to generate RSA +logger.info(generate ssh RSA \%s\ % SSH_KEYGEN) +child = pexpect.spawn(SSH_KEYGEN) +while True: +index = child.expect(['Enter file in which to save the key ', + 'Enter passphrase ', + 'Enter same passphrase again: ', + pexpect.EOF, + pexpect.TIMEOUT]) +if index == 0: +child.sendline(\r) +elif index == 1: +child.sendline(\r) +elif index == 2: +child.sendline(\r) +elif index == 3: +logger.debug(string.strip(child.before)) +child.close() +return 0 +elif index == 4: +logger.error(ssh_keygen timeout) +logger.debug(string.strip(child.before)) +child.close() +return 1 + +return 0 + +def ssh_tunnel(hostname, username, password, logger): +setup a tunnel to a give host +logger.info(setup ssh tunnel with host %s % hostname) +user_host = %s@%s % (username, hostname) +child = pexpect.spawn(SSH_COPY_ID, [ user_host]) +while True: +index = child.expect(['yes\/no', 'password: ', + pexpect.EOF, + pexpect.TIMEOUT]) +if index == 0: +child.sendline(yes) +elif index == 1: +child.sendline(password) +elif index == 2: +logger.debug(string.strip(child.before)) +child.close() +return 0 +elif index == 3: +logger.error(setup tunnel timeout) +logger.debug(string.strip(child.before)) +child.close() +return 1 + +return 0 + +def check_define_domain(guestname, guesttype, target_machine, username, \ +password, util, logger): Check define domain result, if define domain is successful, guestname.xml will exist under /etc/libvirt/qemu/ and can use virt-xml-validate tool to check the file validity @@
[libvirt] (no subject)
In the last patch updating the migrate module, the remote domain define function was deleted from migrate. This fix now adds that function into the define module. Also update the related modules — create, define and installation — for the newly added optional uuid argument. [test-API][PATCH 1/2] Update define module with remote define [test-API][PATCH 2/2] Add optional argument uuid in create and install module repos/domain/create.py|6 +- repos/domain/define.py| 141 +++-- repos/domain/install_image.py |5 +- repos/domain/install_linux_cdrom.py | 12 +-- repos/domain/install_linux_net.py | 13 +-- repos/domain/install_windows_cdrom.py | 12 +-- 6 files changed, 29 insertions(+), 160 deletions(-) -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH 0/5] Update migrate module
Current migrate module include the remote_guest_define function which has problem with transient guest created on local, it can't find persistent domain xml on local and copy to remote. This series fixes aim to solve this problem. First, remote_guest_define function in migrate is removed, then add support for define domain on remote in define module. New xml element uuid is added into xmlgenerator, and new variable static_uuid is introduced into env.cfg, this will help domain created on local and defined on remote get the same uuid. At last, all related conf files are updated. [test-API][PATCH 1/5] Remove remote_guest_define function in migrate [test-API][PATCH 2/5] Add new variable static_uuid into env.cfg [test-API][PATCH 3/5] Update add_option_value function in parser [test-API][PATCH 4/5] Add new element uuid in xmlgenerator [test-API][PATCH 5/5] Update the migration case conf file .../migration/ssh_persistent_paused_with_dst.conf | 192 --- .../migration/ssh_persistent_running_with_dst.conf | 192 --- cases/migration/ssh_transient_paused_with_dst.conf | 200 .../migration/ssh_transient_running_with_dst.conf | 198 --- .../migration/tcp_persistent_paused_with_dst.conf | 192 --- .../migration/tcp_persistent_running_with_dst.conf | 192 --- .../tcp_sasl_persistent_paused_with_dst.conf | 64 --- .../tcp_sasl_persistent_running_with_dst.conf | 64 --- .../tcp_sasl_transient_paused_with_dst.conf| 64 --- .../tcp_sasl_transient_running_with_dst.conf | 64 --- cases/migration/tcp_transient_paused_with_dst.conf | 200 .../migration/tcp_transient_running_with_dst.conf | 198 --- .../migration/tls_persistent_paused_with_dst.conf | 192 --- .../migration/tls_persistent_running_with_dst.conf | 192 --- .../tls_sasl_persistent_paused_with_dst.conf | 64 --- .../tls_sasl_persistent_running_with_dst.conf | 64 --- .../tls_sasl_transient_paused_with_dst.conf| 64 --- .../tls_sasl_transient_running_with_dst.conf | 64 --- cases/migration/tls_transient_paused_with_dst.conf | 200 
.../migration/tls_transient_running_with_dst.conf | 198 --- env.cfg|3 - parser.py | 12 +- repos/domain/migrate.py| 31 +++- utils/Python/xmlgenerator.py |7 - 24 files changed, 33 insertions(+), 2878 deletions(-) -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH 4/5] Add new element uuid in xmlgenerator
* the uuid xml element will be created and appended to the domain xml only if the uuid option in params is provided --- utils/Python/xmlgenerator.py |7 +++ 1 files changed, 7 insertions(+), 0 deletions(-) diff --git a/utils/Python/xmlgenerator.py b/utils/Python/xmlgenerator.py index d57dd33..4aa045a 100644 --- a/utils/Python/xmlgenerator.py +++ b/utils/Python/xmlgenerator.py @@ -45,6 +45,13 @@ def domain_xml(params, install = False): name_element.appendChild(name_node) domain_element.appendChild(name_element) +# uuid +if params.has_key('uuid'): +uuid_element = domain.createElement('uuid') +uuid_node = domain.createTextNode(params['uuid']) +uuid_element.appendChild(uuid_node) +domain_element.appendChild(uuid_element) + # memory memory_element = domain.createElement('memory') if params.has_key('memory'): -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH 1/5] Remove remote_guest_define function in migrate
* this function will fail when domain is created on local, no persistent config xml generated, so can't be copied and used on dst for define domain --- repos/domain/migrate.py | 31 +-- 1 files changed, 5 insertions(+), 26 deletions(-) diff --git a/repos/domain/migrate.py b/repos/domain/migrate.py index 6b86188..5414c32 100644 --- a/repos/domain/migrate.py +++ b/repos/domain/migrate.py @@ -64,7 +64,6 @@ from exception import LibvirtAPI SSH_KEYGEN = ssh-keygen -t rsa SSH_COPY_ID = ssh-copy-id -GUEST_XML = /etc/libvirt/qemu/%s.xml def exec_command(logger, command, flag): execute shell command @@ -167,28 +166,6 @@ def ssh_tunnel(hostname, username, password, logger): return 0 -def remote_guest_define(target_machine, username, guestname, logger): -copy guest xml description to target machine and define it -xml_file = GUEST_XML % guestname - -if not os.path.exists(xml_file): -logger.error(guest %s xml file doesn't exsits % guestname) -return 1 - -SCP_CMD = scp %s %s@%s:/tmp %(xml_file, username, target_machine) -status, ret = exec_command(logger, SCP_CMD, 0) -if status: -logger.error(copy guest file failed) -return 1 - -VIRSH_DEFINE = ssh %s \virsh define /tmp/%s.xml\ % (target_machine, guestname) -status, ret = exec_command(logger, VIRSH_DEFINE, 0) -if status: -logger.error(faied to define guest on target machine) -return 1 - -return 0 - def migrate(params): migrate a guest back and forth between two machines logger = params['logger'] @@ -258,12 +235,14 @@ def migrate(params): dstdom = DomainAPI(dst) if predstconfig == true: -ret = remote_guest_define(target_machine, username, guestname, logger) -if ret: +guest_names = dstdom.get_defined_list() +if guestname in guest_names: +logger.info(Dst VM exists) +else: +logger.error(Dst VM missing config, should define VM on Dst first) env_clean(src, dst, srcdom, dstdom, target_machine, guestname, logger) return 1 - try: if(migflags VIR_MIGRATE_PEER2PEER): logger.info(use migrate_to_uri() API to migrate) -- 1.7.1 -- libvir-list 
mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH 3/5] Update add_option_value function in parser
* The option should be added to the last dictionary in caselist. The old function will cause problems when multiple dictionaries in caselist have the same testkey. --- parser.py | 12 +--- 1 files changed, 5 insertions(+), 7 deletions(-) diff --git a/parser.py b/parser.py index 8d41776..085a1f2 100644 --- a/parser.py +++ b/parser.py @@ -125,13 +125,11 @@ class CaseFileParser(object): def add_option_value(self, caselist, casename, option, value): Add option to the data list. -for dictionary in caselist: -testkey = dictionary.keys()[0] -if casename == testkey: -if not dictionary[testkey].has_key(option): -dictionary[testkey][option] = value -else: -continue +dictionary = caselist[-1] +testkey = dictionary.keys()[0] +if casename == testkey: +if not dictionary[testkey].has_key(option): +dictionary[testkey][option] = value def debug_print(self, str1, str2=): Nicely print two strings and an arrow. For internal use. -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH 2/5] Add new variable static_uuid into env.cfg
--- env.cfg |3 +++ 1 files changed, 3 insertions(+), 0 deletions(-) diff --git a/env.cfg b/env.cfg index b5e404a..f0373b6 100644 --- a/env.cfg +++ b/env.cfg @@ -112,6 +112,9 @@ defaultvcpu = 1 # default the memory size(kilobytes) to use for defining or installing a guest defaultmem = 1048576 +# static uuid for define, create and installing a guest +static_uuid = 05867c1a-afeb-300e-e55e-2673391ae080 + # path to a disk image containing a preinstalled guest for testing testfullimagepath = /var/lib/libvirt/images/f14.img -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH 5/5] Update the migration case conf file
For patch size consideration, only one file chage is display here --- .../migration/ssh_persistent_paused_with_dst.conf | 192 +++ .../migration/ssh_persistent_running_with_dst.conf | 192 +++ cases/migration/ssh_transient_paused_with_dst.conf | 200 .../migration/ssh_transient_running_with_dst.conf | 198 +++ .../migration/tcp_persistent_paused_with_dst.conf | 192 +++ .../migration/tcp_persistent_running_with_dst.conf | 192 +++ .../tcp_sasl_persistent_paused_with_dst.conf | 64 +++ .../tcp_sasl_persistent_running_with_dst.conf | 64 +++ .../tcp_sasl_transient_paused_with_dst.conf| 64 +++ .../tcp_sasl_transient_running_with_dst.conf | 64 +++ cases/migration/tcp_transient_paused_with_dst.conf | 200 .../migration/tcp_transient_running_with_dst.conf | 198 +++ .../migration/tls_persistent_paused_with_dst.conf | 192 +++ .../migration/tls_persistent_running_with_dst.conf | 192 +++ .../tls_sasl_persistent_paused_with_dst.conf | 64 +++ .../tls_sasl_persistent_running_with_dst.conf | 64 +++ .../tls_sasl_transient_paused_with_dst.conf| 64 +++ .../tls_sasl_transient_running_with_dst.conf | 64 +++ cases/migration/tls_transient_paused_with_dst.conf | 200 .../migration/tls_transient_running_with_dst.conf | 198 +++ 20 files changed, 2858 insertions(+), 0 deletions(-) diff --git a/cases/migration/ssh_persistent_paused_with_dst.conf b/cases/migration/ssh_persistent_paused_with_dst.conf index 67e4191..05f9277 100644 --- a/cases/migration/ssh_persistent_paused_with_dst.conf +++ b/cases/migration/ssh_persistent_paused_with_dst.conf @@ -7,6 +7,22 @@ domain:define $defaultos guesttype $defaulthv +uuid +$static_uuid + +domain:define +guestname +$defaultos +guesttype +$defaulthv +uuid +$static_uuid +target_machine +$target_machine +username +$target_user +password +$target_password domain:start guestname @@ -45,6 +61,22 @@ domain:define $defaultos guesttype $defaulthv +uuid +$static_uuid + +domain:define +guestname +$defaultos +guesttype +$defaulthv +uuid +$static_uuid +target_machine 
+$target_machine +username +$target_user +password +$target_password domain:start guestname @@ -83,6 +115,22 @@ domain:define $defaultos guesttype $defaulthv +uuid +$static_uuid + +domain:define +guestname +$defaultos +guesttype +$defaulthv +uuid +$static_uuid +target_machine +$target_machine +username +$target_user +password +$target_password domain:start guestname @@ -121,6 +169,22 @@ domain:define $defaultos guesttype $defaulthv +uuid +$static_uuid + +domain:define +guestname +$defaultos +guesttype +$defaulthv +uuid +$static_uuid +target_machine +$target_machine +username +$target_user +password +$target_password domain:start guestname @@ -159,6 +223,22 @@ domain:define $defaultos guesttype $defaulthv +uuid +$static_uuid + +domain:define +guestname +$defaultos +guesttype +$defaulthv +uuid +$static_uuid +target_machine +$target_machine +username +$target_user +password +$target_password domain:start guestname @@ -197,6 +277,22 @@ domain:define $defaultos guesttype $defaulthv +uuid +$static_uuid + +domain:define +guestname +$defaultos +guesttype +$defaulthv +uuid +$static_uuid +target_machine +$target_machine +username +$target_user +password +$target_password domain:start guestname @@ -235,6 +331,22 @@ domain:define $defaultos guesttype $defaulthv +uuid +$static_uuid + +domain:define +guestname +$defaultos +guesttype +$defaulthv +uuid +$static_uuid +target_machine +$target_machine +username +$target_user +password +$target_password domain:start guestname @@ -273,6 +385,22 @@ domain:define $defaultos guesttype $defaulthv +uuid +$static_uuid + +domain:define +guestname +$defaultos +guesttype +$defaulthv +uuid +$static_uuid +target_machine +$target_machine +username +$target_user +password +$target_password
[libvirt] [test-API][PATCH] Fix typos for existing problems
--- cases/basic_nodedevice.conf |2 +- lib/nodedevAPI.py |2 +- lib/storageAPI.py |2 +- repos/nodedevice/detach.py| 22 +++--- repos/storage/define_scsi_pool.py |2 +- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/cases/basic_nodedevice.conf b/cases/basic_nodedevice.conf index cea431e..85a68f7 100644 --- a/cases/basic_nodedevice.conf +++ b/cases/basic_nodedevice.conf @@ -1,4 +1,4 @@ -nodedevice:dettach +nodedevice:detach pciaddress $testpci diff --git a/lib/nodedevAPI.py b/lib/nodedevAPI.py index 8d588c4..cc96909 100644 --- a/lib/nodedevAPI.py +++ b/lib/nodedevAPI.py @@ -185,7 +185,7 @@ class NodedevAPI: code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def connect(self, name) +def connect(self, name): try: obj = self.lookup_by_name(name) return obj.connect() diff --git a/lib/storageAPI.py b/lib/storageAPI.py index 2d75a0b..6c9d286 100644 --- a/lib/storageAPI.py +++ b/lib/storageAPI.py @@ -231,7 +231,7 @@ class StorageAPI(object): code = e.get_error_code() raise exception.LibvirtAPI(message, code) -def delete_pool(self, poolname, flags = 1): +def delete_pool(self, poolname, flags = 0): try: pool_obj = self.get_pool_obj(poolname) return pool_obj.delete(flags) diff --git a/repos/nodedevice/detach.py b/repos/nodedevice/detach.py index ac9aa89..18e792e 100644 --- a/repos/nodedevice/detach.py +++ b/repos/nodedevice/detach.py @@ -1,13 +1,13 @@ #!/usr/bin/env python this test case is used for testing - dettach a specific node device + detach a specific node device __author__ = 'Alex Jia: a...@redhat.com' __date__ = 'Tue Apr 6, 2010' __version__ = '0.1.0' __credits__ = 'Copyright (C) 2009 Red Hat, Inc.' 
-__all__ = ['usage', 'check_node_dettach', 'dettach'] +__all__ = ['usage', 'check_node_detach', 'detach'] import os @@ -45,8 +45,8 @@ def usage(params): else: pass -def check_node_dettach(pciaddress): -Check node device dettach result, if dettachment is successful, the +def check_node_detach(pciaddress): +Check node device detach result, if detachment is successful, the device host driver should be hided and the device should be bound to pci-stub driver, argument 'address' is a address of the node device @@ -64,7 +64,7 @@ def check_node_dettach(pciaddress): driver = os.path.basename(retval) return driver -def dettach(dicts): +def detach(dicts): Dettach a specific node device and bind it to pci-stub driver, argument 'dicts' is a dictionary type and includes 'pciaddress' key, whose value uniquely identify a pci address of the node device @@ -78,7 +78,7 @@ def dettach(dicts): pciaddress = dicts['pciaddress'] -original_driver = check_node_dettach(pciaddress) +original_driver = check_node_detach(pciaddress) logger.info(original device driver: %s % original_driver) util = utils.Utils() @@ -120,22 +120,22 @@ def dettach(dicts): try: try: -logger.info(dettach the node device) +logger.info(detach the node device) nodeobj.dettach(device_name) -current_driver = check_node_dettach(pciaddress) +current_driver = check_node_detach(pciaddress) logger.info(current device driver: %s % current_driver) if current_driver != original_driver and current_driver == pciback: -logger.info(the node %s device dettach is successful \ +logger.info(the node %s device detach is successful \ % device_name) test_result = True else: -logger.info(the node %s device dettach is failed % device_name) +logger.info(the node %s device detach is failed % device_name) test_result = False return 1 except LibvirtAPI, e: logger.error(API error message: %s, error code is %s \ % (e.response()['message'], e.response()['code'])) -logger.error(Error: fail to dettach %s node device % device_name) +logger.error(Error: 
fail to detach %s node device % device_name) test_result = False return 1 finally: diff --git a/repos/storage/define_scsi_pool.py b/repos/storage/define_scsi_pool.py index a5193e2..6afbe01 100644 --- a/repos/storage/define_scsi_pool.py +++ b/repos/storage/define_scsi_pool.py @@ -140,7 +140,7 @@ def define_scsi_pool(params): logger.error(API error message: %s, error code is %s \ % (e.response()['message'], e.response()['code'])) return 1 -finally +finally: conn.close() logger.info(closed hypervisor connection) --
[libvirt] [test-API][PATCH v2 2/4] Add libvirtd restart test case
* repos/libvirtd/restart.py libvirtd restart should not affect the running domains. This test check the libvirtd status before and after libvirtd restart, and also by checking the domain pid to confirm the domain is not affected. --- repos/libvirtd/restart.py | 180 + 1 files changed, 180 insertions(+), 0 deletions(-) create mode 100644 repos/libvirtd/restart.py diff --git a/repos/libvirtd/restart.py b/repos/libvirtd/restart.py new file mode 100644 index 000..7ccbb38 --- /dev/null +++ b/repos/libvirtd/restart.py @@ -0,0 +1,180 @@ +#!/usr/bin/evn python + Restart libvirtd testing. A running guest is required in this test. +During libvirtd restart, the guest remains running and not affected +by libvirtd restart. +libvirtd:restart +guestname +#GUESTNAME# + + +__author__ = 'Wayne Sun: g...@redhat.com' +__date__ = 'Thu Aug 4, 2011' +__version__ = '0.1.0' +__credits__ = 'Copyright (C) 2011 Red Hat, Inc.' +__all__ = ['restart'] + +import os +import re +import sys + +def append_path(path): +Append root path of package +if path not in sys.path: +sys.path.append(path) + +pwd = os.getcwd() +result = re.search('(.*)libvirt-test-API', pwd) +append_path(result.group(0)) + +from lib import connectAPI +from lib import domainAPI +from utils.Python import utils + +VIRSH_LIST = virsh list --all +RESTART_CMD = service libvirtd restart + +def check_params(params): +Verify inputing parameter dictionary +logger = params['logger'] +keys = ['guestname'] +for key in keys: +if key not in params: +logger.error(%s is required %key) +return 1 +return 0 + +def check_domain_running(domobj, guestname, logger): + check if the domain exists, may or may not be active +guest_names = domobj.get_list() + +if guestname not in guest_names: +logger.error(%s doesn't exist or not running % guestname) +return 1 +else: +return 0 + +def libvirtd_check(util, logger): +check libvirtd status + +cmd = service libvirtd status +ret, out = util.exec_cmd(cmd, shell=True) +if ret != 0: +logger.error(failed to get 
libvirtd status) +return 1 +else: +logger.info(out[0]) + +logger.info(VIRSH_LIST) +ret, out = util.exec_cmd(VIRSH_LIST, shell=True) +if ret != 0: +logger.error(failed to get virsh list result) +return 1 +else: +for i in range(len(out)): +logger.info(out[i]) + +return 0 + +def get_domain_pid(util, logger, guestname): +get the pid of running domain +logger.info(get the pid of running domain %s % guestname) +get_pid_cmd = cat /var/run/libvirt/qemu/%s.pid % guestname +ret, pid = util.exec_cmd(get_pid_cmd, shell=True) +if ret: +logger.error(fail to get the pid of runnings domain %s % \ + guestname) +return 1, +else: +logger.info(the pid of domain %s is %s % \ +(guestname, pid[0])) +return 0, pid[0] + +def restart(params): +restart libvirtd test +# Initiate and check parameters +params_check_result = check_params(params) +if params_check_result: +return 1 + +logger = params['logger'] +guestname = params['guestname'] +util = utils.Utils() +uri = util.get_uri('127.0.0.1') + +conn = connectAPI.ConnectAPI() +virconn = conn.open(uri) +domobj = domainAPI.DomainAPI(virconn) + +logger.info(check the domain state) +ret = check_domain_running(domobj, guestname, logger) +if ret: +return 1 + +conn.close() + +logger.info(check the libvirtd status:) +ret = libvirtd_check(util, logger) +if ret: +return 1 + +# Get domain ip +logger.info(get the mac address of domain %s % guestname) +mac = util.get_dom_mac_addr(guestname) +logger.info(the mac address of domain %s is %s % (guestname, mac)) +logger.info(get ip by mac address) +ip = util.mac_to_ip(mac, 180) +logger.info(the ip address of domain %s is %s % (guestname, ip)) +timeout = 600 + +logger.info(ping to domain %s % guestname) +if util.do_ping(ip, 0): +logger.info(Success ping domain %s % guestname) +else: +logger.error(fail to ping domain %s % guestname) +return 1 + +ret, pid_before = get_domain_pid(util, logger, guestname) +if ret: +return 1 + +logger.info(restart libvirtd service:) +ret, out = util.exec_cmd(RESTART_CMD, shell=True) 
+if ret != 0: +logger.error(failed to restart libvirtd) +for i in range(len(out)): +logger.error(out[i]) +return 1 +else: +for i in range(len(out)): +logger.info(out[i]) + +logger.info(recheck libvirtd status:) +ret = libvirtd_check(util, logger) +if ret: +return 1 + +logger.info(ping to domain %s again % guestname
[libvirt] [test-API][PATCH v2 1/4] Add init module under libvirtd
* repos/libvirtd/__init__.py --- 0 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 repos/libvirtd/__init__.py diff --git a/repos/libvirtd/__init__.py b/repos/libvirtd/__init__.py new file mode 100644 index 000..e69de29 -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH v2 3/4] Add libvirtd upstart test case
* repos/libvirtd/upstart.py After set libvirtd upstart, libvirtd could restarted after force kill libvirtd process --- repos/libvirtd/upstart.py | 213 + 1 files changed, 213 insertions(+), 0 deletions(-) create mode 100644 repos/libvirtd/upstart.py diff --git a/repos/libvirtd/upstart.py b/repos/libvirtd/upstart.py new file mode 100644 index 000..904baa9 --- /dev/null +++ b/repos/libvirtd/upstart.py @@ -0,0 +1,213 @@ +#!/usr/bin/env python +Upstart libvirtd testing + + +__author__ = 'Wayne Sun: g...@redhat.com' +__date__ = 'Thu Aug 4, 2011' +__version__ = '0.1.0' +__credits__ = 'Copyright (C) 2011 Red Hat, Inc.' +__all__ = ['upstart'] + +import os +import re +import sys +import time + +def append_path(path): +Append root path of package +if path not in sys.path: +sys.path.append(path) + +pwd = os.getcwd() +result = re.search('(.*)libvirt-test-API', pwd) +append_path(result.group(0)) + +from utils.Python import utils +from shutil import copy + +VIRSH_LIST = virsh list --all +UPSTART_CONF = rpm -ql libvirt|grep upstart +INITCTL_CMD = /sbin/initctl +SYSTEMCTL_CMD = /bin/systemctl +INITCTL_RELOAD_CMD = initctl reload-configuration +SYSTEMCTL_RELOAD_CMD = systemctl daemon-reload +INIT_CONF = /etc/init/libvirtd.conf + +def libvirtd_check(util, logger): +check libvirtd status + +cmd = service libvirtd status +ret, out = util.exec_cmd(cmd, shell=True) +if ret != 0: +logger.error(failed to get libvirtd status) +return 1 +else: +logger.info(out[0]) + +logger.info(VIRSH_LIST) +ret, out = util.exec_cmd(VIRSH_LIST, shell=True) +if ret != 0: +logger.error(failed to get virsh list result) +return 1 +else: +for i in range(len(out)): +logger.info(out[i]) + +return 0 + +def upstart(params): +Set libvirtd upstart +logger = params['logger'] +util = utils.Utils() + +logger.info(chkconfig libvirtd off:) +cmd = chkconfig libvirtd off +ret, out = util.exec_cmd(cmd, shell=True) +if ret != 0: +logger.error(failed) +return 1 +else: +logger.info(succeed) + +cmd = service libvirtd stop 
+logger.info(cmd) +ret, out = util.exec_cmd(cmd, shell=True) +if ret != 0: +logger.error(failed to stop libvirtd service) +return 1 +else: +logger.info(out[0]) + +logger.info(find libvirtd.upstart file in libvirt package:) +ret, conf = util.exec_cmd(UPSTART_CONF, shell=True) +if ret != 0: +logger.error(can't find libvirtd.upstart as part of libvirt package) +return 1 +elif conf[0]: +logger.info(succeed) +logger.info(copy %s to %s % (conf[0], INIT_CONF)) +copy(conf[0], INIT_CONF) + +if os.path.exists(INITCTL_CMD): +logger.info(INITCTL_RELOAD_CMD) +ret, out = util.exec_cmd(INITCTL_RELOAD_CMD, shell=True) +if ret != 0: +logger.error(failed to reload configuration) +return 1 +else: +logger.info(succeed) + +cmd = initctl start libvirtd +logger.info(cmd) +ret, out = util.exec_cmd(cmd, shell=True) +if ret != 0: +logger.error(failed to start libvirtd by initctl) +return 1 +else: +logger.info(out[0]) + +cmd = initctl status libvirtd +logger.info(get libvirtd status by initctl:) +ret, out = util.exec_cmd(cmd, shell=True) +if ret !=0: +logger.info(failed to get libvirtd status by initctl) +return 1 +else: +logger.info(out[0]) + +elif os.path.exists(SYSTEMCTL_CMD): +logger.info(SYSTEMCTL_RELOAD_CMD) +ret, out = util.exec_cmd(SYSTEMCTL_RELOAD_CMD, shell=True) +if ret != 0: +logger.error(failed to reload systemd manager configuration) +return 1 +else: +logger.info(succeed) + +cmd = systemctl start libvirtd.service +logger.info(cmd) +ret, out = util.exec_cmd(cmd, shell=True) +if ret != 0: +logger.error(failed to start libvirtd.service by systemctl) +return 1 +else: +logger.info(out[0]) + +cmd = systemctl status libvirtd.service +logger.info(get libvirtd.service status by systemctl:) +ret, out = util.exec_cmd(cmd, shell=True) +if ret !=0: +logger.info(failed to get libvirtd.service status by systemctl) +return 1 +else: +logger.info(out[0]) +else: +return 1 + +time.sleep(5) + +logger.info(check the libvirtd status:) +result = libvirtd_check(util, logger) +if result: +return 1 + 
+cmd = killall -9 libvirtd +logger.info(kill libvirtd process) +ret, out = util.exec_cmd(cmd, shell=True) +if ret != 0: +logger.error(failed to kill libvirtd
[libvirt] [test-API][PATCH v2 4/4] Add qemu_hang test case under libvirtd
* repos/libvirtd/qemu_hang.py qemu process get hang should not cause libvirtd hang or dead --- repos/libvirtd/qemu_hang.py | 142 +++ 1 files changed, 142 insertions(+), 0 deletions(-) create mode 100644 repos/libvirtd/qemu_hang.py diff --git a/repos/libvirtd/qemu_hang.py b/repos/libvirtd/qemu_hang.py new file mode 100644 index 000..f377df3 --- /dev/null +++ b/repos/libvirtd/qemu_hang.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python + QEMU get hang should not cause libvirtd hang or dead. This test stop +a qemu process and check whether libvird get hang. For doing this +test, a running domain is required. +libvirtd:qemu_hang +guestname +#GUESTNAME# + + +__author__ = 'Wayne Sun: g...@redhat.com' +__date__ = 'Thu Sep 2, 2011' +__version__ = '0.1.0' +__credits__ = 'Copyright (C) 2011 Red Hat, Inc.' +__all__ = ['qemu_hang'] + +import os +import re +import sys + +def append_path(path): +Append root path of package +if path not in sys.path: +sys.path.append(path) + +pwd = os.getcwd() +result = re.search('(.*)libvirt-test-API', pwd) +append_path(result.group(0)) + +from lib import connectAPI +from lib import domainAPI +from utils.Python import utils + +VIRSH_LIST = virsh list --all +RESTART_CMD = service libvirtd restart + +def check_params(params): +Verify inputing parameter dictionary +logger = params['logger'] +keys = ['guestname'] +for key in keys: +if key not in params: +logger.error(%s is required %key) +return 1 +return 0 + +def check_domain_running(domobj, guestname, logger): + check if the domain exists, may or may not be active +guest_names = domobj.get_list() + +if guestname not in guest_names: +logger.error(%s doesn't exist or not running % guestname) +return 1 +else: +return 0 + +def libvirtd_check(util, logger): +check libvirtd status + +cmd = service libvirtd status +ret, out = util.exec_cmd(cmd, shell=True) +if ret != 0: +logger.error(failed to get libvirtd status) +return 1 +else: +logger.info(out[0]) + +logger.info(VIRSH_LIST) +ret, out = 
util.exec_cmd(VIRSH_LIST, shell=True) +if ret != 0: +logger.error(failed to get virsh list result) +return 1 +else: +for i in range(len(out)): +logger.info(out[i]) + +return 0 + +def get_domain_pid(util, logger, guestname): +get the pid of running domain +logger.info(get the pid of running domain %s % guestname) +get_pid_cmd = cat /var/run/libvirt/qemu/%s.pid % guestname +ret, pid = util.exec_cmd(get_pid_cmd, shell=True) +if ret: +logger.error(fail to get the pid of runnings domain %s % \ + guestname) +return 1, +else: +logger.info(the pid of domain %s is %s % \ +(guestname, pid[0])) +return 0, pid[0] + +def qemu_hang(params): +Hang qemu process, check libvirtd status +# Initiate and check parameters +params_check_result = check_params(params) +if params_check_result: +return 1 + +logger = params['logger'] +guestname = params['guestname'] +util = utils.Utils() +uri = util.get_uri('127.0.0.1') + +conn = connectAPI.ConnectAPI() +virconn = conn.open(uri) +domobj = domainAPI.DomainAPI(virconn) + +logger.info(check the domain state) +ret = check_domain_running(domobj, guestname, logger) +if ret: +return 1 + +conn.close() + +logger.info(check the libvirtd status:) +result = libvirtd_check(util, logger) +if result: +return 1 + +ret, pid = get_domain_pid(util, logger, guestname) +if ret: +return 1 + +cmd = kill -STOP %s % pid +logger.info(cmd) +ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(failed to stop qemu process of %s % guestname) +return 1 + +logger.info(recheck libvirtd status:) +result = libvirtd_check(util, logger) +if result: +return 1 + +return 0 + +def qemu_hang_cleanup(params): + clean testing environment +pass + + -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH v4] Add test module ownership_test
* repos/domain/ownership_test.py Setting the dynamic_ownership value in /etc/libvirt/qemu.conf, check the ownership of file after domain save and restore. Test could be on local or root_squash nfs. The default owner of the saved domain file is qemu:qemu in this case. --- repos/domain/ownership_test.py | 316 1 files changed, 316 insertions(+), 0 deletions(-) create mode 100644 repos/domain/ownership_test.py diff --git a/repos/domain/ownership_test.py b/repos/domain/ownership_test.py new file mode 100644 index 000..74620f4 --- /dev/null +++ b/repos/domain/ownership_test.py @@ -0,0 +1,316 @@ +#!/usr/bin/env python +Setting the dynamic_ownership in /etc/libvirt/qemu.conf, + check the ownership of saved domain file. Test could be on + local or root_squash nfs. The default owner of the saved + domain file is qemu:qemu in this case. + domain:ownership_test + guestname + #GUESTNAME# + dynamic_ownership + enable|disable + use_nfs + enable|disable + + +__author__ = 'Wayne Sun: g...@redhat.com' +__date__ = 'Mon Jul 25, 2011' +__version__ = '0.1.0' +__credits__ = 'Copyright (C) 2011 Red Hat, Inc.' 
+__all__ = ['ownership_test'] + +import os +import re +import sys + +QEMU_CONF = /etc/libvirt/qemu.conf +SAVE_FILE = /mnt/test.save +TEMP_FILE = /tmp/test.save + +def append_path(path): +Append root path of package +if path not in sys.path: +sys.path.append(path) + +from lib import connectAPI +from lib import domainAPI +from utils.Python import utils +from exception import LibvirtAPI + +pwd = os.getcwd() +result = re.search('(.*)libvirt-test-API', pwd) +append_path(result.group(0)) + +def return_close(conn, logger, ret): +close hypervisor connection and return the given value +conn.close() +logger.info(closed hypervisor connection) +return ret + +def check_params(params): +Verify inputing parameter dictionary +logger = params['logger'] +keys = ['guestname', 'dynamic_ownership', 'use_nfs'] +for key in keys: +if key not in params: +logger.error(%s is required %key) +return 1 +return 0 + +def check_domain_running(domobj, guestname, logger): + check if the domain exists, may or may not be active +guest_names = domobj.get_list() + +if guestname not in guest_names: +logger.error(%s doesn't exist or not running % guestname) +return 1 +else: +return 0 + +def nfs_setup(util, logger): +setup nfs on localhost + +logger.info(set nfs service) +cmd = echo /tmp *\(rw,root_squash\) /etc/exports +ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(failed to config nfs export) +return 1 + +logger.info(restart nfs service) +cmd = service nfs restart +ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(failed to restart nfs service) +return 1 +else: +for i in range(len(out)): +logger.info(out[i]) + +return 0 + +def chown_file(util, filepath, logger): +touch a file and setting the chown + +if os.path.exists(filepath): +os.remove(filepath) + +touch_cmd = touch %s % filepath +logger.info(touch_cmd) +ret, out = util.exec_cmd(touch_cmd, shell=True) +if ret: +logger.error(failed to touch a new file) +logger.error(out[0]) +return 1 + +logger.info(set chown of %s as 
107:107 % filepath) +chown_cmd = chown 107:107 %s % filepath +ret, out = util.exec_cmd(chown_cmd, shell=True) +if ret: +logger.error(failed to set the ownership of %s % filepath) +return 1 + +logger.info(set %s mode as 664 % filepath) +cmd = chmod 664 %s % filepath +ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(failed to set the mode of %s % filepath) +return 1 + +return 0 + +def prepare_env(util, dynamic_ownership, use_nfs, logger): +configure dynamic_ownership in /etc/libvirt/qemu.conf, + set chown of the file to save + +if dynamic_ownership == 'enable': +d_ownership = 1 +elif dynamic_ownership == 'disable': +d_ownership = 0 +else: +logger.error(wrong dynamic_ownership value) +return 1 + +logger.info(set the dynamic ownership in %s as %s % \ +(QEMU_CONF, d_ownership)) +set_cmd = echo dynamic_ownership = %s %s % \ + (d_ownership, QEMU_CONF) +ret, out = util.exec_cmd(set_cmd, shell=True) +if ret: +logger.error(failed to set dynamic ownership) +return 1 + +logger.info(restart libvirtd) +restart_cmd = service libvirtd restart +ret, out = util.exec_cmd(restart_cmd, shell=True) +if ret: +logger.error(failed to restart libvirtd) +return 1 +else: +for i in range(len(out)): +logger.info(out[i
[libvirt] [test-API][PATCH v2] Add test case for starting a domain on nfs storage
* This test is for start a domain with img file on nfs storage. Under SElinux boolean virt_use_nfs on or off, combine with setting the dynamic_ownership in /etc/libvirt/qemu.conf, check whether the guest can be started or not. The nfs could be root_squash or no_root_squash. SElinux should be enabled and enforcing on host. --- repos/sVirt/domain_nfs_start.py | 485 +++ 1 files changed, 485 insertions(+), 0 deletions(-) create mode 100644 repos/sVirt/__init__.py create mode 100644 repos/sVirt/domain_nfs_start.py diff --git a/repos/sVirt/__init__.py b/repos/sVirt/__init__.py new file mode 100644 index 000..e69de29 diff --git a/repos/sVirt/domain_nfs_start.py b/repos/sVirt/domain_nfs_start.py new file mode 100644 index 000..e4bc867 --- /dev/null +++ b/repos/sVirt/domain_nfs_start.py @@ -0,0 +1,485 @@ +#!/usr/bin/env python +This test is for start a guest with img file on nfs storage. + Under SElinux boolean virt_use_nfs on or off, combine with + setting the dynamic_ownership in /etc/libvirt/qemu.conf, + check whether the guest can be started or not. The nfs could + be root_squash or no_root_squash. SElinux should be enabled + and enforcing on host. + sVirt:domain_nfs_start + guestname + #GUESTNAME# + dynamic_ownership + enable|disable + virt_use_nfs + on|off + root_squash + yes|no + + +__author__ = 'Wayne Sun: g...@redhat.com' +__date__ = 'Mon Sep 2, 2011' +__version__ = '0.1.0' +__credits__ = 'Copyright (C) 2011 Red Hat, Inc.' 
+__all__ = ['domain_nfs_start'] + +import os +import re +import sys + +QEMU_CONF = /etc/libvirt/qemu.conf + +def append_path(path): +Append root path of package +if path not in sys.path: +sys.path.append(path) + +from lib import connectAPI +from lib import domainAPI +from utils.Python import utils +from exception import LibvirtAPI +from shutil import copy + +pwd = os.getcwd() +result = re.search('(.*)libvirt-test-API', pwd) +append_path(result.group(0)) + +def return_close(conn, logger, ret): +close hypervisor connection and return the given value +conn.close() +logger.info(closed hypervisor connection) +return ret + +def check_params(params): +Verify inputing parameter dictionary +logger = params['logger'] +keys = ['guestname', 'dynamic_ownership', 'virt_use_nfs', 'root_squash'] +for key in keys: +if key not in params: +logger.error(%s is required %key) +return 1 +return 0 + +def nfs_setup(util, root_squash, logger): +setup nfs on localhost + +logger.info(set nfs service) +if root_squash == yes: +option = root_squash +elif root_squash == no: +option = no_root_squash +else: +logger.error(wrong root_squash value) +return 1 + +cmd = echo /tmp *\(rw,%s\) /etc/exports % option +ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(failed to config nfs export) +return 1 + +logger.info(restart nfs service) +cmd = service nfs restart +ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(failed to restart nfs service) +return 1 +else: +for i in range(len(out)): +logger.info(out[i]) + +return 0 + +def prepare_env(util, d_ownership, virt_use_nfs, guestname, root_squash, \ +disk_file, img_dir, logger): +set virt_use_nfs SElinux boolean, configure + dynamic_ownership in /etc/libvirt/qemu.conf + +logger.info(set virt_use_nfs selinux boolean) +cmd = setsebool virt_use_nfs %s % virt_use_nfs +ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(failed to set virt_use_nfs SElinux boolean) +return 1 + +logger.info(set the dynamic ownership in %s as 
%s % \ +(QEMU_CONF, d_ownership)) +if d_ownership == enable: +option = 1 +elif d_ownership == disable: +option = 0 +else: +logger.error(wrong dynamic_ownership value) +return 1 + +set_cmd = echo dynamic_ownership = %s %s % \ + (option, QEMU_CONF) +ret, out = util.exec_cmd(set_cmd, shell=True) +if ret: +logger.error(failed to set dynamic ownership) +return 1 + +logger.info(restart libvirtd) +restart_cmd = service libvirtd restart +ret, out = util.exec_cmd(restart_cmd, shell=True) +if ret: +logger.error(failed to restart libvirtd) +for i in range(len(out)): +logger.info(out[i]) +return 1 +else: +for i in range(len(out)): +logger.info(out[i]) + +file_name = os.path.basename(disk_file) +filepath = /tmp/%s % file_name +if os.path.exists(filepath): +os.remove(filepath) + +logger.info(copy %s img file to nfs path % guestname) +copy(disk_file, /tmp) + +logger.info(set up nfs service on localhost
[libvirt] [test-API][PATCH] Add test case for starting a domain on nfs storage
* This test is for start a domain with img file on nfs storage. Under SElinux boolean virt_use_nfs on or off, combine with setting the dynamic_ownership in /etc/libvirt/qemu.conf, check whether the guest can be started or not. The nfs could be root_squash or no_root_squash. SElinux should be enabled and enforcing on host. --- repos/sVirt/domain_nfs_start.py | 476 +++ 1 files changed, 476 insertions(+), 0 deletions(-) create mode 100644 repos/sVirt/__init__.py create mode 100644 repos/sVirt/domain_nfs_start.py diff --git a/repos/sVirt/__init__.py b/repos/sVirt/__init__.py new file mode 100644 index 000..e69de29 diff --git a/repos/sVirt/domain_nfs_start.py b/repos/sVirt/domain_nfs_start.py new file mode 100644 index 000..0b66128 --- /dev/null +++ b/repos/sVirt/domain_nfs_start.py @@ -0,0 +1,476 @@ +#!/usr/bin/env python +This test is for start a guest with img file on nfs storage. + Under SElinux boolean virt_use_nfs on or off, combine with + setting the dynamic_ownership in /etc/libvirt/qemu.conf, + check whether the guest can be started or not. The nfs could + be root_squash or no_root_squash. SElinux should be enabled + and enforcing on host. + sVirt:domain_nfs_start + guestname + #GUESTNAME# + dynamic_ownership + enable|disable + virt_use_nfs + on|off + root_squash + yes|no + + +__author__ = 'Wayne Sun: g...@redhat.com' +__date__ = 'Mon Sep 2, 2011' +__version__ = '0.1.0' +__credits__ = 'Copyright (C) 2011 Red Hat, Inc.' 
+__all__ = ['domain_nfs_start'] + +import os +import re +import sys + +QEMU_CONF = /etc/libvirt/qemu.conf + +def append_path(path): +Append root path of package +if path in sys.path: +pass +else: +sys.path.append(path) + +from lib import connectAPI +from lib import domainAPI +from utils.Python import utils +from exception import LibvirtAPI + +pwd = os.getcwd() +result = re.search('(.*)libvirt-test-API', pwd) +append_path(result.group(0)) + +def return_close(conn, logger, ret): +close hypervisor connection and return the given value +conn.close() +logger.info(closed hypervisor connection) +return ret + +def check_params(params): +Verify inputing parameter dictionary +logger = params['logger'] +keys = ['guestname', 'dynamic_ownership', 'virt_use_nfs', 'root_squash'] +for key in keys: +if key not in params: +logger.error(%s is required %key) +return 1 +return 0 + +def nfs_setup(util, root_squash, logger): +setup nfs on localhost + +logger.info(set nfs service) +if root_squash == yes: +option = root_squash +elif root_squash == no: +option = no_root_squash + +cmd = echo /tmp *\(rw,%s\) /etc/exports % option +ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(failed to config nfs export) +return 1 + +logger.info(restart nfs service) +cmd = service nfs restart +ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(failed to restart nfs service) +return 1 +else: +for i in range(len(out)): +logger.info(out[i]) + +return 0 + +def prepare_env(util, d_ownership, virt_use_nfs, guestname, root_squash, \ +disk_file, img_dir, logger): +set virt_use_nfs SElinux boolean, configure + dynamic_ownership in /etc/libvirt/qemu.conf + +logger.info(set virt_use_nfs selinux boolean) +cmd = setsebool virt_use_nfs %s % virt_use_nfs +ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(failed to set virt_use_nfs SElinux boolean) +return 1 + +logger.info(set the dynamic ownership in %s as %s % \ +(QEMU_CONF, d_ownership)) +if d_ownership == enable: +option = 
1 +elif d_ownership == disable: +option = 0 +set_cmd = echo dynamic_ownership = %s %s % \ + (option, QEMU_CONF) +ret, out = util.exec_cmd(set_cmd, shell=True) +if ret: +logger.error(failed to set dynamic ownership) +return 1 + +logger.info(restart libvirtd) +restart_cmd = service libvirtd restart +ret, out = util.exec_cmd(restart_cmd, shell=True) +if ret: +logger.error(failed to restart libvirtd) +for i in range(len(out)): +logger.info(out[i]) +return 1 +else: +for i in range(len(out)): +logger.info(out[i]) + +logger.info(copy %s img file to nfs path %guestname) +cmd = cp %s /tmp % disk_file +ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(failed to cp %s img file to nfs path % guestname) +return 1 + +logger.info(set up nfs service on localhost) +ret = nfs_setup(util, root_squash, logger) +if ret: +return 1 + +logger.info(mount nfs to img dir path) +mount_cmd = mount -o
[libvirt] [test-API][PATCH] Add libvirtd restart test case
* libvirtd restart should not affect the running domains. This test check the libvirtd status before and after libvirtd restart, and also by checking the domain pid to confirm the domain is not affected. --- repos/libvirtd/restart.py | 143 + 1 files changed, 143 insertions(+), 0 deletions(-) create mode 100644 repos/libvirtd/restart.py diff --git a/repos/libvirtd/restart.py b/repos/libvirtd/restart.py new file mode 100644 index 000..15dd43c --- /dev/null +++ b/repos/libvirtd/restart.py @@ -0,0 +1,143 @@ +#!/usr/bin/evn python + Restart libvirtd testing. A running guest is required in this test. +During libvirtd restart, the guest remains running and not affected +by libvirtd restart. +libvirtd:restart +guestname +#GUESTNAME# + + +__author__ = 'Wayne Sun: g...@redhat.com' +__date__ = 'Thu Aug 4, 2011' +__version__ = '0.1.0' +__credits__ = 'Copyright (C) 2011 Red Hat, Inc.' +__all__ = ['restart'] + +import os +import re +import sys +import time + +def append_path(path): +Append root path of package +if path not in sys.path: +sys.path.append(path) + +pwd = os.getcwd() +result = re.search('(.*)libvirt-test-API', pwd) +append_path(result.group(0)) + +from lib import connectAPI +from lib import domainAPI +from utils.Python import utils + +VIRSH_LIST = virsh list --all +RESTART_CMD = service libvirtd restart + +def check_params(params): +Verify inputing parameter dictionary +logger = params['logger'] +keys = ['guestname'] +for key in keys: +if key not in params: +logger.error(%s is required %key) +return 1 +return 0 + +def libvirtd_check(util, logger): +check libvirtd status + +cmd = service libvirtd status +ret, out = util.exec_cmd(cmd, shell=True) +if ret != 0: +logger.error(failed to get libvirtd status) +return 1 +else: +logger.info(out[0]) + +logger.info(VIRSH_LIST) +ret, out = util.exec_cmd(VIRSH_LIST, shell=True) +if ret != 0: +logger.error(failed to get virsh list result) +return 1 +else: +for i in range(len(out)): +logger.info(out[i]) + +return 0 + +def 
get_domain_pid(util, logger, guestname): +get the pid of running domain +logger.info(get the pid of running domain %s % guestname) +get_pid_cmd = cat /var/run/libvirt/qemu/%s.pid % guestname +ret, pid = util.exec_cmd(get_pid_cmd, shell=True) +if ret: +logger.error(fail to get the pid of runnings domain %s % \ + guestname) +return 1, +else: +logger.info(the pid of domain %s is %s % \ +(guestname, pid[0])) +return 0, pid[0] + +def restart(params): +restart libvirtd test +# Initiate and check parameters +params_check_result = check_params(params) +if params_check_result: +return 1 + +logger = params['logger'] +guestname = params['guestname'] +util = utils.Utils() +uri = util.get_uri('127.0.0.1') + +conn = connectAPI.ConnectAPI() +virconn = conn.open(uri) +domobj = domainAPI.DomainAPI(virconn) +state = domobj.get_state(guestname) +conn.close() + +if(state == shutoff): +logger.info(guest is shutoff, if u want to run this case, \ + guest must be running) +return 1 + +logger.info(check the libvirtd status:) +result = libvirtd_check(util, logger) +if result: +return 1 + +ret, pid_before = get_domain_pid(util, logger, guestname) +if ret: +return 1 + +logger.info(restart libvirtd service:) +ret, out = util.exec_cmd(RESTART_CMD, shell=True) +if ret != 0: +logger.error(failed to restart libvirtd) +for i in range(len(out)): +logger.error(out[i]) +return 1 +else: +for i in range(len(out)): +logger.info(out[i]) + +logger.info(recheck libvirtd status:) +result = libvirtd_check(util, logger) +if result: +return 1 + +ret, pid_after = get_domain_pid(util, logger, guestname) +if ret: +return 1 + +if pid_before != pid_after: +logger.error(%s pid changed during libvirtd restart % \ + guestname) +return 1 +else: +logger.info(domain pid not change, %s keeps running during \ + libvirtd restart % guestname) + +return 0 -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH] Fix a typo which blocks windows cdrom install
--- repos/domain/install_windows_cdrom.py |2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/repos/domain/install_windows_cdrom.py b/repos/domain/install_windows_cdrom.py index 9cf9e3b..b8333e2 100644 --- a/repos/domain/install_windows_cdrom.py +++ b/repos/domain/install_windows_cdrom.py @@ -296,7 +296,7 @@ def install_windows_cdrom(params): logger.debug(the uri to connect is %s % uri) if params.has_key('imagepath') and not params.has_key('volumepath'): -imgfullpath = os..path.join(params.get('imagepath'), guestname) +imgfullpath = os.path.join(params.get('imagepath'), guestname) elif not params.has_key('imagepath') and not params.has_key('volumepath'): if hypervisor == 'xen': imgfullpath = os.path.join('/var/lib/xen/images', guestname) -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [test-API][PATCH v3] Add ownership_test.py test case
* Save a domain to a file which chown is qemu:qemu, check the ownership of the file after save and restore operation. With use_nfs enable or not, the saved file could be on local or mounted root_squash nfs dir. --- repos/domain/ownership_test.py | 315 1 files changed, 315 insertions(+), 0 deletions(-) create mode 100644 repos/domain/ownership_test.py diff --git a/repos/domain/ownership_test.py b/repos/domain/ownership_test.py new file mode 100644 index 000..1957428 --- /dev/null +++ b/repos/domain/ownership_test.py @@ -0,0 +1,315 @@ +#!/usr/bin/env python +Setting the dynamic_ownership in /etc/libvirt/qemu.conf, + check the ownership of saved domain file. Test could be on + local or root_squash nfs. The default owner of the saved + domain file is qemu:qemu in this case. + domain:ownership_test + guestname + #GUESTNAME# + dynamic_ownership + 0|1 + use_nfs + enable|disable + + use_nfs is a flag for decide using root_squash nfs or not + + +__author__ = 'Wayne Sun: g...@redhat.com' +__date__ = 'Mon Jul 25, 2011' +__version__ = '0.1.0' +__credits__ = 'Copyright (C) 2011 Red Hat, Inc.' 
+__all__ = ['ownership_test'] + +import os +import re +import sys +import commands + +QEMU_CONF = /etc/libvirt/qemu.conf +SAVE_FILE = /mnt/test.save +TEMP_FILE = /tmp/test.save + +from utils.Python import utils + +def append_path(path): +Append root path of package +if path in sys.path: +pass +else: +sys.path.append(path) + +from lib import connectAPI +from lib import domainAPI +from utils.Python import utils +from exception import LibvirtAPI + +pwd = os.getcwd() +result = re.search('(.*)libvirt-test-API', pwd) +append_path(result.group(0)) + +def return_close(conn, logger, ret): +conn.close() +logger.info(closed hypervisor connection) +return ret + +def check_params(params): +Verify inputing parameter dictionary +logger = params['logger'] +keys = ['guestname', 'dynamic_ownership', 'use_nfs'] +for key in keys: +if key not in params: +logger.error(%s is required %key) +return 1 +return 0 + +def nfs_setup(util, logger): +setup nfs on localhost + +logger.info(set nfs service) +cmd = echo /tmp *\(rw,root_squash\) /etc/exports +ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(failed to config nfs export) +return 1 + +logger.info(start nfs service) +cmd = service nfs start +ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(failed to start nfs service) +return 1 +else: +for i in range(len(out)): +logger.info(out[i]) + +return 0 + +def chown_file(util, filepath, logger): +touch a file and setting the chown + +if os.path.exists(filepath): +os.remove(filepath) + +touch_cmd = touch %s % filepath +logger.info(touch_cmd) +ret, out = util.exec_cmd(touch_cmd, shell=True) +if ret: +logger.error(failed to touch a new file) +logger.error(out[0]) +return 1 + +logger.info(set chown of %s as 107:107 % filepath) +chown_cmd = chown 107:107 %s % filepath +ret, out = util.exec_cmd(chown_cmd, shell=True) +if ret: +logger.error(failed to set the ownership of %s % filepath) +return 1 + +logger.info(set %s mode as 664 % filepath) +cmd = chmod 664 %s % filepath 
+ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(failed to set the mode of %s % filepath) +return 1 + +return 0 + +def prepare_env(util, guestname, dynamic_ownership, use_nfs, logger): +configure dynamic_ownership in /etc/libvirt/qemu.conf, + set chown of the file to save + +logger.info(set the dynamic ownership in %s as %s % \ +(QEMU_CONF, dynamic_ownership)) +set_cmd = echo dynamic_ownership = %s %s % (dynamic_ownership, QEMU_CONF) +ret, out = util.exec_cmd(set_cmd, shell=True) +if ret: +logger.error(failed to set dynamic ownership) +return 1 + +logger.info(restart libvirtd) +restart_cmd = service libvirtd restart +ret, out = util.exec_cmd(restart_cmd, shell=True) +if ret: +logger.error(failed to restart libvirtd) +return 1 +else: +for i in range(len(out)): +logger.info(out[i]) + +if use_nfs == 'enable': +filepath = TEMP_FILE +elif use_nfs == 'disable': +filepath = SAVE_FILE + +ret = chown_file(util, filepath, logger) +if ret: +return 1 + +if use_nfs == 'enable': +ret = nfs_setup(util, logger) +if ret: +return 1 + +cmd = setsebool virt_use_nfs 1 +logger.info(cmd) +ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(Failed to setsebool virt_use_nfs) +return 1
[libvirt] [test-API][PATCH v2] Add ownership_test.py test case
* Save a domain to a file, check the ownership of the file after save and restore operation --- repos/domain/ownership_test.py | 302 1 files changed, 302 insertions(+), 0 deletions(-) create mode 100644 repos/domain/ownership_test.py diff --git a/repos/domain/ownership_test.py b/repos/domain/ownership_test.py new file mode 100644 index 000..cba1424 --- /dev/null +++ b/repos/domain/ownership_test.py @@ -0,0 +1,302 @@ +#!/usr/bin/env python +Setting the dynamic_ownership in /etc/libvirt/qemu.conf, + check the ownership of saved domain file. Test could be on + local or root_squash nfs. + domain:ownership_test + guestname + #GUESTNAME# + dynamic_ownership + 0|1 + use_nfs + 0|1 + + use_nfs is a flag for decide using nfs or not + + +__author__ = 'Wayne Sun: g...@redhat.com' +__date__ = 'Mon Jul 25, 2011' +__version__ = '0.1.0' +__credits__ = 'Copyright (C) 2011 Red Hat, Inc.' +__all__ = ['ownership_test'] + +import os +import re +import sys +import commands + +QEMU_CONF = /etc/libvirt/qemu.conf +SAVE_FILE = /mnt/test.save +TEMP_FILE = /tmp/test.save + +from utils.Python import utils + +def append_path(path): +Append root path of package +if path in sys.path: +pass +else: +sys.path.append(path) + +from lib import connectAPI +from lib import domainAPI +from utils.Python import utils +from exception import LibvirtAPI + +pwd = os.getcwd() +result = re.search('(.*)libvirt-test-API', pwd) +append_path(result.group(0)) + +def return_close(conn, logger, ret): +conn.close() +logger.info(closed hypervisor connection) +return ret + +def check_params(params): +Verify inputing parameter dictionary +logger = params['logger'] +keys = ['guestname', 'dynamic_ownership'] +for key in keys: +if key not in params: +logger.error(%s is required %key) +return 1 +return 0 + +def nfs_setup(util, logger): +setup nfs on localhost + +logger.info(set nfs service) +cmd = echo /tmp *\(rw,root_squash\) /etc/exports +ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(failed to config nfs 
export) +return 1 + +logger.info(start nfs service) +cmd = service nfs start +ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(failed to start nfs service) +return 1 +else: +for i in range(len(out)): +logger.info(out[i]) + +return 0 + +def chown_file(util, filepath, logger): +touch a file and setting the chown + +if os.path.exists(filepath): +os.remove(filepath) + +touch_cmd = touch %s % filepath +logger.info(touch_cmd) +ret, out = util.exec_cmd(touch_cmd, shell=True) +if ret: +logger.error(failed to touch a new file) +logger.error(out[0]) +return 1 + +logger.info(set chown of %s as 107:107 % filepath) +chown_cmd = chown 107:107 %s % filepath +ret, out = util.exec_cmd(chown_cmd, shell=True) +if ret: +logger.error(failed to set the ownership of %s % filepath) +return 1 + +logger.info(set %s mode as 644 % filepath) +cmd = chmod 644 %s % filepath +ret, out = util.exec_cmd(cmd, shell=True) +if ret: +logger.error(failed to set the mode of %s % filepath) +return 1 + +return 0 + +def prepare_env(util, guestname, dynamic_ownership, use_nfs, logger): +configure dynamic_ownership in /etc/libvirt/qemu.conf, + set chown of the file to save + +logger.info(set the dynamic ownership in %s as %s % \ +(QEMU_CONF, dynamic_ownership)) +set_cmd = echo dynamic_ownership = %s %s % (dynamic_ownership, QEMU_CONF) +ret, out = util.exec_cmd(set_cmd, shell=True) +if ret: +logger.error(failed to set dynamic ownership) + +logger.info(restart libvirtd) +restart_cmd = service libvirtd restart +ret, out = util.exec_cmd(restart_cmd, shell=True) +if ret: +logger.error(failed to restart libvirtd) +return 1 +else: +for i in range(len(out)): +logger.info(out[i]) + +if use_nfs == '1': +ret = nfs_setup(util, logger) +if ret: +return 1 + +logger.info(mount the nfs path to /mnt) +mount_cmd = mount -o vers=3 127.0.0.1:/tmp /mnt +ret, out = util.exec_cmd(mount_cmd, shell=True) +if ret: +logger.error(Failed to mount the nfs path) +for i in range(len(out)): +logger.info(out[i]) + +filepath = 
TEMP_FILE +else: +filepath = SAVE_FILE + +ret = chown_file(util, filepath, logger) +if ret: +return 1 + +return 0 + +def ownership_get(logger): +check the ownership of file + +statinfo = os.stat(SAVE_FILE) +uid = statinfo.st_uid +gid = statinfo.st_gid + +logger.info(the uid and gid of %s is %s
[libvirt] [test-API][PATCH] Fix the missing ret variable problem
--- repos/remoteAccess/tls_setup.py |2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/repos/remoteAccess/tls_setup.py b/repos/remoteAccess/tls_setup.py index 80d6b42..4e7f24e 100644 --- a/repos/remoteAccess/tls_setup.py +++ b/repos/remoteAccess/tls_setup.py @@ -343,6 +343,7 @@ def request_credentials(credentials, user_data): def hypervisor_connecting_test(uri, auth_tls, username, password, logger, expected_result): connect remote server +ret = 0 try: conn = connectAPI.ConnectAPI() if auth_tls == 'none': @@ -355,6 +356,7 @@ def hypervisor_connecting_test(uri, auth_tls, username, except LibvirtAPI, e: logger.error(API error message: %s, error code is %s % \ (e.response()['message'], e.response()['code'])) +ret = 1 conn.close() -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [libvirt-test-API][PATCH] fix the problem of self.conn being returned as None
--- lib/connectAPI.py |6 +++--- 1 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/connectAPI.py b/lib/connectAPI.py index 702a088..cfa4fea 100644 --- a/lib/connectAPI.py +++ b/lib/connectAPI.py @@ -44,7 +44,7 @@ class ConnectAPI(object): def open(self, uri): try: -conn = libvirt.open(uri) +self.conn = libvirt.open(uri) return self.conn except libvirt.libvirtError, e: message = e.get_error_message() @@ -53,7 +53,7 @@ class ConnectAPI(object): def open_read_only(self, uri): try: -conn = libvirt.openReadOnly(uri) +self.conn = libvirt.openReadOnly(uri) return self.conn except libvirt.libvirtError, e: message = e.get_error_message() @@ -62,7 +62,7 @@ class ConnectAPI(object): def openAuth(self, uri, auth, flags = 0): try: -conn = libvirt.openAuth(uri, auth, flags) +self.conn = libvirt.openAuth(uri, auth, flags) return self.conn except libvirt.libvirtError, e: message = e.get_error_message() -- 1.7.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [libvirt-test-API][PATCH 2/2] Add clean part for install_windows_cdrom
* Add cleanup function install_windows_cdrom_clean --- repos/domain/install_windows_cdrom.py | 88 +++- 1 files changed, 74 insertions(+), 14 deletions(-) diff --git a/repos/domain/install_windows_cdrom.py b/repos/domain/install_windows_cdrom.py index f1441df..2ea0ee7 100644 --- a/repos/domain/install_windows_cdrom.py +++ b/repos/domain/install_windows_cdrom.py @@ -51,6 +51,15 @@ __version__ = 0.1.0 __credits__ = Copyright (C) 2010 Red Hat, Inc. __all__ = ['install_windows_cdrom', 'usage'] +VIRSH_QUIET_LIST = virsh --quiet list --all|awk '{print $2}'|grep \^%s$\ +VM_STAT = virsh --quiet list --all| grep \\\b%s\\b\|grep off +VM_DESTROY = virsh destroy %s +VM_UNDEFINE = virsh undefine %s + +FLOOPY_IMG = /tmp/floppy.img +ISO_MOUNT_POINT = /mnt/libvirt_windows + + def usage(): print '''usage: mandatory arguments:guesttype guestname @@ -132,19 +141,19 @@ def prepare_iso(iso_file, mount_point): return 0, iso_local_path def prepare_floppy_image(guestname, guestos, guestarch, - windows_unattended_path, cdkey, floppy_img): + windows_unattended_path, cdkey, FLOOPY_IMG): Making corresponding floppy images for the given guestname -if os.path.exists(floppy_img): -os.remove(floppy_img) +if os.path.exists(FLOOPY_IMG): +os.remove(FLOOPY_IMG) -create_cmd = 'dd if=/dev/zero of=%s bs=1440k count=1' % floppy_img +create_cmd = 'dd if=/dev/zero of=%s bs=1440k count=1' % FLOOPY_IMG (status, text) = commands.getstatusoutput(create_cmd) if status: logger.error(failed to create floppy image) return 1 -format_cmd = 'mkfs.msdos -s 1 %s' % floppy_img +format_cmd = 'mkfs.msdos -s 1 %s' % FLOOPY_IMG (status, text) = commands.getstatusoutput(format_cmd) if status: logger.error(failed to format floppy image) @@ -159,7 +168,7 @@ def prepare_floppy_image(guestname, guestos, guestarch, os.makedirs(floppy_mount) try: -mount_cmd = 'mount -o loop %s %s' % (floppy_img, floppy_mount) +mount_cmd = 'mount -o loop %s %s' % (FLOOPY_IMG, floppy_mount) (status, text) = commands.getstatusoutput(mount_cmd) if 
status: logger.error( @@ -202,7 +211,7 @@ def prepare_floppy_image(guestname, guestos, guestarch, cleanup(floppy_mount) -os.chmod(floppy_img, 0755) +os.chmod(FLOOPY_IMG, 0755) logger.info(Boot floppy created successfuly) return 0 @@ -339,22 +348,18 @@ def install_windows_cdrom(params): logger.info('prepare pre-installation environment...') logger.info('mount windows nfs server to /mnt/libvirt_windows') -iso_mount_point = /mnt/libvirt_windows - -status, iso_local_path = prepare_iso(iso_file, iso_mount_point) +status, iso_local_path = prepare_iso(iso_file, ISO_MOUNT_POINT) if status: logger.error(installation failed) return 1 params['bootcd'] = iso_local_path -floppy_img = /tmp/floppy.img - status = prepare_floppy_image(guestname, guestos, guestarch, - windows_unattended_path, cdkey, floppy_img) + windows_unattended_path, cdkey, FLOOPY_IMG) if status: logger.error(making floppy image failed) return 1 -params['floppysource'] = floppy_img +params['floppysource'] = FLOOPY_IMG xmlobj = xmlbuilder.XmlBuilder() guestxml = xmlobj.build_domain_install_win(params) @@ -458,3 +463,58 @@ def install_windows_cdrom(params): return return_close(conn, logger, 0) +def install_windows_cdrom_clean(params): + clean testing environment +logger = params['logger'] +guestname = params.get('guestname') +guesttype = params.get('guesttype') + +util = utils.Utils() +hypervisor = util.get_hypervisor() +if hypervisor == 'xen': +imgfullpath = os.path.join('/var/lib/xen/images', guestname) +elif hypervisor == 'kvm': +imgfullpath = os.path.join('/var/lib/libvirt/images', guestname) + +(status, output) = commands.getstatusoutput(VIRSH_QUIET_LIST % guestname) +if status: +pass +else: +logger.info(remove guest %s, and its disk image file % guestname) +(status, output) = commands.getstatusoutput(VM_STAT % guestname) +if status: +(status, output) = commands.getstatusoutput(VM_DESTROY % guestname) +if status: +logger.error(failed to destroy guest %s % guestname) +logger.error(%s % output) +else: +(status, 
output) = commands.getstatusoutput(VM_UNDEFINE % guestname) +if status: +logger.error(failed to undefine guest %s % guestname) +logger.error(%s % output) +else: +(status, output) = commands.getstatusoutput(VM_UNDEFINE % guestname) +
[libvirt] [libvirt-test-API][PATCH 1/2] Add clean part for install_linux_net
* Add the install_linux_net_clean function --- repos/domain/install_linux_net.py | 58 + 1 files changed, 58 insertions(+), 0 deletions(-) diff --git a/repos/domain/install_linux_net.py b/repos/domain/install_linux_net.py index 924bb05..21ae378 100644 --- a/repos/domain/install_linux_net.py +++ b/repos/domain/install_linux_net.py @@ -51,6 +51,13 @@ from utils.Python import env_parser from utils.Python import xmlbuilder from exception import LibvirtAPI +VIRSH_QUIET_LIST = virsh --quiet list --all|awk '{print $2}'|grep \^%s$\ +VM_STAT = virsh --quiet list --all| grep \\\b%s\\b\|grep off +VM_DESTROY = virsh destroy %s +VM_UNDEFINE = virsh undefine %s + +BOOT_DIR = /var/lib/libvirt/boot/ + def return_close(conn, logger, ret): conn.close() logger.info(closed hypervisor connection) @@ -433,3 +440,54 @@ def install_linux_net(params): return return_close(conn, logger, 1) return return_close(conn, logger, 0) + +def install_linux_net_clean(params): + clean testing environment +logger = params['logger'] +guestname = params.get('guestname') +guesttype = params.get('guesttype') + +util = utils.Utils() +hypervisor = util.get_hypervisor() +if hypervisor == 'xen': +imgfullpath = os.path.join('/var/lib/xen/images', guestname) +elif hypervisor == 'kvm': +imgfullpath = os.path.join('/var/lib/libvirt/images', guestname) + +(status, output) = commands.getstatusoutput(VIRSH_QUIET_LIST % guestname) +if status: +pass +else: +logger.info(remove guest %s, and its disk image file % guestname) +(status, output) = commands.getstatusoutput(VM_STAT % guestname) +if status: +(status, output) = commands.getstatusoutput(VM_DESTROY % guestname) +if status: +logger.error(failed to destroy guest %s % guestname) +logger.error(%s % output) +else: +(status, output) = commands.getstatusoutput(VM_UNDEFINE % guestname) +if status: +logger.error(failed to undefine guest %s % guestname) +logger.error(%s % output) +else: +(status, output) = commands.getstatusoutput(VM_UNDEFINE % guestname) +if status: 
+logger.error(failed to undefine guest %s % guestname) +logger.error(%s % output) + +if os.path.exists(imgfullpath): +os.remove(imgfullpath) + +if guesttype == 'xenpv' or guesttype == 'kvm': +vmlinuz = os.path.join(BOOT_DIR, 'vmlinuz') +initrd = os.path.join(BOOT_DIR, 'initrd.img') +if os.path.exists(vmlinuz): +os.remove(vmlinuz) +if os.path.exists(initrd): +os.remove(initrd) +elif guesttype == 'xenfv': +guest_dir = os.path.join(homepath, guestname) +if os.path.exists(guest_dir): +shutil.rmtree(guest_dir) + -- 1.7.6 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [libvirt-test-API][PATCH 2/2] Add clean part for install_windows_cdrom
* Add cleanup function install_windows_cdrom_clean --- repos/domain/install_windows_cdrom.py | 88 +++- 1 files changed, 74 insertions(+), 14 deletions(-) diff --git a/repos/domain/install_windows_cdrom.py b/repos/domain/install_windows_cdrom.py index f1441df..2ea0ee7 100644 --- a/repos/domain/install_windows_cdrom.py +++ b/repos/domain/install_windows_cdrom.py @@ -51,6 +51,15 @@ __version__ = 0.1.0 __credits__ = Copyright (C) 2010 Red Hat, Inc. __all__ = ['install_windows_cdrom', 'usage'] +VIRSH_QUIET_LIST = virsh --quiet list --all|awk '{print $2}'|grep \^%s$\ +VM_STAT = virsh --quiet list --all| grep \\\b%s\\b\|grep off +VM_DESTROY = virsh destroy %s +VM_UNDEFINE = virsh undefine %s + +FLOOPY_IMG = /tmp/floppy.img +ISO_MOUNT_POINT = /mnt/libvirt_windows + + def usage(): print '''usage: mandatory arguments:guesttype guestname @@ -132,19 +141,19 @@ def prepare_iso(iso_file, mount_point): return 0, iso_local_path def prepare_floppy_image(guestname, guestos, guestarch, - windows_unattended_path, cdkey, floppy_img): + windows_unattended_path, cdkey, FLOOPY_IMG): Making corresponding floppy images for the given guestname -if os.path.exists(floppy_img): -os.remove(floppy_img) +if os.path.exists(FLOOPY_IMG): +os.remove(FLOOPY_IMG) -create_cmd = 'dd if=/dev/zero of=%s bs=1440k count=1' % floppy_img +create_cmd = 'dd if=/dev/zero of=%s bs=1440k count=1' % FLOOPY_IMG (status, text) = commands.getstatusoutput(create_cmd) if status: logger.error(failed to create floppy image) return 1 -format_cmd = 'mkfs.msdos -s 1 %s' % floppy_img +format_cmd = 'mkfs.msdos -s 1 %s' % FLOOPY_IMG (status, text) = commands.getstatusoutput(format_cmd) if status: logger.error(failed to format floppy image) @@ -159,7 +168,7 @@ def prepare_floppy_image(guestname, guestos, guestarch, os.makedirs(floppy_mount) try: -mount_cmd = 'mount -o loop %s %s' % (floppy_img, floppy_mount) +mount_cmd = 'mount -o loop %s %s' % (FLOOPY_IMG, floppy_mount) (status, text) = commands.getstatusoutput(mount_cmd) if 
status: logger.error( @@ -202,7 +211,7 @@ def prepare_floppy_image(guestname, guestos, guestarch, cleanup(floppy_mount) -os.chmod(floppy_img, 0755) +os.chmod(FLOOPY_IMG, 0755) logger.info(Boot floppy created successfuly) return 0 @@ -339,22 +348,18 @@ def install_windows_cdrom(params): logger.info('prepare pre-installation environment...') logger.info('mount windows nfs server to /mnt/libvirt_windows') -iso_mount_point = /mnt/libvirt_windows - -status, iso_local_path = prepare_iso(iso_file, iso_mount_point) +status, iso_local_path = prepare_iso(iso_file, ISO_MOUNT_POINT) if status: logger.error(installation failed) return 1 params['bootcd'] = iso_local_path -floppy_img = /tmp/floppy.img - status = prepare_floppy_image(guestname, guestos, guestarch, - windows_unattended_path, cdkey, floppy_img) + windows_unattended_path, cdkey, FLOOPY_IMG) if status: logger.error(making floppy image failed) return 1 -params['floppysource'] = floppy_img +params['floppysource'] = FLOOPY_IMG xmlobj = xmlbuilder.XmlBuilder() guestxml = xmlobj.build_domain_install_win(params) @@ -458,3 +463,58 @@ def install_windows_cdrom(params): return return_close(conn, logger, 0) +def install_windows_cdrom_clean(params): + clean testing environment +logger = params['logger'] +guestname = params.get('guestname') +guesttype = params.get('guesttype') + +util = utils.Utils() +hypervisor = util.get_hypervisor() +if hypervisor == 'xen': +imgfullpath = os.path.join('/var/lib/xen/images', guestname) +elif hypervisor == 'kvm': +imgfullpath = os.path.join('/var/lib/libvirt/images', guestname) + +(status, output) = commands.getstatusoutput(VIRSH_QUIET_LIST % guestname) +if status: +pass +else: +logger.info(remove guest %s, and its disk image file % guestname) +(status, output) = commands.getstatusoutput(VM_STAT % guestname) +if status: +(status, output) = commands.getstatusoutput(VM_DESTROY % guestname) +if status: +logger.error(failed to destroy guest %s % guestname) +logger.error(%s % output) +else: +(status, 
output) = commands.getstatusoutput(VM_UNDEFINE % guestname) +if status: +logger.error(failed to undefine guest %s % guestname) +logger.error(%s % output) +else: +(status, output) = commands.getstatusoutput(VM_UNDEFINE % guestname) +
[libvirt] [libvirt-test-API][PATCH 1/2] Add clean part for install_linux_net
* Add the install_linux_net_clean function --- repos/domain/install_linux_net.py | 58 + 1 files changed, 58 insertions(+), 0 deletions(-) diff --git a/repos/domain/install_linux_net.py b/repos/domain/install_linux_net.py index 924bb05..21ae378 100644 --- a/repos/domain/install_linux_net.py +++ b/repos/domain/install_linux_net.py @@ -51,6 +51,13 @@ from utils.Python import env_parser from utils.Python import xmlbuilder from exception import LibvirtAPI +VIRSH_QUIET_LIST = virsh --quiet list --all|awk '{print $2}'|grep \^%s$\ +VM_STAT = virsh --quiet list --all| grep \\\b%s\\b\|grep off +VM_DESTROY = virsh destroy %s +VM_UNDEFINE = virsh undefine %s + +BOOT_DIR = /var/lib/libvirt/boot/ + def return_close(conn, logger, ret): conn.close() logger.info(closed hypervisor connection) @@ -433,3 +440,54 @@ def install_linux_net(params): return return_close(conn, logger, 1) return return_close(conn, logger, 0) + +def install_linux_net_clean(params): + clean testing environment +logger = params['logger'] +guestname = params.get('guestname') +guesttype = params.get('guesttype') + +util = utils.Utils() +hypervisor = util.get_hypervisor() +if hypervisor == 'xen': +imgfullpath = os.path.join('/var/lib/xen/images', guestname) +elif hypervisor == 'kvm': +imgfullpath = os.path.join('/var/lib/libvirt/images', guestname) + +(status, output) = commands.getstatusoutput(VIRSH_QUIET_LIST % guestname) +if status: +pass +else: +logger.info(remove guest %s, and its disk image file % guestname) +(status, output) = commands.getstatusoutput(VM_STAT % guestname) +if status: +(status, output) = commands.getstatusoutput(VM_DESTROY % guestname) +if status: +logger.error(failed to destroy guest %s % guestname) +logger.error(%s % output) +else: +(status, output) = commands.getstatusoutput(VM_UNDEFINE % guestname) +if status: +logger.error(failed to undefine guest %s % guestname) +logger.error(%s % output) +else: +(status, output) = commands.getstatusoutput(VM_UNDEFINE % guestname) +if status: 
+logger.error(failed to undefine guest %s % guestname) +logger.error(%s % output) + +if os.path.exists(imgfullpath): +os.remove(imgfullpath) + +if guesttype == 'xenpv' or guesttype == 'kvm': +vmlinuz = os.path.join(BOOT_DIR, 'vmlinuz') +initrd = os.path.join(BOOT_DIR, 'initrd.img') +if os.path.exists(vmlinuz): +os.remove(vmlinuz) +if os.path.exists(initrd): +os.remove(initrd) +elif guesttype == 'xenfv': +guest_dir = os.path.join(homepath, guestname) +if os.path.exists(guest_dir): +shutil.rmtree(guest_dir) + -- 1.7.6 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list