[libvirt] [test-API][PATCH] Add volume upload and download cases

2012-12-28 Thread Wayne Sun
* Test downloading storage volumes using the storage download API.
* Test uploading storage volumes using the storage upload API.
  For the upload case, only the raw volume format is supported; other
  formats will fail.
  The offset and length values should be chosen from 0 and
  1048576, because the upload size is set to 1M.
* Both cases use a blocking stream.
* A sample conf is added.
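
For reference, the blocking-stream flow both cases drive can be sketched
against the public libvirt-python API as below; the pool/volume names and
file paths are placeholders rather than the framework's parameters:

    import libvirt

    conn = libvirt.open("qemu:///system")
    pool = conn.storagePoolLookupByName("default")   # placeholder pool
    vol = pool.storageVolLookupByName("vol1")        # placeholder volume

    # Upload: push 1M of local data into the volume, starting at offset 0.
    st = conn.newStream(0)                           # 0 = blocking stream
    vol.upload(st, 0, 1048576, 0)
    src = open("/tmp/data", "rb")
    st.sendAll(lambda stream, nbytes, f: f.read(nbytes), src)
    st.finish()
    src.close()

    # Download: pull the same range back out for verification.
    st = conn.newStream(0)
    vol.download(st, 0, 1048576, 0)
    dst = open("/tmp/out", "wb")
    st.recvAll(lambda stream, data, f: f.write(data), dst)
    st.finish()
    dst.close()

Comparing md5 digests of the transferred range on both sides (as the
digest() helper in vol_download.py below does) then verifies the data.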

Signed-off-by: Wayne Sun g...@redhat.com
---
 cases/storage_vol_upload_download.conf |  127 ++
 repos/storage/vol_download.py  |  157 +++
 repos/storage/vol_upload.py|  183 
 3 files changed, 467 insertions(+), 0 deletions(-)
 create mode 100644 cases/storage_vol_upload_download.conf
 create mode 100644 repos/storage/vol_download.py
 create mode 100644 repos/storage/vol_upload.py

diff --git a/cases/storage_vol_upload_download.conf 
b/cases/storage_vol_upload_download.conf
new file mode 100644
index 000..b393814
--- /dev/null
+++ b/cases/storage_vol_upload_download.conf
@@ -0,0 +1,127 @@
+storage:create_dir_pool
+    poolname
+        $defaultpoolname
+
+storage:vol_upload
+    poolname
+        $defaultpoolname
+    volname
+        $defaultvolumename
+    capacity
+        10M
+    volformat
+        raw
+    offset
+        0
+    length
+        0
+clean
+
+storage:vol_upload
+    poolname
+        $defaultpoolname
+    volname
+        $defaultvolumename
+    capacity
+        10M
+    volformat
+        raw
+    offset
+        1048576
+    length
+        0
+clean
+
+storage:vol_upload
+    poolname
+        $defaultpoolname
+    volname
+        $defaultvolumename
+    capacity
+        10M
+    volformat
+        raw
+    offset
+        0
+    length
+        1048576
+clean
+
+storage:vol_upload
+    poolname
+        $defaultpoolname
+    volname
+        $defaultvolumename
+    capacity
+        10M
+    volformat
+        raw
+    offset
+        1048576
+    length
+        1048576
+clean
+
+storage:vol_download
+    poolname
+        $defaultpoolname
+    volname
+        $defaultvolumename
+    capacity
+        50M
+    volformat
+        raw
+    offset
+        0
+    length
+        0
+clean
+
+storage:vol_download
+    poolname
+        $defaultpoolname
+    volname
+        $defaultvolumename
+    capacity
+        50M
+    volformat
+        qcow2
+    offset
+        1048576
+    length
+        0
+clean
+
+storage:vol_download
+    poolname
+        $defaultpoolname
+    volname
+        $defaultvolumename
+    capacity
+        50M
+    volformat
+        qed
+    offset
+        0
+    length
+        1048576
+clean
+
+storage:vol_download
+    poolname
+        $defaultpoolname
+    volname
+        $defaultvolumename
+    capacity
+        50M
+    volformat
+        raw
+    offset
+        1048576
+    length
+        1048576
+clean
+
+storage:destroy_pool
+    poolname
+        $defaultpoolname
diff --git a/repos/storage/vol_download.py b/repos/storage/vol_download.py
new file mode 100644
index 000..839bc8a
--- /dev/null
+++ b/repos/storage/vol_download.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+# storage volume download testing
+
+import os
+import string
+import hashlib
+from xml.dom import minidom
+
+import libvirt
+from libvirt import libvirtError
+
+from src import sharedmod
+from utils import utils
+
+required_params = ('poolname', 'volname', 'volformat', 'capacity', 'offset',
+                   'length',)
+optional_params = {'xml' : 'xmls/dir_volume.xml',
+                  }
+
+def get_pool_path(poolobj):
+    """ get pool xml description
+    """
+    poolxml = poolobj.XMLDesc(0)
+
+    logger.debug("the xml description of pool is %s" % poolxml)
+
+    doc = minidom.parseString(poolxml)
+    path_element = doc.getElementsByTagName('path')[0]
+    textnode = path_element.childNodes[0]
+    path_value = textnode.data
+
+    return path_value
+
+def write_file(path, capacity):
+    """write test data to file
+    """
+    logger.info("write %s data into file %s" % (capacity, path))
+    out = utils.get_capacity_suffix_size(capacity)
+    f = open(path, 'w')
+    # 64-byte pattern: 26 lower + 26 upper + 10 digits + '.' + '\n'
+    datastr = ''.join(string.lowercase + string.uppercase
+                      + string.digits + '.' + '\n')
+    repeat = out['capacity_byte'] / 64
+    data = ''.join(repeat * datastr)
+    f.write(data)
+    f.close()
+
+def digest(path, offset, length):
+    """read data from file with length bytes, begin at offset
+       and return md5 hexdigest
+    """
+    f = open(path, 'r')
+    f.seek(offset)
+    m = hashlib.md5()
+    done = 0
+
+    while True:
+        want = 1024
+        if length and length - done < want:
+            want = length - done
+        outstr = f.read(want)
+        got = len(outstr)
+        if got == 0:
+            break
+        done += got
+        m.update(outstr)
+
+    logger.debug("total %s bytes data is read" % done)
+
+    f.close()
+    return m.hexdigest()
+
+def handler(stream, data, file_):
+    # stream recvAll callback: write received data into the target file
+    return file_.write(data)
+
+def 

Re: [libvirt] libvirtd segfault

2012-12-28 Thread Scott Sullivan

In case it's useful, here's the 'bt' output from this session as well:

(gdb) bt
#0  qemuDomainObjBeginJobInternal (driver=0x7fffe401d740, driver_locked=true, obj=0x7fff80001b00, job=QEMU_JOB_DESTROY, asyncJob=QEMU_ASYNC_JOB_NONE) at qemu/qemu_domain.c:768
#1  0x7fffeac2b223 in qemuDomainDestroyFlags (dom=<value optimized out>, flags=<value optimized out>) at qemu/qemu_driver.c:2052
#2  0x0039f10f97df in virDomainDestroy (domain=0x7fff741b8540) at libvirt.c:2201
#3  0x00428e22 in remoteDispatchDomainDestroy (server=<value optimized out>, client=<value optimized out>, msg=<value optimized out>, rerr=0x7fff8e1fbbe0, args=<value optimized out>, ret=<value optimized out>) at remote_dispatch.h:1277
#4  remoteDispatchDomainDestroyHelper (server=<value optimized out>, client=<value optimized out>, msg=<value optimized out>, rerr=0x7fff8e1fbbe0, args=<value optimized out>, ret=<value optimized out>) at remote_dispatch.h:1255
#5  0x0039f1146152 in virNetServerProgramDispatchCall (prog=0x6884a0, server=0x67fe60, client=0x68eb50, msg=0x6930b0) at rpc/virnetserverprogram.c:431
#6  virNetServerProgramDispatch (prog=0x6884a0, server=0x67fe60, client=0x68eb50, msg=0x6930b0) at rpc/virnetserverprogram.c:304
#7  0x0039f1143fee in virNetServerProcessMsg (srv=<value optimized out>, client=0x68eb50, prog=<value optimized out>, msg=0x6930b0) at rpc/virnetserver.c:171
#8  0x0039f1144a8b in virNetServerHandleJob (jobOpaque=<value optimized out>, opaque=<value optimized out>) at rpc/virnetserver.c:192
#9  0x0039f10643ec in virThreadPoolWorker (opaque=<value optimized out>) at util/threadpool.c:144
#10 0x0039f1063cd9 in virThreadHelper (data=<value optimized out>) at util/threads-pthread.c:161
#11 0x00300a2077f1 in start_thread () from /lib64/libpthread.so.0
#12 0x003009ae570d in clone () from /lib64/libc.so.6
(gdb)


On 12/27/2012 01:15 PM, Scott Sullivan wrote:

### Libvirt version: ###

libvirt tagged v1.0.0 from git, with
f0e72b2f5c675f927d04545dc5095f9e5998f171 applied


### Problem: ###

Libvirtd segfaults

### Steps to reproduce: ###

This is difficult to reproduce, but it appears to happen only when doing
'virsh destroy's, and when they are run at concurrent intervals over
extended periods of time.


Here are the steps I have taken to reproduce this problem
(debian_6.0_amd64_kvm.img.gz is just a zipped-up stock Squeeze ISO
install):


First, copy this script to ease the steps (assumed to be at 
/root/libvirt_crash.pl later):



#!/usr/bin/env perl

use strict;
use warnings;

use Getopt::Long qw(:config no_ignore_case);
use String::Random;
use UUID::Random;

my ($lvms,$sleep,$restarts_per_lvm);
GetOptions(
    'lvms=i' => \$lvms,
    'restarts_per_lvm=i' => \$restarts_per_lvm,
    'sleep=i' => \$sleep,
);


die "USAGE:\n$0 [--lvms int | --restarts_per_lvm int | --sleep int (optional) ]\n"
    unless ( $lvms && $restarts_per_lvm );


sub _create_lvm {
    my $opts = shift;
    print "\tlvcreate -n $opts->{lvm} -L 5G /dev/LVM\n";
    system("lvcreate -n $opts->{lvm} -L 5G /dev/LVM");
}

sub _kpartx_lvm {
    my $opts = shift;
    die "_kpartx_lvm needs modifier!\n" unless ( $opts->{modifier} );
    print "\tkpartx -p p -$opts->{modifier} -v /dev/LVM/$opts->{lvm}\n";
    system("kpartx -p p -$opts->{modifier} -v /dev/LVM/$opts->{lvm}");
}

sub _remove_lvm {
    my $opts = shift;
    print "\tlvremove -f /dev/LVM/$opts->{lvm}\n";
    system("lvremove -f /dev/LVM/$opts->{lvm}");
    unlink("/xen/configs/$opts->{lvm}.cfg");
}

sub _display_lvm {
    my $opts = shift;
    print "\tparted -s /dev/LVM/$opts->{lvm} unit gb p\n";
    system("parted -s /dev/LVM/$opts->{lvm} unit gb p");
}

sub _dd_template {
    my $opts = shift;
    print "\tdd if=/root/ssullivan/debian_6.0_amd64_kvm.img.gz bs=1M | gunzip -c | dd of=/dev/LVM/$opts->{lvm} bs=1M\n";
    system("dd if=/root/ssullivan/debian_6.0_amd64_kvm.img.gz bs=1M | gunzip -c | dd of=/dev/LVM/$opts->{lvm} bs=1M");
}

sub _e2fsck_lvm {
    my $opts = shift;
    print "\te2fsck -y -f /dev/mapper/LVM-$opts->{lvm}p1\n";
    system("e2fsck -y -f /dev/mapper/LVM-$opts->{lvm}p1");
    print "\te2fsck -y -f /dev/mapper/LVM-$opts->{lvm}p3\n";
    system("e2fsck -y -f /dev/mapper/LVM-$opts->{lvm}p3");
}

sub _place_virtconf {
    my $opts = shift;

    my $uuid = UUID::Random::generate;
    my $virt_conf = <<END;
<domain type="kvm">
<name>$opts->{lvm}</name>
<uuid>$uuid</uuid>
<memory>450560</memory>
<currentMemory>450560</currentMemory>
<vcpu>2</vcpu>
<cpu>
<topology sockets="2" cores="4" threads="1"/>
</cpu>
<os>
<type arch="x86_64" machine="pc-1.1">hvm</type>
<boot dev="hd"/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<clock offset="localtime"/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>restart</on_crash>
<devices>
<emulator>/opt/libexec/qemu-kvm-wrapper</emulator>
<disk type="block" device="disk">
<driver name="qemu" cache="none"/>
<source dev="/dev/LVM/$opts->{lvm}"/>
<target dev="vda" bus="virtio"/>
</disk>


<serial type="pty">
<target port="1"/>
</serial>
<console type="pty">

[libvirt] [PATCH] python: Adapt to virevent rename

2012-12-28 Thread Michal Privoznik
With our recent renames under src/util/* we forgot to adapt the
python wrapper code generator. This results in some methods not
being exposed:

$ python examples/domain-events/events-python/event-test.py
Using uri:qemu:///system
Traceback (most recent call last):
  File "examples/domain-events/events-python/event-test.py", line 585, in <module>
    main()
  File "examples/domain-events/events-python/event-test.py", line 543, in main
    virEventLoopPureStart()
  File "examples/domain-events/events-python/event-test.py", line 416, in virEventLoopPureStart
    virEventLoopPureRegister()
  File "examples/domain-events/events-python/event-test.py", line 397, in virEventLoopPureRegister
    libvirt.virEventRegisterImpl(virEventAddHandleImpl,
AttributeError: 'module' object has no attribute 'virEventRegisterImpl'
---

Pushed under trivial rule.
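
A quick way to sanity-check the regenerated bindings is to skip the
example's pure-Python loop and use libvirt's built-in default event loop
instead; a minimal sketch (URI and callback body are illustrative):

    import libvirt

    # Register the built-in poll()-based event loop implementation;
    # event-test.py instead supplies its own via virEventRegisterImpl().
    libvirt.virEventRegisterDefaultImpl()

    conn = libvirt.openReadOnly("qemu:///system")

    def lifecycle_cb(conn, dom, event, detail, opaque):
        print("domain %s: event %d detail %d" % (dom.name(), event, detail))

    conn.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
                                lifecycle_cb, None)

    # Each iteration dispatches pending handle/timeout callbacks.
    while True:
        libvirt.virEventRunDefaultImpl()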

 python/generator.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/generator.py b/python/generator.py
index e9b9270..bae4edc 100755
--- a/python/generator.py
+++ b/python/generator.py
@@ -132,7 +132,7 @@ class docParser(xml.sax.handler.ContentHandler):
         if tag == 'function':
             if self.function != None:
                 if (self.function_module == "libvirt" or
-                    self.function_module == "event" or
+                    self.function_module == "virevent" or
                     self.function_module == "virterror"):
                     function(self.function, self.function_descr,
                              self.function_return, self.function_args,
-- 
1.8.0.2



Re: [libvirt] libvirtd segfault

2012-12-28 Thread Michal Privoznik
On 28.12.2012 14:24, Scott Sullivan wrote:
 In case it's useful, here's the 'bt' output from this session as well:
 
 (gdb) bt
 #0  qemuDomainObjBeginJobInternal (driver=0x7fffe401d740, driver_locked=true, obj=0x7fff80001b00, job=QEMU_JOB_DESTROY, asyncJob=QEMU_ASYNC_JOB_NONE) at qemu/qemu_domain.c:768
 #1  0x7fffeac2b223 in qemuDomainDestroyFlags (dom=<value optimized out>, flags=<value optimized out>) at qemu/qemu_driver.c:2052
 #2  0x0039f10f97df in virDomainDestroy (domain=0x7fff741b8540) at libvirt.c:2201
 #3  0x00428e22 in remoteDispatchDomainDestroy (server=<value optimized out>, client=<value optimized out>, msg=<value optimized out>, rerr=0x7fff8e1fbbe0, args=<value optimized out>, ret=<value optimized out>) at remote_dispatch.h:1277
 #4  remoteDispatchDomainDestroyHelper (server=<value optimized out>, client=<value optimized out>, msg=<value optimized out>, rerr=0x7fff8e1fbbe0, args=<value optimized out>, ret=<value optimized out>) at remote_dispatch.h:1255
 #5  0x0039f1146152 in virNetServerProgramDispatchCall (prog=0x6884a0, server=0x67fe60, client=0x68eb50, msg=0x6930b0) at rpc/virnetserverprogram.c:431
 #6  virNetServerProgramDispatch (prog=0x6884a0, server=0x67fe60, client=0x68eb50, msg=0x6930b0) at rpc/virnetserverprogram.c:304
 #7  0x0039f1143fee in virNetServerProcessMsg (srv=<value optimized out>, client=0x68eb50, prog=<value optimized out>, msg=0x6930b0) at rpc/virnetserver.c:171
 #8  0x0039f1144a8b in virNetServerHandleJob (jobOpaque=<value optimized out>, opaque=<value optimized out>) at rpc/virnetserver.c:192
 #9  0x0039f10643ec in virThreadPoolWorker (opaque=<value optimized out>) at util/threadpool.c:144
 #10 0x0039f1063cd9 in virThreadHelper (data=<value optimized out>) at util/threads-pthread.c:161
 #11 0x00300a2077f1 in start_thread () from /lib64/libpthread.so.0
 #12 0x003009ae570d in clone () from /lib64/libc.so.6
 (gdb)

I think this patch should fix your problem:

https://www.redhat.com/archives/libvir-list/2012-December/msg00935.html

If so, can you please test and confirm, since you already have everything
set up?

Michal



[libvirt] [PATCH] sanlock: Chown lease files as well

2012-12-28 Thread Michal Privoznik
Since sanlock doesn't run under root:root, we have chown()'ed the
__LIBVIRT__DISKS__ lease file to the user:group defined in the
sanlock config. However, when writing that patch I forgot about the
lease files for each disk (this is the
/var/lib/libvirt/sanlock/<md5> file).
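
For anyone verifying the fix by hand, here is a small illustrative check
of lease-file ownership after starting a sanlock-protected domain,
assuming the default lease directory:

    import grp
    import os
    import pwd

    lease_dir = "/var/lib/libvirt/sanlock"  # default sanlock lease directory

    # After this patch, every file here, including the per-disk md5-named
    # leases, should be owned by the user:group from the sanlock config.
    for name in os.listdir(lease_dir):
        st = os.stat(os.path.join(lease_dir, name))
        print("%-40s %s:%s" % (name,
                               pwd.getpwuid(st.st_uid).pw_name,
                               grp.getgrgid(st.st_gid).gr_name))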
---
 src/locking/lock_driver_sanlock.c | 11 +++
 1 file changed, 11 insertions(+)

diff --git a/src/locking/lock_driver_sanlock.c 
b/src/locking/lock_driver_sanlock.c
index 75ced84..c955003 100644
--- a/src/locking/lock_driver_sanlock.c
+++ b/src/locking/lock_driver_sanlock.c
@@ -679,6 +679,17 @@ static int virLockManagerSanlockCreateLease(struct sanlk_resource *res)
         }
         VIR_DEBUG("Someone else just created lockspace %s",
                   res->disks[0].path);
     } else {
+        /* chown() the path to make sure sanlock can access it */
+        if ((driver->user != -1 || driver->group != -1) &&
+            (fchown(fd, driver->user, driver->group) < 0)) {
+            virReportSystemError(errno,
+                                 _("cannot chown '%s' to (%u, %u)"),
+                                 res->disks[0].path,
+                                 (unsigned int) driver->user,
+                                 (unsigned int) driver->group);
+            goto error_unlink;
+        }
+
         if ((rv = sanlock_align(&res->disks[0])) < 0) {
             if (rv <= -200)
                 virReportError(VIR_ERR_INTERNAL_ERROR,
-- 
1.8.0.2



Re: [libvirt] virsh net-create explanation

2012-12-28 Thread Michal Privoznik
On 26.12.2012 16:36, Bilal Ahmad wrote:
 Hi all, 
 
 I am new to libvirt and started looking at the source code. While
 tracing back the virsh command net-create, I got stuck in a loop and
 I would really like someone to explain how this works. 
 
 In virsh-network.c, from:
 
 network = virNetworkCreateXML(ctl->conn, buffer); 
 
 I traced back to: 
 
 if (conn->networkDriver && conn->networkDriver->networkCreateXML) {
     virNetworkPtr ret;
     ret = conn->networkDriver->networkCreateXML(conn, xmlDesc);

Some hypervisors manage networks on their own (e.g. VBox) while others
rely on our bridge driver. Since we've switched to C99 struct initialization,
you can simply grep for networkCreateXML:

$ git grep networkCreateXML

and you'll see which functions implement the functionality:

[...]
src/network/bridge_driver.c:    .networkCreateXML = networkCreate, /* 0.2.0 */
src/remote/remote_driver.c:    .networkCreateXML = remoteNetworkCreateXML, /* 0.3.0 */
src/test/test_driver.c:    .networkCreateXML = testNetworkCreate, /* 0.3.2 */
src/vbox/vbox_tmpl.c:    .networkCreateXML   = vboxNetworkCreateXML, /* 0.6.4 */

And now you can look deeper into networkCreate(), testNetworkCreate() or 
vboxNetworkCreateXML().
You can repeat the process with other driver methods and drivers as well.
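
From the client side, the entry point virsh net-create reaches is also
available directly; a minimal libvirt-python equivalent (the network XML
below is illustrative):

    import libvirt

    conn = libvirt.open("qemu:///system")

    # virsh net-create maps to virNetworkCreateXML, which the remote or
    # bridge driver then services via its networkCreateXML entry.
    xml = """
    <network>
      <name>testnet</name>
      <bridge name='virbr10'/>
      <ip address='192.168.200.1' netmask='255.255.255.0'/>
    </network>
    """
    net = conn.networkCreateXML(xml)
    print("created transient network: %s" % net.name())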

Michal



Re: [libvirt] libvirtd segfault

2012-12-28 Thread Scott Sullivan

On 12/28/2012 10:50 AM, Michal Privoznik wrote:

On 28.12.2012 14:24, Scott Sullivan wrote:
   

In case it's useful, here's the 'bt' output from this session as well:

(gdb) bt
#0  qemuDomainObjBeginJobInternal (driver=0x7fffe401d740, driver_locked=true, obj=0x7fff80001b00, job=QEMU_JOB_DESTROY, asyncJob=QEMU_ASYNC_JOB_NONE) at qemu/qemu_domain.c:768
#1  0x7fffeac2b223 in qemuDomainDestroyFlags (dom=<value optimized out>, flags=<value optimized out>) at qemu/qemu_driver.c:2052
#2  0x0039f10f97df in virDomainDestroy (domain=0x7fff741b8540) at libvirt.c:2201
#3  0x00428e22 in remoteDispatchDomainDestroy (server=<value optimized out>, client=<value optimized out>, msg=<value optimized out>, rerr=0x7fff8e1fbbe0, args=<value optimized out>, ret=<value optimized out>) at remote_dispatch.h:1277
#4  remoteDispatchDomainDestroyHelper (server=<value optimized out>, client=<value optimized out>, msg=<value optimized out>, rerr=0x7fff8e1fbbe0, args=<value optimized out>, ret=<value optimized out>) at remote_dispatch.h:1255
#5  0x0039f1146152 in virNetServerProgramDispatchCall (prog=0x6884a0, server=0x67fe60, client=0x68eb50, msg=0x6930b0) at rpc/virnetserverprogram.c:431
#6  virNetServerProgramDispatch (prog=0x6884a0, server=0x67fe60, client=0x68eb50, msg=0x6930b0) at rpc/virnetserverprogram.c:304
#7  0x0039f1143fee in virNetServerProcessMsg (srv=<value optimized out>, client=0x68eb50, prog=<value optimized out>, msg=0x6930b0) at rpc/virnetserver.c:171
#8  0x0039f1144a8b in virNetServerHandleJob (jobOpaque=<value optimized out>, opaque=<value optimized out>) at rpc/virnetserver.c:192
#9  0x0039f10643ec in virThreadPoolWorker (opaque=<value optimized out>) at util/threadpool.c:144
#10 0x0039f1063cd9 in virThreadHelper (data=<value optimized out>) at util/threads-pthread.c:161
#11 0x00300a2077f1 in start_thread () from /lib64/libpthread.so.0
#12 0x003009ae570d in clone () from /lib64/libc.so.6
(gdb)
 

I think this patch should fix your problem:

https://www.redhat.com/archives/libvir-list/2012-December/msg00935.html

If so, can you please test and confirm, since you already have everything
set up?

Michal
   


I have applied this patch on top of stable v1.0.1. I am going to have my
test running over the weekend, and will report back if I get another
SIGSEGV as I saw before.


However, so far I have run the same test as I noted in my previous
posting, and after maybe 20 minutes this time I got another halt in my
GDB session, though this time it is a SIGPIPE. Here is the output:


Program received signal SIGPIPE, Broken pipe.
0x00300a20e48d in write () from /lib64/libpthread.so.0
Missing separate debuginfos, use: debuginfo-install 
audit-libs-2.1.3-3.el6.x86_64 augeas-libs-0.9.0-1.el6.x86_64 
avahi-libs-0.6.25-11.el6.x86_64 cyrus-sasl-gssapi-2.1.23-13.el6.x86_64 
cyrus-sasl-lib-2.1.23-13.el6.x86_64 cyrus-sasl-md5-2.1.23-13.el6.x86_64 
cyrus-sasl-plain-2.1.23-13.el6.x86_64 db4-4.7.25-16.el6.x86_64 
dbus-libs-1.2.24-5.el6_1.x86_64 device-mapper-libs-1.02.66-7storm.x86_64 
glibc-2.12-1.47.el6.x86_64 gnutls-2.8.5-4.el6.x86_64 
keyutils-libs-1.4-3.el6.x86_64 krb5-libs-1.9-22.el6_2.1.x86_64 
libblkid-2.17.2-12.4.el6.x86_64 libcap-ng-0.6.4-3.el6_0.1.x86_64 
libcom_err-1.41.12-11.el6.x86_64 libcurl-7.19.7-26.el6_1.2.x86_64 
libgcrypt-1.4.5-9.el6.x86_64 libgpg-error-1.7-4.el6.x86_64 
libidn-1.18-2.el6.x86_64 libnl-1.1-14.el6.x86_64 
libpcap-1.0.0-6.20091201git117cb5.el6.x86_64 
libpciaccess-0.12.1-1.el6.x86_64 libselinux-2.0.94-5.2.el6.x86_64 
libsepol-2.0.41-4.el6.x86_64 libtasn1-2.3-3.el6.x86_64 
libudev-147-2.40.el6.x86_64 libuuid-2.17.2-12.4.el6.x86_64 
libxml2-2.7.6-4.el6.x86_64 libxslt-1.1.26-2.el6.x86_64 
netcf-libs-0.1.9-2.el6.x86_64 nspr-4.8.8-3.el6.x86_64 
nss-3.12.10-17.el6_2.x86_64 nss-softokn-freebl-3.12.9-11.el6.x86_64 
nss-util-3.12.10-2.el6.x86_64 numactl-2.0.3-9.el6.x86_64 
openldap-2.4.23-20.el6.x86_64 openssl-1.0.0-20.el6.x86_64 
yajl-1.0.7-3.el6.x86_64 zlib-1.2.3-27.el6.x86_64

(gdb) bt
#0  0x00300a20e48d in write () from /lib64/libpthread.so.0
#1  0x77adb47e in virNetSocketWriteWire (sock=0x685af0, buf=0x1e90690 , len=36) at rpc/virnetsocket.c:1344
#2  0x77adb67e in virNetSocketWrite (sock=0x685af0, buf=0x1e90690 , len=36) at rpc/virnetsocket.c:1490
#3  0x77acb3d6 in virNetServerClientWrite (sock=<value optimized out>, events=11, opaque=0x68a7e0) at rpc/virnetserverclient.c:1095
#4  virNetServerClientDispatchWrite (sock=<value optimized out>, events=11, opaque=0x68a7e0) at rpc/virnetserverclient.c:1116
#5  virNetServerClientDispatchEvent (sock=<value optimized out>, events=11, opaque=0x68a7e0) at rpc/virnetserverclient.c:1227
#6  0x779d6305 in virEventPollDispatchHandles () at util/event_poll.c:500
#7  virEventPollRunOnce () at util/event_poll.c:647
#8  0x779d4ecd in virEventRunDefaultImpl () at util/event.c:247
#9  0x77ace275 in virNetServerRun (srv=0x678df0) at rpc/virnetserver.c:1121
#10 0x0040c76a in 

Re: [libvirt] libvirtd segfault

2012-12-28 Thread Scott Sullivan

On 12/28/2012 01:31 PM, Scott Sullivan wrote:

On 12/28/2012 10:50 AM, Michal Privoznik wrote:

On 28.12.2012 14:24, Scott Sullivan wrote:

In case it's useful, here's the 'bt' output from this session as well:

(gdb) bt
#0  qemuDomainObjBeginJobInternal (driver=0x7fffe401d740, driver_locked=true, obj=0x7fff80001b00, job=QEMU_JOB_DESTROY, asyncJob=QEMU_ASYNC_JOB_NONE) at qemu/qemu_domain.c:768
#1  0x7fffeac2b223 in qemuDomainDestroyFlags (dom=<value optimized out>, flags=<value optimized out>) at qemu/qemu_driver.c:2052
#2  0x0039f10f97df in virDomainDestroy (domain=0x7fff741b8540) at libvirt.c:2201
#3  0x00428e22 in remoteDispatchDomainDestroy (server=<value optimized out>, client=<value optimized out>, msg=<value optimized out>, rerr=0x7fff8e1fbbe0, args=<value optimized out>, ret=<value optimized out>) at remote_dispatch.h:1277
#4  remoteDispatchDomainDestroyHelper (server=<value optimized out>, client=<value optimized out>, msg=<value optimized out>, rerr=0x7fff8e1fbbe0, args=<value optimized out>, ret=<value optimized out>) at remote_dispatch.h:1255
#5  0x0039f1146152 in virNetServerProgramDispatchCall (prog=0x6884a0, server=0x67fe60, client=0x68eb50, msg=0x6930b0) at rpc/virnetserverprogram.c:431
#6  virNetServerProgramDispatch (prog=0x6884a0, server=0x67fe60, client=0x68eb50, msg=0x6930b0) at rpc/virnetserverprogram.c:304
#7  0x0039f1143fee in virNetServerProcessMsg (srv=<value optimized out>, client=0x68eb50, prog=<value optimized out>, msg=0x6930b0) at rpc/virnetserver.c:171
#8  0x0039f1144a8b in virNetServerHandleJob (jobOpaque=<value optimized out>, opaque=<value optimized out>) at rpc/virnetserver.c:192
#9  0x0039f10643ec in virThreadPoolWorker (opaque=<value optimized out>) at util/threadpool.c:144
#10 0x0039f1063cd9 in virThreadHelper (data=<value optimized out>) at util/threads-pthread.c:161
#11 0x00300a2077f1 in start_thread () from /lib64/libpthread.so.0
#12 0x003009ae570d in clone () from /lib64/libc.so.6
(gdb)

I think this patch should fix your problem:

https://www.redhat.com/archives/libvir-list/2012-December/msg00935.html

If so, can you please test and confirm, since you already have
everything set up?


Michal


I have applied this patch on top of stable v1.0.1. I am going to have my
test running over the weekend, and will report back if I get another
SIGSEGV as I saw before.


However, so far I have run the same test as I noted in my previous
posting, and after maybe 20 minutes this time I got another halt in my
GDB session, though this time it is a SIGPIPE. Here is the output:


Program received signal SIGPIPE, Broken pipe.
0x00300a20e48d in write () from /lib64/libpthread.so.0
Missing separate debuginfos, use: debuginfo-install 
audit-libs-2.1.3-3.el6.x86_64 augeas-libs-0.9.0-1.el6.x86_64 
avahi-libs-0.6.25-11.el6.x86_64 cyrus-sasl-gssapi-2.1.23-13.el6.x86_64 
cyrus-sasl-lib-2.1.23-13.el6.x86_64 
cyrus-sasl-md5-2.1.23-13.el6.x86_64 
cyrus-sasl-plain-2.1.23-13.el6.x86_64 db4-4.7.25-16.el6.x86_64 
dbus-libs-1.2.24-5.el6_1.x86_64 
device-mapper-libs-1.02.66-7storm.x86_64 glibc-2.12-1.47.el6.x86_64 
gnutls-2.8.5-4.el6.x86_64 keyutils-libs-1.4-3.el6.x86_64 
krb5-libs-1.9-22.el6_2.1.x86_64 libblkid-2.17.2-12.4.el6.x86_64 
libcap-ng-0.6.4-3.el6_0.1.x86_64 libcom_err-1.41.12-11.el6.x86_64 
libcurl-7.19.7-26.el6_1.2.x86_64 libgcrypt-1.4.5-9.el6.x86_64 
libgpg-error-1.7-4.el6.x86_64 libidn-1.18-2.el6.x86_64 
libnl-1.1-14.el6.x86_64 libpcap-1.0.0-6.20091201git117cb5.el6.x86_64 
libpciaccess-0.12.1-1.el6.x86_64 libselinux-2.0.94-5.2.el6.x86_64 
libsepol-2.0.41-4.el6.x86_64 libtasn1-2.3-3.el6.x86_64 
libudev-147-2.40.el6.x86_64 libuuid-2.17.2-12.4.el6.x86_64 
libxml2-2.7.6-4.el6.x86_64 libxslt-1.1.26-2.el6.x86_64 
netcf-libs-0.1.9-2.el6.x86_64 nspr-4.8.8-3.el6.x86_64 
nss-3.12.10-17.el6_2.x86_64 nss-softokn-freebl-3.12.9-11.el6.x86_64 
nss-util-3.12.10-2.el6.x86_64 numactl-2.0.3-9.el6.x86_64 
openldap-2.4.23-20.el6.x86_64 openssl-1.0.0-20.el6.x86_64 
yajl-1.0.7-3.el6.x86_64 zlib-1.2.3-27.el6.x86_64

(gdb) bt
#0  0x00300a20e48d in write () from /lib64/libpthread.so.0
#1  0x77adb47e in virNetSocketWriteWire (sock=0x685af0, buf=0x1e90690 , len=36) at rpc/virnetsocket.c:1344
#2  0x77adb67e in virNetSocketWrite (sock=0x685af0, buf=0x1e90690 , len=36) at rpc/virnetsocket.c:1490
#3  0x77acb3d6 in virNetServerClientWrite (sock=<value optimized out>, events=11, opaque=0x68a7e0) at rpc/virnetserverclient.c:1095
#4  virNetServerClientDispatchWrite (sock=<value optimized out>, events=11, opaque=0x68a7e0) at rpc/virnetserverclient.c:1116
#5  virNetServerClientDispatchEvent (sock=<value optimized out>, events=11, opaque=0x68a7e0) at rpc/virnetserverclient.c:1227
#6  0x779d6305 in virEventPollDispatchHandles () at util/event_poll.c:500
#7  virEventPollRunOnce () at util/event_poll.c:647
#8  0x779d4ecd in virEventRunDefaultImpl () at util/event.c:247
#9  0x77ace275 in virNetServerRun (srv=0x678df0) at 

[libvirt] [PATCH] ESX: Add AnyType_Serialize routine to esx_vi_types.c

2012-12-28 Thread Ata E Husain Bohra
Add an esxVI_AnyType_Serialize routine to allow serialization
of objects containing variables of type AnyType. The routine
attempts to determine the type of the object, covering
boolean, long, int, string, short, and byte.

If a variable does not fall under any of the above-mentioned
types, it is serialized as anyType.
---
 src/esx/esx_vi_types.c |   48 
 src/esx/esx_vi_types.h |3 ++-
 2 files changed, 50 insertions(+), 1 deletion(-)

diff --git a/src/esx/esx_vi_types.c b/src/esx/esx_vi_types.c
index d1f91ff..2076ce4 100644
--- a/src/esx/esx_vi_types.c
+++ b/src/esx/esx_vi_types.c
@@ -1130,6 +1130,54 @@ esxVI_AnyType_Deserialize(xmlNodePtr node, esxVI_AnyType **anyType)
 
 
 
+int
+esxVI_AnyType_Serialize(esxVI_AnyType *anyType, const char *element,
+                        virBufferPtr output)
+{
+    if (element == NULL || output == NULL) {
+        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                       _("Invalid argument"));
+        return -1;
+    }
+
+    if (anyType == NULL || anyType->value == NULL) {
+        return 0;
+    }
+
+    switch (anyType->type) {
+    case esxVI_Type_Boolean:
+        ESV_VI__XML_TAG__OPEN(output, element, "xsd:boolean");
+        break;
+    case esxVI_Type_String:
+        ESV_VI__XML_TAG__OPEN(output, element, "xsd:string");
+        break;
+    case esxVI_Type_Short:
+        ESV_VI__XML_TAG__OPEN(output, element, "xsd:short");
+        break;
+    case esxVI_Type_Byte:
+        ESV_VI__XML_TAG__OPEN(output, element, "xsd:byte");
+        break;
+    case esxVI_Type_Int:
+        ESV_VI__XML_TAG__OPEN(output, element, "xsd:int");
+        break;
+    case esxVI_Type_Long:
+        ESV_VI__XML_TAG__OPEN(output, element, "xsd:long");
+        break;
+    case esxVI_Type_Undefined:
+    case esxVI_Type_Other:
+    default:
+        ESV_VI__XML_TAG__OPEN(output, element, "xsd:anyType");
+        break;
+    }
+
+    virBufferAdd(output, anyType->value, -1);
+
+    ESV_VI__XML_TAG__CLOSE(output, element);
+
+    return 0;
+}
+
+
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
  * XSD: String
  */
diff --git a/src/esx/esx_vi_types.h b/src/esx/esx_vi_types.h
index 92dc16f..5150377 100644
--- a/src/esx/esx_vi_types.h
+++ b/src/esx/esx_vi_types.h
 const char *esxVI_AnyType_TypeToString(esxVI_AnyType *anyType);
 int esxVI_AnyType_ExpectType(esxVI_AnyType *anyType, esxVI_Type type);
 int esxVI_AnyType_DeepCopy(esxVI_AnyType **dest, esxVI_AnyType *src);
 int esxVI_AnyType_Deserialize(xmlNodePtr node, esxVI_AnyType **anyType);
-
+int esxVI_AnyType_Serialize(esxVI_AnyType *anyType, const char *element,
+                            virBufferPtr output);
 
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
-- 
1.7.9.5
