dony71 opened a new issue #4334: URL: https://github.com/apache/cloudstack/issues/4334
I followed the installation guide at https://rohityadav.cloud/blog/cloudstack-rpi4-kvm/. Everything works except the console proxy: it stops and cannot be restarted. management-server.log shows:

```
2020-09-18 17:50:21,883 WARN [c.c.u.d.Merovingian2] (consoleproxy-1:ctx-add1d9ee) (logid:4ee49b96) Timed out on acquiring lock vm_instance2473 . Waited for 1800seconds
com.cloud.utils.exception.CloudRuntimeException: Timed out on acquiring lock vm_instance2473 . Waited for 1800seconds
    at com.cloud.utils.db.Merovingian2.acquire(Merovingian2.java:151)
    at com.cloud.utils.db.TransactionLegacy.lock(TransactionLegacy.java:386)
    at com.cloud.utils.db.GenericDaoBase.lockInLockTable(GenericDaoBase.java:1079)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:566)
    at org.springframework.aop.support.AopUtils.invokeJoinpointUsingReflection(AopUtils.java:344)
    at org.springframework.aop.framework.ReflectiveMethodInvocation.invokeJoinpoint(ReflectiveMethodInvocation.java:198)
    at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163)
    at com.cloud.utils.db.TransactionContextInterceptor.invoke(TransactionContextInterceptor.java:34)
    at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:175)
    at org.springframework.aop.interceptor.ExposeInvocationInterceptor.invoke(ExposeInvocationInterceptor.java:95)
    at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186)
    at org.springframework.aop.framework.JdkDynamicAopProxy.invoke(JdkDynamicAopProxy.java:212)
    at com.sun.proxy.$Proxy91.lockInLockTable(Unknown Source)
    at org.apache.cloudstack.framework.jobs.impl.AsyncJobManagerImpl.submitAsyncJob(AsyncJobManagerImpl.java:229)
    at com.cloud.vm.VirtualMachineManagerImpl.startVmThroughJobQueue(VirtualMachineManagerImpl.java:4737)
    at com.cloud.vm.VirtualMachineManagerImpl.advanceStart(VirtualMachineManagerImpl.java:904)
    at com.cloud.vm.VirtualMachineManagerImpl.advanceStart(VirtualMachineManagerImpl.java:883)
    at com.cloud.consoleproxy.ConsoleProxyManagerImpl.startProxy(ConsoleProxyManagerImpl.java:544)
    at com.cloud.consoleproxy.ConsoleProxyManagerImpl.allocCapacity(ConsoleProxyManagerImpl.java:978)
    at com.cloud.consoleproxy.ConsoleProxyManagerImpl.expandPool(ConsoleProxyManagerImpl.java:1696)
    at com.cloud.consoleproxy.ConsoleProxyManagerImpl.expandPool(ConsoleProxyManagerImpl.java:159)
    at com.cloud.vm.SystemVmLoadScanner.loadScan(SystemVmLoadScanner.java:121)
    at com.cloud.vm.SystemVmLoadScanner$1.reallyRun(SystemVmLoadScanner.java:91)
    at com.cloud.vm.SystemVmLoadScanner$1.runInContext(SystemVmLoadScanner.java:82)
    at org.apache.cloudstack.managed.context.ManagedContextRunnable$1.run(ManagedContextRunnable.java:49)
    at org.apache.cloudstack.managed.context.impl.DefaultManagedContext$1.call(DefaultManagedContext.java:56)
    at org.apache.cloudstack.managed.context.impl.DefaultManagedContext.callWithContext(DefaultManagedContext.java:103)
    at org.apache.cloudstack.managed.context.impl.DefaultManagedContext.runWithContext(DefaultManagedContext.java:53)
    at org.apache.cloudstack.managed.context.ManagedContextRunnable.run(ManagedContextRunnable.java:46)
    at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
    at java.base/java.util.concurrent.FutureTask.runAndReset(FutureTask.java:305)
    at java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:305)
    at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
    at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
    at java.base/java.lang.Thread.run(Thread.java:834)
2020-09-18 17:50:21,886 WARN [o.a.c.f.j.i.AsyncJobManagerImpl] (consoleproxy-1:ctx-add1d9ee) (logid:4ee49b96) Unable to schedule async job for command com.cloud.vm.VmWorkStart, unexpected exception.
com.cloud.utils.exception.CloudRuntimeException: Failed to acquire lock in submitting async job: com.cloud.vm.VmWorkStart with timeout value = 1800
    at org.apache.cloudstack.framework.jobs.impl.AsyncJobManagerImpl.submitAsyncJob(AsyncJobManagerImpl.java:230)
    at com.cloud.vm.VirtualMachineManagerImpl.startVmThroughJobQueue(VirtualMachineManagerImpl.java:4737)
    at com.cloud.vm.VirtualMachineManagerImpl.advanceStart(VirtualMachineManagerImpl.java:904)
    at com.cloud.vm.VirtualMachineManagerImpl.advanceStart(VirtualMachineManagerImpl.java:883)
    at com.cloud.consoleproxy.ConsoleProxyManagerImpl.startProxy(ConsoleProxyManagerImpl.java:544)
    at com.cloud.consoleproxy.ConsoleProxyManagerImpl.allocCapacity(ConsoleProxyManagerImpl.java:978)
    at com.cloud.consoleproxy.ConsoleProxyManagerImpl.expandPool(ConsoleProxyManagerImpl.java:1696)
    at com.cloud.consoleproxy.ConsoleProxyManagerImpl.expandPool(ConsoleProxyManagerImpl.java:159)
    at com.cloud.vm.SystemVmLoadScanner.loadScan(SystemVmLoadScanner.java:121)
    at com.cloud.vm.SystemVmLoadScanner$1.reallyRun(SystemVmLoadScanner.java:91)
    at com.cloud.vm.SystemVmLoadScanner$1.runInContext(SystemVmLoadScanner.java:82)
    at org.apache.cloudstack.managed.context.ManagedContextRunnable$1.run(ManagedContextRunnable.java:49)
    at org.apache.cloudstack.managed.context.impl.DefaultManagedContext$1.call(DefaultManagedContext.java:56)
    at org.apache.cloudstack.managed.context.impl.DefaultManagedContext.callWithContext(DefaultManagedContext.java:103)
    at org.apache.cloudstack.managed.context.impl.DefaultManagedContext.runWithContext(DefaultManagedContext.java:53)
    at org.apache.cloudstack.managed.context.ManagedContextRunnable.run(ManagedContextRunnable.java:46)
    at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
    at java.base/java.util.concurrent.FutureTask.runAndReset(FutureTask.java:305)
    at java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:305)
    at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
    at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
    at java.base/java.lang.Thread.run(Thread.java:834)
2020-09-18 17:50:21,888 WARN [c.c.c.ConsoleProxyManagerImpl] (consoleproxy-1:ctx-add1d9ee) (logid:4ee49b96) Runtime Exception while trying to start console proxy
com.cloud.utils.exception.CloudRuntimeException: Unable to schedule async job for command com.cloud.vm.VmWorkStart, unexpected exception.
    at org.apache.cloudstack.framework.jobs.impl.AsyncJobManagerImpl.submitAsyncJob(AsyncJobManagerImpl.java:251)
    at com.cloud.vm.VirtualMachineManagerImpl.startVmThroughJobQueue(VirtualMachineManagerImpl.java:4737)
    at com.cloud.vm.VirtualMachineManagerImpl.advanceStart(VirtualMachineManagerImpl.java:904)
    at com.cloud.vm.VirtualMachineManagerImpl.advanceStart(VirtualMachineManagerImpl.java:883)
    at com.cloud.consoleproxy.ConsoleProxyManagerImpl.startProxy(ConsoleProxyManagerImpl.java:544)
    at com.cloud.consoleproxy.ConsoleProxyManagerImpl.allocCapacity(ConsoleProxyManagerImpl.java:978)
    at com.cloud.consoleproxy.ConsoleProxyManagerImpl.expandPool(ConsoleProxyManagerImpl.java:1696)
    at com.cloud.consoleproxy.ConsoleProxyManagerImpl.expandPool(ConsoleProxyManagerImpl.java:159)
    at com.cloud.vm.SystemVmLoadScanner.loadScan(SystemVmLoadScanner.java:121)
    at com.cloud.vm.SystemVmLoadScanner$1.reallyRun(SystemVmLoadScanner.java:91)
    at com.cloud.vm.SystemVmLoadScanner$1.runInContext(SystemVmLoadScanner.java:82)
    at org.apache.cloudstack.managed.context.ManagedContextRunnable$1.run(ManagedContextRunnable.java:49)
    at org.apache.cloudstack.managed.context.impl.DefaultManagedContext$1.call(DefaultManagedContext.java:56)
    at org.apache.cloudstack.managed.context.impl.DefaultManagedContext.callWithContext(DefaultManagedContext.java:103)
    at org.apache.cloudstack.managed.context.impl.DefaultManagedContext.runWithContext(DefaultManagedContext.java:53)
    at org.apache.cloudstack.managed.context.ManagedContextRunnable.run(ManagedContextRunnable.java:46)
    at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
    at java.base/java.util.concurrent.FutureTask.runAndReset(FutureTask.java:305)
    at java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:305)
    at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
    at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
    at java.base/java.lang.Thread.run(Thread.java:834)
2020-09-18 17:50:21,890 INFO [c.c.c.ConsoleProxyManagerImpl] (consoleproxy-1:ctx-add1d9ee) (logid:4ee49b96) Unable to start console proxy vm for standby capacity, vm id : 2473, will recycle it and start a new one
```

v-2473-VM.log shows:

```
2020-09-17 23:10:03.040+0000: starting up libvirt version: 6.0.0, package: 0ubuntu8.3 (Marc Deslauriers <[email protected]> Thu, 30 Jul 2020 06:40:28 -0400), qemu version: 4.2.1Debian 1:4.2-3ubuntu6.5, kernel: 5.4.0-1018-raspi, hostname: cloudstack-mgmt LC_ALL=C \ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin \ HOME=/var/lib/libvirt/qemu/domain-2-v-2473-VM \ XDG_DATA_HOME=/var/lib/libvirt/qemu/domain-2-v-2473-VM/.local/share \ XDG_CACHE_HOME=/var/lib/libvirt/qemu/domain-2-v-2473-VM/.cache \ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain-2-v-2473-VM/.config \ QEMU_AUDIO_DRV=none \ /usr/bin/qemu-system-aarch64 \ -name guest=v-2473-VM,debug-threads=on \ -S \ -object secret,id=masterKey0,format=raw,file=/var/lib/libvirt/qemu/domain-2-v-2473-VM/master-key.aes \ -blockdev '{"driver":"file","filename":"/usr/share/AAVMF/AAVMF_CODE.fd","node-name":"libvirt-pflash0-storage","auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-pflash0-format","read-only":true,"driver":"raw","file":"libvirt-pflash0-storage"}' \ -blockdev '{"driver":"file","filename":"/var/lib/libvirt/qemu/nvram/v-2473-VM_VARS.fd","node-name":"libvirt-pflash1-storage","auto-read-only":true,"discard":"unmap"}' \ -blockdev
'{"node-name":"libvirt-pflash1-format","read-only":false,"driver":"raw","file":"libvirt-pflash1-storage"}' \ -machine virt-4.2,accel=kvm,usb=off,dump-guest-core=off,gic-version=2,pflash0=libvirt-pflash0-format,pflash1=libvirt-pflash1-format \ -cpu host \ -m 1024 \ -overcommit mem-lock=off \ -smp 1,sockets=1,cores=1,threads=1 \ -uuid 6a6f45fd-0f70-4467-bac9-43991d8c565f \ -no-user-config \ -nodefaults \ -chardev socket,id=charmonitor,fd=38,server,nowait \ -mon chardev=charmonitor,id=monitor,mode=control \ -rtc base=utc \ -no-shutdown \ -boot strict=on \ -device pcie-root-port,port=0x8,chassis=1,id=pci.1,bus=pcie.0,multifunction=on,addr=0x1 \ -device pcie-root-port,port=0x9,chassis=2,id=pci.2,bus=pcie.0,addr=0x1.0x1 \ -device pcie-root-port,port=0xa,chassis=3,id=pci.3,bus=pcie.0,addr=0x1.0x2 \ -device pcie-root-port,port=0xb,chassis=4,id=pci.4,bus=pcie.0,addr=0x1.0x3 \ -device pcie-root-port,port=0xc,chassis=5,id=pci.5,bus=pcie.0,addr=0x1.0x4 \ -device pcie-root-port,port=0xd,chassis=6,id=pci.6,bus=pcie.0,addr=0x1.0x5 \ -device pcie-pci-bridge,id=pci.7,bus=pci.1,addr=0x0 \ -device pcie-root-port,port=0xe,chassis=8,id=pci.8,bus=pcie.0,addr=0x1.0x6 \ -device pcie-root-port,port=0xf,chassis=9,id=pci.9,bus=pcie.0,addr=0x1.0x7 \ -device qemu-xhci,id=usb,bus=pci.5,addr=0x0 \ -device virtio-scsi-pci,id=scsi0,num_queues=1,bus=pcie.0,addr=0x9 \ -device virtio-serial-pci,id=virtio-serial0,bus=pci.6,addr=0x0 \ -blockdev '{"driver":"file","filename":"/mnt/554c497b-2903-3ea6-8f13-cfe258e24c24/658443d0-d38e-4925-94f5-28e6372fae2b","node-name":"libvirt-3-storage","cache":{"direct":true,"no-flush":false},"auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-3-format","read-only":true,"discard":"unmap","cache":{"direct":true,"no-flush":false},"driver":"qcow2","file":"libvirt-3-storage","backing":null}' \ -blockdev '{"driver":"file","filename":"/mnt/554c497b-2903-3ea6-8f13-cfe258e24c24/fa3a600d-d7cf-4342-af92-b368a536c886","node-name":"libvirt-2-storage","cache":{"direct":true,"no-flush":false},"auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-2-format","read-only":false,"discard":"unmap","cache":{"direct":true,"no-flush":false},"driver":"qcow2","file":"libvirt-2-storage","backing":"libvirt-3-format"}' \ -device scsi-hd,bus=scsi0.0,channel=0,scsi-id=0,lun=0,device_id=fa3a600dd7cf4342af92,drive=libvirt-2-format,id=scsi0-0-0-0,bootindex=2,write-cache=on,serial=fa3a600dd7cf4342af92 \ -blockdev '{"driver":"file","filename":"/usr/share/cloudstack-common/vms/systemvm.iso","node-name":"libvirt-1-storage","auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-1-format","read-only":true,"driver":"raw","file":"libvirt-1-storage"}' \ -device scsi-cd,bus=scsi0.0,channel=0,scsi-id=0,lun=2,device_id=drive-scsi0-0-0-2,drive=libvirt-1-format,id=scsi0-0-0-2,bootindex=1 \ -netdev tap,fd=41,id=hostnet0,vhost=on,vhostfd=42 \ -device virtio-net-pci,netdev=hostnet0,id=net0,mac=0e:00:a9:fe:fc:97,bus=pci.2,addr=0x0,rombar=0,romfile= \ -netdev tap,fd=44,id=hostnet1,vhost=on,vhostfd=46 \ -device virtio-net-pci,netdev=hostnet1,id=net1,mac=1e:00:db:00:00:02,bus=pci.3,addr=0x0,rombar=0,romfile= \ -netdev tap,fd=50,id=hostnet2,vhost=on,vhostfd=52 \ -device virtio-net-pci,netdev=hostnet2,id=net2,mac=1e:00:c8:00:00:05,bus=pci.4,addr=0x0,rombar=0,romfile= \ -chardev pty,id=charserial0 \ -serial chardev:charserial0 \ -chardev socket,id=charchannel0,fd=53,server,nowait \ -device 
virtserialport,bus=virtio-serial0.0,nr=1,chardev=charchannel0,id=channel0,name=org.qemu.guest_agent.0 \ -device usb-tablet,id=input0,bus=usb.0,port=1 \ -device usb-kbd,id=input1,bus=usb.0,port=2 \ -device usb-mouse,id=input2,bus=usb.0,port=3 \ -vnc 172.31.36.107:1,password \ -device virtio-gpu-pci,id=video0,max_outputs=1,bus=pci.8,addr=0x0 \ -device i6300esb,id=watchdog0,bus=pci.7,addr=0x1 \ -watchdog-action none \ -sandbox on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny \ -msg timestamp=on 2020-09-17 23:10:03.041+0000: Domain id=2 is tainted: host-cpu char device redirected to /dev/pts/3 (label charserial0) 2020-09-18 00:04:36.128+0000: starting up libvirt version: 6.0.0, package: 0ubuntu8.3 (Marc Deslauriers <[email protected]> Thu, 30 Jul 2020 06:40:28 -0400), qemu version: 4.2.1Debian 1:4.2-3ubuntu6.5, kernel: 5.4.0-1018-raspi, hostname: cloudstack-mgmt LC_ALL=C \ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin \ HOME=/var/lib/libvirt/qemu/domain-2-v-2473-VM \ XDG_DATA_HOME=/var/lib/libvirt/qemu/domain-2-v-2473-VM/.local/share \ XDG_CACHE_HOME=/var/lib/libvirt/qemu/domain-2-v-2473-VM/.cache \ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain-2-v-2473-VM/.config \ QEMU_AUDIO_DRV=none \ /usr/bin/qemu-system-aarch64 \ -name guest=v-2473-VM,debug-threads=on \ -S \ -object secret,id=masterKey0,format=raw,file=/var/lib/libvirt/qemu/domain-2-v-2473-VM/master-key.aes \ -blockdev '{"driver":"file","filename":"/usr/share/AAVMF/AAVMF_CODE.fd","node-name":"libvirt-pflash0-storage","auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-pflash0-format","read-only":true,"driver":"raw","file":"libvirt-pflash0-storage"}' \ -blockdev '{"driver":"file","filename":"/var/lib/libvirt/qemu/nvram/v-2473-VM_VARS.fd","node-name":"libvirt-pflash1-storage","auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-pflash1-format","read-only":false,"driver":"raw","file":"libvirt-pflash1-storage"}' \ -machine virt-4.2,accel=kvm,usb=off,dump-guest-core=off,gic-version=2,pflash0=libvirt-pflash0-format,pflash1=libvirt-pflash1-format \ -cpu host \ -m 1024 \ -overcommit mem-lock=off \ -smp 1,sockets=1,cores=1,threads=1 \ -uuid 6a6f45fd-0f70-4467-bac9-43991d8c565f \ -no-user-config \ -nodefaults \ -chardev socket,id=charmonitor,fd=35,server,nowait \ -mon chardev=charmonitor,id=monitor,mode=control \ -rtc base=utc \ -no-shutdown \ -boot strict=on \ -device pcie-root-port,port=0x8,chassis=1,id=pci.1,bus=pcie.0,multifunction=on,addr=0x1 \ -device pcie-root-port,port=0x9,chassis=2,id=pci.2,bus=pcie.0,addr=0x1.0x1 \ -device pcie-root-port,port=0xa,chassis=3,id=pci.3,bus=pcie.0,addr=0x1.0x2 \ -device pcie-root-port,port=0xb,chassis=4,id=pci.4,bus=pcie.0,addr=0x1.0x3 \ -device pcie-root-port,port=0xc,chassis=5,id=pci.5,bus=pcie.0,addr=0x1.0x4 \ -device pcie-root-port,port=0xd,chassis=6,id=pci.6,bus=pcie.0,addr=0x1.0x5 \ -device pcie-pci-bridge,id=pci.7,bus=pci.1,addr=0x0 \ -device pcie-root-port,port=0xe,chassis=8,id=pci.8,bus=pcie.0,addr=0x1.0x6 \ -device pcie-root-port,port=0xf,chassis=9,id=pci.9,bus=pcie.0,addr=0x1.0x7 \ -device qemu-xhci,id=usb,bus=pci.5,addr=0x0 \ -device virtio-scsi-pci,id=scsi0,num_queues=1,bus=pcie.0,addr=0x9 \ -device virtio-serial-pci,id=virtio-serial0,bus=pci.6,addr=0x0 \ -blockdev '{"driver":"file","filename":"/mnt/554c497b-2903-3ea6-8f13-cfe258e24c24/658443d0-d38e-4925-94f5-28e6372fae2b","node-name":"libvirt-3-storage","cache":{"direct":true,"no-flush":false},"auto-read-only":true,"discard":"unmap"}' \ -blockdev 
'{"node-name":"libvirt-3-format","read-only":true,"discard":"unmap","cache":{"direct":true,"no-flush":false},"driver":"qcow2","file":"libvirt-3-storage","backing":null}' \ -blockdev '{"driver":"file","filename":"/mnt/554c497b-2903-3ea6-8f13-cfe258e24c24/fa3a600d-d7cf-4342-af92-b368a536c886","node-name":"libvirt-2-storage","cache":{"direct":true,"no-flush":false},"auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-2-format","read-only":false,"discard":"unmap","cache":{"direct":true,"no-flush":false},"driver":"qcow2","file":"libvirt-2-storage","backing":"libvirt-3-format"}' \ -device scsi-hd,bus=scsi0.0,channel=0,scsi-id=0,lun=0,device_id=fa3a600dd7cf4342af92,drive=libvirt-2-format,id=scsi0-0-0-0,bootindex=2,write-cache=on,serial=fa3a600dd7cf4342af92 \ -blockdev '{"driver":"file","filename":"/usr/share/cloudstack-common/vms/systemvm.iso","node-name":"libvirt-1-storage","auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-1-format","read-only":true,"driver":"raw","file":"libvirt-1-storage"}' \ -device scsi-cd,bus=scsi0.0,channel=0,scsi-id=0,lun=2,device_id=drive-scsi0-0-0-2,drive=libvirt-1-format,id=scsi0-0-0-2,bootindex=1 \ -netdev tap,fd=39,id=hostnet0,vhost=on,vhostfd=41 \ -device virtio-net-pci,netdev=hostnet0,id=net0,mac=0e:00:a9:fe:63:1c,bus=pci.2,addr=0x0,rombar=0,romfile= \ -netdev tap,fd=44,id=hostnet1,vhost=on,vhostfd=45 \ -device virtio-net-pci,netdev=hostnet1,id=net1,mac=1e:00:8f:00:00:03,bus=pci.3,addr=0x0,rombar=0,romfile= \ -netdev tap,fd=47,id=hostnet2,vhost=on,vhostfd=48 \ -device virtio-net-pci,netdev=hostnet2,id=net2,mac=1e:00:c8:00:00:05,bus=pci.4,addr=0x0,rombar=0,romfile= \ -chardev pty,id=charserial0 \ -serial chardev:charserial0 \ -chardev socket,id=charchannel0,fd=50,server,nowait \ -device virtserialport,bus=virtio-serial0.0,nr=1,chardev=charchannel0,id=channel0,name=org.qemu.guest_agent.0 \ -device usb-tablet,id=input0,bus=usb.0,port=1 \ -device usb-kbd,id=input1,bus=usb.0,port=2 \ -device usb-mouse,id=input2,bus=usb.0,port=3 \ -vnc 172.31.36.107:0,password \ -device virtio-gpu-pci,id=video0,max_outputs=1,bus=pci.8,addr=0x0 \ -device i6300esb,id=watchdog0,bus=pci.7,addr=0x1 \ -watchdog-action none \ -sandbox on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny \ -msg timestamp=on 2020-09-18 00:04:36.128+0000: Domain id=2 is tainted: high-privileges 2020-09-18 00:04:36.128+0000: Domain id=2 is tainted: host-cpu char device redirected to /dev/pts/1 (label charserial0) 2020-09-18 01:05:00.515+0000: starting up libvirt version: 6.0.0, package: 0ubuntu8.3 (Marc Deslauriers <[email protected]> Thu, 30 Jul 2020 06:40:28 -0400), qemu version: 4.2.1Debian 1:4.2-3ubuntu6.5, kernel: 5.4.0-1018-raspi, hostname: cloudstack-mgmt LC_ALL=C \ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin \ HOME=/var/lib/libvirt/qemu/domain-2-v-2473-VM \ XDG_DATA_HOME=/var/lib/libvirt/qemu/domain-2-v-2473-VM/.local/share \ XDG_CACHE_HOME=/var/lib/libvirt/qemu/domain-2-v-2473-VM/.cache \ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain-2-v-2473-VM/.config \ QEMU_AUDIO_DRV=none \ /usr/bin/qemu-system-aarch64 \ -name guest=v-2473-VM,debug-threads=on \ -S \ -object secret,id=masterKey0,format=raw,file=/var/lib/libvirt/qemu/domain-2-v-2473-VM/master-key.aes \ -blockdev '{"driver":"file","filename":"/usr/share/AAVMF/AAVMF_CODE.fd","node-name":"libvirt-pflash0-storage","auto-read-only":true,"discard":"unmap"}' \ -blockdev 
'{"node-name":"libvirt-pflash0-format","read-only":true,"driver":"raw","file":"libvirt-pflash0-storage"}' \ -blockdev '{"driver":"file","filename":"/var/lib/libvirt/qemu/nvram/v-2473-VM_VARS.fd","node-name":"libvirt-pflash1-storage","auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-pflash1-format","read-only":false,"driver":"raw","file":"libvirt-pflash1-storage"}' \ -machine virt-4.2,accel=kvm,usb=off,dump-guest-core=off,gic-version=2,pflash0=libvirt-pflash0-format,pflash1=libvirt-pflash1-format \ -cpu host \ -m 1024 \ -overcommit mem-lock=off \ -smp 1,sockets=1,cores=1,threads=1 \ -uuid 6a6f45fd-0f70-4467-bac9-43991d8c565f \ -no-user-config \ -nodefaults \ -chardev socket,id=charmonitor,fd=36,server,nowait \ -mon chardev=charmonitor,id=monitor,mode=control \ -rtc base=utc \ -no-shutdown \ -boot strict=on \ -device pcie-root-port,port=0x8,chassis=1,id=pci.1,bus=pcie.0,multifunction=on,addr=0x1 \ -device pcie-root-port,port=0x9,chassis=2,id=pci.2,bus=pcie.0,addr=0x1.0x1 \ -device pcie-root-port,port=0xa,chassis=3,id=pci.3,bus=pcie.0,addr=0x1.0x2 \ -device pcie-root-port,port=0xb,chassis=4,id=pci.4,bus=pcie.0,addr=0x1.0x3 \ -device pcie-root-port,port=0xc,chassis=5,id=pci.5,bus=pcie.0,addr=0x1.0x4 \ -device pcie-root-port,port=0xd,chassis=6,id=pci.6,bus=pcie.0,addr=0x1.0x5 \ -device pcie-pci-bridge,id=pci.7,bus=pci.1,addr=0x0 \ -device pcie-root-port,port=0xe,chassis=8,id=pci.8,bus=pcie.0,addr=0x1.0x6 \ -device pcie-root-port,port=0xf,chassis=9,id=pci.9,bus=pcie.0,addr=0x1.0x7 \ -device qemu-xhci,id=usb,bus=pci.5,addr=0x0 \ -device virtio-scsi-pci,id=scsi0,num_queues=1,bus=pcie.0,addr=0x9 \ -device virtio-serial-pci,id=virtio-serial0,bus=pci.6,addr=0x0 \ -blockdev '{"driver":"file","filename":"/mnt/554c497b-2903-3ea6-8f13-cfe258e24c24/658443d0-d38e-4925-94f5-28e6372fae2b","node-name":"libvirt-3-storage","cache":{"direct":true,"no-flush":false},"auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-3-format","read-only":true,"discard":"unmap","cache":{"direct":true,"no-flush":false},"driver":"qcow2","file":"libvirt-3-storage","backing":null}' \ -blockdev '{"driver":"file","filename":"/mnt/554c497b-2903-3ea6-8f13-cfe258e24c24/fa3a600d-d7cf-4342-af92-b368a536c886","node-name":"libvirt-2-storage","cache":{"direct":true,"no-flush":false},"auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-2-format","read-only":false,"discard":"unmap","cache":{"direct":true,"no-flush":false},"driver":"qcow2","file":"libvirt-2-storage","backing":"libvirt-3-format"}' \ -device scsi-hd,bus=scsi0.0,channel=0,scsi-id=0,lun=0,device_id=fa3a600dd7cf4342af92,drive=libvirt-2-format,id=scsi0-0-0-0,bootindex=2,write-cache=on,serial=fa3a600dd7cf4342af92 \ -blockdev '{"driver":"file","filename":"/usr/share/cloudstack-common/vms/systemvm.iso","node-name":"libvirt-1-storage","auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-1-format","read-only":true,"driver":"raw","file":"libvirt-1-storage"}' \ -device scsi-cd,bus=scsi0.0,channel=0,scsi-id=0,lun=2,device_id=drive-scsi0-0-0-2,drive=libvirt-1-format,id=scsi0-0-0-2,bootindex=1 \ -netdev tap,fd=40,id=hostnet0,vhost=on,vhostfd=42 \ -device virtio-net-pci,netdev=hostnet0,id=net0,mac=0e:00:a9:fe:6b:9f,bus=pci.2,addr=0x0,rombar=0,romfile= \ -netdev tap,fd=44,id=hostnet1,vhost=on,vhostfd=46 \ -device virtio-net-pci,netdev=hostnet1,id=net1,mac=1e:00:60:00:00:02,bus=pci.3,addr=0x0,rombar=0,romfile= \ -netdev tap,fd=49,id=hostnet2,vhost=on,vhostfd=51 \ -device 
virtio-net-pci,netdev=hostnet2,id=net2,mac=1e:00:c8:00:00:05,bus=pci.4,addr=0x0,rombar=0,romfile= \ -chardev pty,id=charserial0 \ -serial chardev:charserial0 \ -chardev socket,id=charchannel0,fd=53,server,nowait \ -device virtserialport,bus=virtio-serial0.0,nr=1,chardev=charchannel0,id=channel0,name=org.qemu.guest_agent.0 \ -device usb-tablet,id=input0,bus=usb.0,port=1 \ -device usb-kbd,id=input1,bus=usb.0,port=2 \ -device usb-mouse,id=input2,bus=usb.0,port=3 \ -vnc 172.31.36.107:0,password \ -device virtio-gpu-pci,id=video0,max_outputs=1,bus=pci.8,addr=0x0 \ -device i6300esb,id=watchdog0,bus=pci.7,addr=0x1 \ -watchdog-action none \ -sandbox on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny \ -msg timestamp=on 2020-09-18 01:05:00.516+0000: Domain id=2 is tainted: high-privileges 2020-09-18 01:05:00.516+0000: Domain id=2 is tainted: host-cpu char device redirected to /dev/pts/2 (label charserial0) 2020-09-18 02:06:22.957+0000: starting up libvirt version: 6.0.0, package: 0ubuntu8.3 (Marc Deslauriers <[email protected]> Thu, 30 Jul 2020 06:40:28 -0400), qemu version: 4.2.1Debian 1:4.2-3ubuntu6.5, kernel: 5.4.0-1018-raspi, hostname: cloudstack-mgmt LC_ALL=C \ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin \ HOME=/var/lib/libvirt/qemu/domain-1-v-2473-VM \ XDG_DATA_HOME=/var/lib/libvirt/qemu/domain-1-v-2473-VM/.local/share \ XDG_CACHE_HOME=/var/lib/libvirt/qemu/domain-1-v-2473-VM/.cache \ XDG_CONFIG_HOME=/var/lib/libvirt/qemu/domain-1-v-2473-VM/.config \ QEMU_AUDIO_DRV=none \ /usr/bin/qemu-system-aarch64 \ -name guest=v-2473-VM,debug-threads=on \ -S \ -object secret,id=masterKey0,format=raw,file=/var/lib/libvirt/qemu/domain-1-v-2473-VM/master-key.aes \ -blockdev '{"driver":"file","filename":"/usr/share/AAVMF/AAVMF_CODE.fd","node-name":"libvirt-pflash0-storage","auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-pflash0-format","read-only":true,"driver":"raw","file":"libvirt-pflash0-storage"}' \ -blockdev '{"driver":"file","filename":"/var/lib/libvirt/qemu/nvram/v-2473-VM_VARS.fd","node-name":"libvirt-pflash1-storage","auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-pflash1-format","read-only":false,"driver":"raw","file":"libvirt-pflash1-storage"}' \ -machine virt-4.2,accel=kvm,usb=off,dump-guest-core=off,gic-version=2,pflash0=libvirt-pflash0-format,pflash1=libvirt-pflash1-format \ -cpu host \ -m 1024 \ -overcommit mem-lock=off \ -smp 1,sockets=1,cores=1,threads=1 \ -uuid 6a6f45fd-0f70-4467-bac9-43991d8c565f \ -no-user-config \ -nodefaults \ -chardev socket,id=charmonitor,fd=35,server,nowait \ -mon chardev=charmonitor,id=monitor,mode=control \ -rtc base=utc \ -no-shutdown \ -boot strict=on \ -device pcie-root-port,port=0x8,chassis=1,id=pci.1,bus=pcie.0,multifunction=on,addr=0x1 \ -device pcie-root-port,port=0x9,chassis=2,id=pci.2,bus=pcie.0,addr=0x1.0x1 \ -device pcie-root-port,port=0xa,chassis=3,id=pci.3,bus=pcie.0,addr=0x1.0x2 \ -device pcie-root-port,port=0xb,chassis=4,id=pci.4,bus=pcie.0,addr=0x1.0x3 \ -device pcie-root-port,port=0xc,chassis=5,id=pci.5,bus=pcie.0,addr=0x1.0x4 \ -device pcie-root-port,port=0xd,chassis=6,id=pci.6,bus=pcie.0,addr=0x1.0x5 \ -device pcie-pci-bridge,id=pci.7,bus=pci.1,addr=0x0 \ -device pcie-root-port,port=0xe,chassis=8,id=pci.8,bus=pcie.0,addr=0x1.0x6 \ -device pcie-root-port,port=0xf,chassis=9,id=pci.9,bus=pcie.0,addr=0x1.0x7 \ -device qemu-xhci,id=usb,bus=pci.5,addr=0x0 \ -device virtio-scsi-pci,id=scsi0,num_queues=1,bus=pcie.0,addr=0x9 \ -device 
virtio-serial-pci,id=virtio-serial0,bus=pci.6,addr=0x0 \ -blockdev '{"driver":"file","filename":"/mnt/554c497b-2903-3ea6-8f13-cfe258e24c24/658443d0-d38e-4925-94f5-28e6372fae2b","node-name":"libvirt-3-storage","cache":{"direct":true,"no-flush":false},"auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-3-format","read-only":true,"discard":"unmap","cache":{"direct":true,"no-flush":false},"driver":"qcow2","file":"libvirt-3-storage","backing":null}' \ -blockdev '{"driver":"file","filename":"/mnt/554c497b-2903-3ea6-8f13-cfe258e24c24/fa3a600d-d7cf-4342-af92-b368a536c886","node-name":"libvirt-2-storage","cache":{"direct":true,"no-flush":false},"auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-2-format","read-only":false,"discard":"unmap","cache":{"direct":true,"no-flush":false},"driver":"qcow2","file":"libvirt-2-storage","backing":"libvirt-3-format"}' \ -device scsi-hd,bus=scsi0.0,channel=0,scsi-id=0,lun=0,device_id=fa3a600dd7cf4342af92,drive=libvirt-2-format,id=scsi0-0-0-0,bootindex=2,write-cache=on,serial=fa3a600dd7cf4342af92 \ -blockdev '{"driver":"file","filename":"/usr/share/cloudstack-common/vms/systemvm.iso","node-name":"libvirt-1-storage","auto-read-only":true,"discard":"unmap"}' \ -blockdev '{"node-name":"libvirt-1-format","read-only":true,"driver":"raw","file":"libvirt-1-storage"}' \ -device scsi-cd,bus=scsi0.0,channel=0,scsi-id=0,lun=2,device_id=drive-scsi0-0-0-2,drive=libvirt-1-format,id=scsi0-0-0-2,bootindex=1 \ -netdev tap,fd=37,id=hostnet0,vhost=on,vhostfd=41 \ -device virtio-net-pci,netdev=hostnet0,id=net0,mac=0e:00:a9:fe:ba:fe,bus=pci.2,addr=0x0,rombar=0,romfile= \ -netdev tap,fd=44,id=hostnet1,vhost=on,vhostfd=46 \ -device virtio-net-pci,netdev=hostnet1,id=net1,mac=1e:00:33:00:00:01,bus=pci.3,addr=0x0,rombar=0,romfile= \ -netdev tap,fd=47,id=hostnet2,vhost=on,vhostfd=50 \ -device virtio-net-pci,netdev=hostnet2,id=net2,mac=1e:00:c8:00:00:05,bus=pci.4,addr=0x0,rombar=0,romfile= \ -chardev pty,id=charserial0 \ -serial chardev:charserial0 \ -chardev socket,id=charchannel0,fd=51,server,nowait \ -device virtserialport,bus=virtio-serial0.0,nr=1,chardev=charchannel0,id=channel0,name=org.qemu.guest_agent.0 \ -device usb-tablet,id=input0,bus=usb.0,port=1 \ -device usb-kbd,id=input1,bus=usb.0,port=2 \ -device usb-mouse,id=input2,bus=usb.0,port=3 \ -vnc 172.31.36.107:0,password \ -device virtio-gpu-pci,id=video0,max_outputs=1,bus=pci.8,addr=0x0 \ -device i6300esb,id=watchdog0,bus=pci.7,addr=0x1 \ -watchdog-action none \ -sandbox on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny \ -msg timestamp=on 2020-09-18 02:06:22.958+0000: Domain id=1 is tainted: high-privileges 2020-09-18 02:06:22.958+0000: Domain id=1 is tainted: host-cpu char device redirected to /dev/pts/1 (label charserial0) 2020-09-18 02:07:04.974+0000: Domain id=1 is tainted: custom-ga-command
```
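For what it's worth, the management-server.log excerpt above suggests the hypervisor side is fine (the libvirt log shows v-2473-VM does boot) and the real problem is the stale `vm_instance2473` lock: the console proxy worker cannot even queue the `VmWorkStart` job because something still holds that row in CloudStack's lock table. Below is a rough diagnostic sketch; the `cloud` database/user and the `op_lock`/`async_job` table and column names are assumptions based on a default CloudStack install, so please verify them against your own schema before running anything.

```bash
# Diagnostic sketch only -- database name, user and table/column names are
# assumptions from a default CloudStack install; adjust to your environment.

# 1. Is something still holding the Merovingian2 lock for the console proxy VM (id 2473)?
mysql -u cloud -p cloud -e "SELECT * FROM op_lock WHERE op_lock.key = 'vm_instance2473';"

# 2. Are there async jobs for that VM stuck in progress (job_status = 0)?
mysql -u cloud -p cloud -e "SELECT id, job_cmd, job_status, created FROM async_job
                            WHERE instance_id = 2473 AND job_status = 0;"

# 3. If a stale lock row is still present with the management server stopped, it can
#    be removed so the proxy is recycled on the next start (back up the database first):
# mysql -u cloud -p cloud -e "DELETE FROM op_lock WHERE op_lock.key = 'vm_instance2473';"
```

The log already says the scanner "will recycle it and start a new one", so once the lock can be acquired the console proxy should come back on its own.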
