On Fri, Jan 15, 2021, 22:04 penguin pages <jeremey.w...@gmail.com> wrote:

>
> Thanks for the replies.
>
> Here is where things stand:
>
> # Two nodes think no VMs exist
> [root@odin ~]# vdsm-client Host getVMList
> []
>
> # One node shows one VM, but it is down
> [root@medusa ~]# vdsm-client Host getVMList
> [
>     {
>         "status": "Down",
>         "statusTime": "2153886148",
>         "vmId": "69ab4f82-1a53-42c8-afca-210a3a2715f1"
>     }
> ]
> [root@medusa ~]# vdsm-client Host getAllVmStats
> [
>     {
>         "exitCode": 1,
>         "exitMessage": "VM terminated with error",
>         "exitReason": 1,
>         "status": "Down",
>         "statusTime": "2153916276",
>         "vmId": "69ab4f82-1a53-42c8-afca-210a3a2715f1"
>     }
> ]
> [root@medusa ~]# vdsm-client VM cont vmID="69ab4f82-1a53-42c8-afca-210a3a2715f1"
> vdsm-client: Command VM.cont with args {'vmID':
> '69ab4f82-1a53-42c8-afca-210a3a2715f1'} failed:
> (code=16, message=Unexpected exception)
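
A VM reported as "Down" cannot be continued, which is probably why
VM.cont fails with the unexpected exception. If the goal is only to
clear the stale entry, something along these lines should work (a hedged
suggestion, reusing the vmId from the output above):

vdsm-client VM destroy vmID="69ab4f82-1a53-42c8-afca-210a3a2715f1"

After that, getVMList should return [] on medusa as well.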
>
>
> # Assuming that ID represents the hosted engine, I tried to start it
> [root@medusa ~]# hosted-engine --vm-start
> The hosted engine configuration has not been retrieved from shared
> storage. Please ensure that ovirt-ha-agent is running and the storage
> server is reachable.
>
> # Back to ovirt-ha-agent being FUBAR and stopping things.
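
For the ha-agent side, the usual first steps are to check and restart
both HA services and tail the agent log (standard oVirt service names
and log path; nothing environment-specific assumed):

systemctl status ovirt-ha-broker ovirt-ha-agent
systemctl restart ovirt-ha-broker ovirt-ha-agent
hosted-engine --vm-status
tail -f /var/log/ovirt-hosted-engine-ha/agent.log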
>
> I have about 8 or so VMs on the cluster. Two are my IDM nodes, which
> host DNS and other core services... which is what I am really trying to
> get back up, even if manually, until I figure out the oVirt issue. I
> think you are correct: the "engine" volume is just for the engine, and
> data is where the other VMs are.
>
> [root@medusa images]# tree
> .
> ├── 335c6b1a-d8a5-4664-9a9c-39744d511af8
> │   ├── 579323ad-bf7b-479b-b682-6e1e234a7908
> │   ├── 579323ad-bf7b-479b-b682-6e1e234a7908.lease
> │   └── 579323ad-bf7b-479b-b682-6e1e234a7908.meta
> ├── d318cb8f-743a-461b-b246-75ffcde6bc5a
> │   ├── c16877d0-eb23-42ef-a06e-a3221ea915fc
> │   ├── c16877d0-eb23-42ef-a06e-a3221ea915fc.lease
> │   └── c16877d0-eb23-42ef-a06e-a3221ea915fc.meta
> └── junk
>     ├── 296163f2-846d-4a2c-9a4e-83a58640b907
>     │   ├── 376b895f-e0f2-4387-b038-fbef4705fbcc
>     │   ├── 376b895f-e0f2-4387-b038-fbef4705fbcc.lease
>     │   └── 376b895f-e0f2-4387-b038-fbef4705fbcc.meta
>     ├── 45a478d7-4c1b-43e8-b106-7acc75f066fa
>     │   ├── b5249e6c-0ba6-4302-8e53-b74d2b919d20
>     │   ├── b5249e6c-0ba6-4302-8e53-b74d2b919d20.lease
>     │   └── b5249e6c-0ba6-4302-8e53-b74d2b919d20.meta
>     ├── d8b708c1-5762-4215-ae1f-0e57444c99ad
>     │   ├── 2536ca6d-3254-4cdc-bbd8-349ec1b8a0e9
>     │   ├── 2536ca6d-3254-4cdc-bbd8-349ec1b8a0e9.lease
>     │   └── 2536ca6d-3254-4cdc-bbd8-349ec1b8a0e9.meta
>     └── eaf12f3c-301f-4b61-b5a1-0c6d0b0a7f7b
>         ├── fbf3bf59-a23a-4c6f-b66e-71369053b406
>         ├── fbf3bf59-a23a-4c6f-b66e-71369053b406.lease
>         └── fbf3bf59-a23a-4c6f-b66e-71369053b406.meta
>
> 7 directories, 18 files
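
Each volume's .meta file carries a DESCRIPTION field, so the UUID
directories above can be mapped back to actual VM disks with something
like this (a quick sketch, run from that images directory):

find . -name '*.meta' | xargs grep -H '^DESCRIPTION'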
> [root@medusa images]# cd /media/engine/
> [root@medusa engine]# ls
> 3afc47ba-afb9-413f-8de5-8d9a2f45ecde
> [root@medusa engine]# tree
> .
> └── 3afc47ba-afb9-413f-8de5-8d9a2f45ecde
>     ├── dom_md
>     │   ├── ids
>     │   ├── inbox
>     │   ├── leases
>     │   ├── metadata
>     │   ├── outbox
>     │   └── xleases
>     ├── ha_agent
>     ├── images
>     │   ├── 1dc69552-dcc6-484d-8149-86c93ff4b8cc
>     │   │   ├── e4e26573-09a5-43fa-91ec-37d12de46480
>     │   │   ├── e4e26573-09a5-43fa-91ec-37d12de46480.lease
>     │   │   └── e4e26573-09a5-43fa-91ec-37d12de46480.meta
>     │   ├── 375d2483-ee83-4cad-b421-a5a70ec06ba6
>     │   │   ├── f936d4be-15e3-4983-8bf0-9ba5b97e638a
>     │   │   ├── f936d4be-15e3-4983-8bf0-9ba5b97e638a.lease
>     │   │   └── f936d4be-15e3-4983-8bf0-9ba5b97e638a.meta
>     │   ├── 6023f2b1-ea6e-485b-9ac2-8decd5f7820d
>     │   │   ├── b38a5e37-fac4-4c23-a0c4-7359adff619c
>     │   │   ├── b38a5e37-fac4-4c23-a0c4-7359adff619c.lease
>     │   │   └── b38a5e37-fac4-4c23-a0c4-7359adff619c.meta
>     │   ├── 685309b1-1ae9-45f3-90c3-d719a594482d
>     │   │   ├── 9eddcf51-fd15-4de5-a4b6-a83a9082dee0
>     │   │   ├── 9eddcf51-fd15-4de5-a4b6-a83a9082dee0.lease
>     │   │   └── 9eddcf51-fd15-4de5-a4b6-a83a9082dee0.meta
>     │   ├── 74f1b2e7-2483-4e4d-8301-819bcd99129e
>     │   │   ├── c1888b6a-c48e-46ce-9677-02e172ef07af
>     │   │   ├── c1888b6a-c48e-46ce-9677-02e172ef07af.lease
>     │   │   └── c1888b6a-c48e-46ce-9677-02e172ef07af.meta
>     │   └── 77082dd8-7cb5-41cc-a69f-0f4c0380db23
>     │       ├── 38d552c5-689d-47b7-9eea-adb308da8027
>     │       ├── 38d552c5-689d-47b7-9eea-adb308da8027.lease
>     │       └── 38d552c5-689d-47b7-9eea-adb308da8027.meta
>     └── master
>         ├── tasks
>         │   ├── 150927c5-bae6-45e4-842c-a7ba229fc3ba
>         │   │   └── 150927c5-bae6-45e4-842c-a7ba229fc3ba.job.0
>         │   ├── 21bba697-26e6-4fd8-ac7c-76f86b458368.temp
>         │   ├── 26c580b8-cdb2-4d21-9bea-96e0788025e6.temp
>         │   ├── 2e0e347c-fd01-404f-9459-ef175c82c354.backup
>         │   │   └── 2e0e347c-fd01-404f-9459-ef175c82c354.task
>         │   ├── 43f17022-e003-4e9f-81ec-4a01582223bd.backup
>         │   │   └── 43f17022-e003-4e9f-81ec-4a01582223bd.task
>         │   ├── 5055f61a-4cc8-459f-8fe5-19427b74a4f2.temp
>         │   ├── 6826c8f5-b9df-498e-a576-af0c4e7fe69c
>         │   │   └── 6826c8f5-b9df-498e-a576-af0c4e7fe69c.task
>         │   ├── 78ed90b0-2a87-4c48-8204-03d4b0bd7694
>         │   │   └── 78ed90b0-2a87-4c48-8204-03d4b0bd7694.job.0
>         │   ├── 7c7799a5-d28e-4b42-86ee-84bb8822e82f.temp
>         │   ├── 95d29b8c-23d9-4d1a-b995-2ba364970893
>         │   ├── 95d29b8c-23d9-4d1a-b995-2ba364970893.temp
>         │   ├── a1fa934a-5ea7-4160-ab8c-7e3476dc2676.backup
>         │   │   └── a1fa934a-5ea7-4160-ab8c-7e3476dc2676.task
>         │   ├── bcee8725-efde-4848-a108-01c262625aaa
>         │   │   └── bcee8725-efde-4848-a108-01c262625aaa.job.0
>         │   ├── c0b5a032-c4a9-4648-b348-c2a5cf4d6cad.temp
>         │   ├── ce7e2ebf-2c28-435d-b359-14d0da2e9011
>         │   └── ce7e2ebf-2c28-435d-b359-14d0da2e9011.temp
>         └── vms
>
> 29 directories, 31 files
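
One thing stands out in that listing: the ha_agent directory on the
engine volume appears empty. That lines up with the earlier
"configuration has not been retrieved from shared storage" error, since
that directory normally holds the hosted-engine.lockspace and
hosted-engine.metadata links. Worth a direct look, e.g.:

ls -la /media/engine/3afc47ba-afb9-413f-8de5-8d9a2f45ecde/ha_agent/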
>
>
> # Finding the XML file for the hosted-engine VM
> [root@medusa /]# cd /gluster_bricks/vmstore/vmstore/qemu/
> [root@medusa qemu]# ls
> ns01.xml  ns02.xml
> [root@medusa qemu]# ls -alh
> total 36K
> drwxr-xr-x. 2 root root   38 Sep 17 10:19 .
> drwxr-xr-x. 8 vdsm kvm  8.0K Jan 15 11:26 ..
> -rw-------. 2 qemu qemu 4.7K Sep 17 07:19 ns01.xml
> -rw-------. 2 root root 4.7K Sep 17 10:19 ns02.xml
> [root@medusa qemu]# cat ns01.xml
> <!--
> WARNING: THIS IS AN AUTO-GENERATED FILE. CHANGES TO IT ARE LIKELY TO BE
> OVERWRITTEN AND LOST. Changes to this xml configuration should be made
> using:
>   virsh edit ns01
> or other application using the libvirt API.
> -->
>
> <domain type='kvm'>
>   <name>ns01</name>
>   <uuid>0bfd4ad4-b405-4154-94da-ac3261ebc17e</uuid>
>   <title>ns01</title>
>   <memory unit='KiB'>16777216</memory>
>   <currentMemory unit='KiB'>4184064</currentMemory>
>   <vcpu placement='static'>4</vcpu>
>   <os>
>     <type arch='x86_64' machine='pc-i440fx-rhel7.6.0'>hvm</type>
>     <boot dev='hd'/>
>   </os>
>   <features>
>     <acpi/>
>     <apic/>
>     <vmport state='off'/>
>   </features>
>   <cpu mode='host-model' check='partial'>
>     <model fallback='allow'/>
>   </cpu>
>   <clock offset='utc'>
>     <timer name='rtc' tickpolicy='catchup'/>
>     <timer name='pit' tickpolicy='delay'/>
>     <timer name='hpet' present='no'/>
>   </clock>
>   <on_poweroff>destroy</on_poweroff>
>   <on_reboot>restart</on_reboot>
>   <on_crash>destroy</on_crash>
>   <pm>
>     <suspend-to-mem enabled='no'/>
>     <suspend-to-disk enabled='no'/>
>   </pm>
>   <devices>
>     <emulator>/usr/libexec/qemu-kvm</emulator>
>     <disk type='file' device='disk'>
>       <driver name='qemu' type='qcow2'/>
>       <source file='/media/vmstore/ns01.qcow2'/>
>       <target dev='vda' bus='virtio'/>
>       <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
>     </disk>
>     <disk type='file' device='disk'>
>       <driver name='qemu' type='qcow2'/>
>       <source file='/media/vmstore/ns01_var.qcow2'/>
>       <target dev='vdb' bus='virtio'/>
> <snip>


> # Trying to start the VM using this XML file
> [root@medusa qemu]# cp /gluster_bricks/vmstore/vmstore/qemu/ns01.xml /tmp
> [root@medusa qemu]# virsh define /tmp/ns01.xml
> Please enter your authentication name: admin
> Please enter your password:
> Domain ns01 defined from /tmp/ns01.xml
>
Seems that it was defined.

>
> [root@medusa qemu]# virsh start /tmp/ns01.xml
> Please enter your authentication name: admin
> Please enter your password:
> error: failed to get domain '/tmp/ns01.xml'
>
You need to start the defined VM, not the XML file. Did you try to connect with:

virsh -c qemu:///system?authfile=/etc/ovirt-hosted-engine/virsh_auth.conf
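
Once connected, start the domain by the name it was defined with rather
than by the XML path:

virsh start ns01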

Otherwise, you can also configure libvirt to authenticate with a
SASL account. Example:

saslpasswd2 -a libvirt fred

At /etc/libvirt/auth.conf:

[credentials-test]
authname=fred
password=123456

[auth-libvirt-medusa]
credentials=test

Check https://libvirt.org/auth.html#ACL_server_username for details.
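
With either approach in place, a quick check would be:

virsh list --all

entering the credentials when prompted; the freshly defined ns01 domain
should show up as "shut off".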

>
>
> Hmm... working on it... but if you have a suggestion / example, that
> would help.
_______________________________________________
Users mailing list -- users@ovirt.org
To unsubscribe send an email to users-le...@ovirt.org
Privacy Statement: https://www.ovirt.org/privacy-policy.html
oVirt Code of Conduct: 
https://www.ovirt.org/community/about/community-guidelines/
List Archives: 
https://lists.ovirt.org/archives/list/users@ovirt.org/message/YAAUY7BDD6APGYQWHNRH5KSXRFG4UEQE/
