Can someone help me troubleshoot this issue? I have tried to set up the hosted engine using both a static address and DHCP, and the deployment fails both times. Many thanks in advance! [ ERROR ] fatal: [localhost]: FAILED! => {"changed": false, "msg": "The system may not be provisioned according to the playbook results: please check the logs for the issue, fix accordingly or re-deploy from scratch.\n"} [ ERROR ] Failed to execute stage 'Closing up': Failed executing ansible-playbook
[root@vmh /]# ip a 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 2: enp96s0f0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master ovirtmgmt state UP group default qlen 1000 link/ether 0c:c4:7a:f9:b9:88 brd ff:ff:ff:ff:ff:ff 3: enp96s0f1: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc mq state DOWN group default qlen 1000 link/ether 0c:c4:7a:f9:b9:89 brd ff:ff:ff:ff:ff:ff 20: ovirtmgmt: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000 link/ether 0c:c4:7a:f9:b9:88 brd ff:ff:ff:ff:ff:ff inet 172.30.50.2/24 brd 172.30.50.255 scope global dynamic ovirtmgmt valid_lft 84266sec preferred_lft 84266sec inet6 fe80::ec4:7aff:fef9:b988/64 scope link valid_lft forever preferred_lft forever 21: virbr0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000 link/ether 52:54:00:e2:1c:89 brd ff:ff:ff:ff:ff:ff inet 192.168.124.1/24 brd 192.168.124.255 scope global virbr0 valid_lft forever preferred_lft forever 22: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000 link/ether 52:54:00:e2:1c:89 brd ff:ff:ff:ff:ff:ff 23: vnet0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master virbr0 state UNKNOWN group default qlen 1000 link/ether fe:16:3e:52:19:f4 brd ff:ff:ff:ff:ff:ff inet6 fe80::fc16:3eff:fe52:19f4/64 scope link valid_lft forever preferred_lft forever 24: ;vdsmdummy;: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000 link/ether 82:43:87:7f:eb:62 brd ff:ff:ff:ff:ff:ff [root@vmh /]# df -h Filesystem Size Used Avail Use% Mounted on /dev/mapper/onn_vmh-ovirt--node--ng--4.2.5.1--0.20180731.0+1 145G 3.1G 134G 3% / devtmpfs 63G 0 63G 0% /dev tmpfs 63G 4.0K 63G 1% /dev/shm 
tmpfs 63G 19M 63G 1% /run tmpfs 63G 0 63G 0% /sys/fs/cgroup /dev/mapper/onn_vmh-var 15G 4.5G 9.4G 33% /var /dev/mapper/onn_vmh-tmp 976M 4.1M 905M 1% /tmp /dev/mapper/onn_vmh-home 976M 2.6M 907M 1% /home /dev/mapper/PNY_CS900_240GB_SSD_PNY14182241350103ED2p1 976M 365M 545M 41% /boot /dev/mapper/onn_vmh-var_log 7.8G 70M 7.3G 1% /var/log /dev/mapper/onn_vmh-var_log_audit 2.0G 9.2M 1.8G 1% /var/log/audit /dev/mapper/3600605b00a2faca222fb4da81ac9bdb1p1 7.4T 93M 7.1T 1% /srv tmpfs 13G 0 13G 0% /run/user/0 I have pasted my log file- https://pastebin.com/dyYksxaC _______________________________________________ Users mailing list -- users@ovirt.org To unsubscribe send an email to users-le...@ovirt.org Privacy Statement: https://www.ovirt.org/site/privacy-policy/ oVirt Code of Conduct: https://www.ovirt.org/community/about/community-guidelines/ List Archives: https://lists.ovirt.org/archives/list/users@ovirt.org/message/LPK7OGFALSQFAN4UMEIHOION4BS2HJLN/