I got a little further, but I just wanted to confirm a scenario. I realized
part of my issue was that the brick filesystem was still mounted. After I
unmounted it, the error message changed.
[root@vmh /]# cd /
[root@vmh /]# umount /gluster_bricks/vmstore
I then commented out the brick's entry in /etc/fstab, ran systemctl
daemon-reload, and ran mount -a to confirm it stayed unmounted.
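Spelled out, the fstab step was something like this (the sed one-liner is just
an illustration; I actually edited /etc/fstab by hand):

[root@vmh /]# sed -i '\|/gluster_bricks/vmstore|s|^|#|' /etc/fstab
[root@vmh /]# systemctl daemon-reload
[root@vmh /]# mount -a
[root@vmh /]# findmnt /gluster_bricks/vmstore

findmnt printed nothing, so nothing remounted it.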
[root@vmh /]# gluster volume create vmstore vmh.tourville.org:/gluster_bricks/vmstore
volume create: vmstore: failed: The brick
vmh.tourville.org:/gluster_bricks/vmstore is being created in the root
partition. It is recommended that you don't use the system's root partition for
storage backend. Or use 'force' at the end of the command if you want to
override this behavior.
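As I understand it, that check just compares the device behind the brick path
with the device behind /, and since the brick filesystem is now unmounted,
/gluster_bricks/vmstore is just a directory sitting on the root LV. That is
easy to see with plain coreutils df:

[root@vmh /]# df --output=source,target /gluster_bricks/vmstore /

Both lines should point at /dev/mapper/rl_vmh-root, which is exactly what the
warning is complaining about.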
## DISK LAYOUT
[root@vmh /]# vgs
  VG             #PV #LV #SN Attr   VSize    VFree
  gluster_vg_sda   1   2   0 wz--n-   <5.46t      0
  rl_vmh           1  10   0 wz--n- <219.18g <43.84g
[root@vmh /]# lvs
  LV                              VG             Attr       LSize    Pool                            Origin Data%  Meta%  Move Log Cpy%Sync Convert
  gluster_lv_vmstore              gluster_vg_sda Vwi-a-t---   4.39t  gluster_thinpool_gluster_vg_sda        48.48
  gluster_thinpool_gluster_vg_sda gluster_vg_sda twi-aot---  <5.43t                                         39.26  0.51
  home                            rl_vmh         Vwi-aotz--  70.00g  pool00                                  0.09
  pool00                          rl_vmh         twi-aotz-- <171.01g                                         4.36   11.17
  root                            rl_vmh         Vwi-aotz--  20.00g  pool00                                 22.29
  swap                            rl_vmh         -wi-ao----   4.00g
  tmp                             rl_vmh         Vwi-aotz--   6.00g  pool00                                  1.20
  var                             rl_vmh         Vwi-aotz--  30.00g  pool00                                  7.56
  var_crash                       rl_vmh         Vwi-aotz--  15.00g  pool00                                  0.42
  var_log                         rl_vmh         Vwi-aotz--  16.00g  pool00                                  0.84
  var_log_audit                   rl_vmh         Vwi-aotz--   4.00g  pool00                                  1.70
  var_tmp                         rl_vmh         Vwi-aotz--  10.00g  pool00                                  3.35
[root@vmh /]# lsblk
NAME                                                       MAJ:MIN RM   SIZE RO TYPE  MOUNTPOINTS
sda                                                          8:0    0   5.5T  0 disk
└─3600605b00a2faca2233aa64a0966bb09                        253:14   0   5.5T  0 mpath
  ├─gluster_vg_sda-gluster_thinpool_gluster_vg_sda_tmeta   253:16   0  15.8G  0 lvm
  │ └─gluster_vg_sda-gluster_thinpool_gluster_vg_sda-tpool 253:18   0   5.4T  0 lvm
  │   ├─gluster_vg_sda-gluster_thinpool_gluster_vg_sda     253:19   0   5.4T  1 lvm
  │   └─gluster_vg_sda-gluster_lv_vmstore                  253:20   0   4.4T  0 lvm
  └─gluster_vg_sda-gluster_thinpool_gluster_vg_sda_tdata   253:17   0   5.4T  0 lvm
    └─gluster_vg_sda-gluster_thinpool_gluster_vg_sda-tpool 253:18   0   5.4T  0 lvm
      ├─gluster_vg_sda-gluster_thinpool_gluster_vg_sda     253:19   0   5.4T  1 lvm
      └─gluster_vg_sda-gluster_lv_vmstore                  253:20   0   4.4T  0 lvm
sdb                                                          8:16   0 223.6G  0 disk
└─sdb1                                                       8:17   0 219.3G  0 part
  └─md127                                                    9:127  0 219.2G  0 raid1
    ├─rl_vmh-pool00_tmeta                                  253:0    0   172M  0 lvm
    │ └─rl_vmh-pool00-tpool                                253:2    0   171G  0 lvm
    │   ├─rl_vmh-root                                      253:3    0    20G  0 lvm   /
    │   ├─rl_vmh-pool00                                    253:5    0   171G  1 lvm
    │   ├─rl_vmh-home                                      253:6    0    70G  0 lvm   /home
    │   ├─rl_vmh-var_crash                                 253:7    0    15G  0 lvm   /var/crash
    │   ├─rl_vmh-tmp                                       253:8    0     6G  0 lvm   /tmp
    │   ├─rl_vmh-var_tmp                                   253:9    0    10G  0 lvm   /var/tmp
    │   ├─rl_vmh-var_log_audit                             253:10   0     4G  0 lvm   /var/log/audit
    │   ├─rl_vmh-var_log                                   253:11   0    16G  0 lvm   /var/log
    │   └─rl_vmh-var                                       253:12   0    30G  0 lvm   /var
    ├─rl_vmh-pool00_tdata                                  253:1    0   171G  0 lvm
    │ └─rl_vmh-pool00-tpool                                253:2    0   171G  0 lvm
    │   ├─rl_vmh-root                                      253:3    0    20G  0 lvm   /
    │   ├─rl_vmh-pool00                                    253:5    0   171G  1 lvm
    │   ├─rl_vmh-home                                      253:6    0    70G  0 lvm   /home
    │   ├─rl_vmh-var_crash                                 253:7    0    15G  0 lvm   /var/crash
    │   ├─rl_vmh-tmp                                       253:8    0     6G  0 lvm   /tmp
    │   ├─rl_vmh-var_tmp                                   253:9    0    10G  0 lvm   /var/tmp
    │   ├─rl_vmh-var_log_audit                             253:10   0     4G  0 lvm   /var/log/audit
    │   ├─rl_vmh-var_log                                   253:11   0    16G  0 lvm   /var/log
    │   └─rl_vmh-var                                       253:12   0    30G  0 lvm   /var
    └─rl_vmh-swap                                          253:4    0     4G  0 lvm   [SWAP]
sdc                                                          8:32   0 223.6G  0 disk
├─sdc1                                                       8:33   0   600M  0 part  /boot/efi
├─sdc2                                                       8:34   0     1M  0 part
├─sdc3                                                       8:35   0     1G  0 part  /boot
└─sdc4                                                       8:36   0 219.3G  0 part
  └─md127                                                    9:127  0 219.2G  0 raid1
    ├─rl_vmh-pool00_tmeta                                  253:0    0   172M  0 lvm
    │ └─rl_vmh-pool00-tpool                                253:2    0   171G  0 lvm
    │   ├─rl_vmh-root                                      253:3    0    20G  0 lvm   /
    │   ├─rl_vmh-pool00                                    253:5    0   171G  1 lvm
    │   ├─rl_vmh-home                                      253:6    0    70G  0 lvm   /home
    │   ├─rl_vmh-var_crash                                 253:7    0    15G  0 lvm   /var/crash
    │   ├─rl_vmh-tmp                                       253:8    0     6G  0 lvm   /tmp
    │   ├─rl_vmh-var_tmp                                   253:9    0    10G  0 lvm   /var/tmp
    │   ├─rl_vmh-var_log_audit                             253:10   0     4G  0 lvm   /var/log/audit
    │   ├─rl_vmh-var_log                                   253:11   0    16G  0 lvm   /var/log
    │   └─rl_vmh-var                                       253:12   0    30G  0 lvm   /var
    ├─rl_vmh-pool00_tdata                                  253:1    0   171G  0 lvm
    │ └─rl_vmh-pool00-tpool                                253:2    0   171G  0 lvm
    │   ├─rl_vmh-root                                      253:3    0    20G  0 lvm   /
    │   ├─rl_vmh-pool00                                    253:5    0   171G  1 lvm
    │   ├─rl_vmh-home                                      253:6    0    70G  0 lvm   /home
    │   ├─rl_vmh-var_crash                                 253:7    0    15G  0 lvm   /var/crash
    │   ├─rl_vmh-tmp                                       253:8    0     6G  0 lvm   /tmp
    │   ├─rl_vmh-var_tmp                                   253:9    0    10G  0 lvm   /var/tmp
    │   ├─rl_vmh-var_log_audit                             253:10   0     4G  0 lvm   /var/log/audit
    │   ├─rl_vmh-var_log                                   253:11   0    16G  0 lvm   /var/log
    │   └─rl_vmh-var                                       253:12   0    30G  0 lvm   /var
    └─rl_vmh-swap                                          253:4    0     4G  0 lvm   [SWAP]
sdd                                                          8:48   0 596.2G  0 disk
└─Hitachi_HTS547564A9E384_J21B0053G8E15R                   253:13   0 596.2G  0 mpath
  └─Hitachi_HTS547564A9E384_J21B0053G8E15R1                253:15   0 596.2G  0 part
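So the brick LV (gluster_vg_sda-gluster_lv_vmstore, dm 253:20) and the root LV
(rl_vmh-root, dm 253:3) really are separate devices in separate VGs; the only
overlap is the mount point directory itself. As one more sanity check that the
brick LV still carries its filesystem, something like:

[root@vmh /]# blkid /dev/mapper/gluster_vg_sda-gluster_lv_vmstore
[root@vmh /]# findmnt -no SOURCE /

should show a filesystem on the brick LV and /dev/mapper/rl_vmh-root behind /.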
So I think I can safely use 'force' here, because the brick and the root
filesystem are on different VGs. Can anyone confirm? Is there a scenario I'm
not seeing (i.e., a bad practice) where forcing could cause issues?
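If forcing is indeed safe, I assume it is literally the command from the
warning with 'force' appended, plus the usual follow-up checks:

[root@vmh /]# gluster volume create vmstore vmh.tourville.org:/gluster_bricks/vmstore force
[root@vmh /]# gluster volume start vmstore
[root@vmh /]# gluster volume info vmstore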