I don't know why I didn't think to gather more info about my storage
environment and post it here earlier.  My gluster_vg1 volume group is on
/dev/sda1.  I can still access the engine storage directory, but I think that
is because it is not thin provisioned.  I guess I was too bogged down in
solving the problem while stuck in emergency mode.  I had to sneakernet a USB
drive over to the system so I could capture some info.  Anyhow, here it is:

# lsblk
NAME                                                       MAJ:MIN RM   SIZE RO TYPE  MOUNTPOINT
sda                                                          8:0    0   5.5T  0 disk
└─sda1                                                       8:1    0   5.5T  0 part
sdb                                                          8:16   0 223.6G  0 disk
├─sdb1                                                       8:17   0     1G  0 part  /boot
└─sdb2                                                       8:18   0 222.6G  0 part
  └─md127                                                    9:127  0 222.5G  0 raid1
    ├─onn_vmh-swap                                         253:0    0     4G  0 lvm   [SWAP]
    ├─onn_vmh-pool00_tmeta                                 253:1    0     1G  0 lvm
    │ └─onn_vmh-pool00-tpool                               253:3    0 173.6G  0 lvm
    │   ├─onn_vmh-ovirt--node--ng--4.2.7.1--0.20181216.0+1 253:4    0 146.6G  0 lvm   /
    │   ├─onn_vmh-pool00                                   253:5    0 173.6G  0 lvm
    │   ├─onn_vmh-root                                     253:6    0 146.6G  0 lvm
    │   ├─onn_vmh-home                                     253:7    0     1G  0 lvm   /home
    │   ├─onn_vmh-tmp                                      253:8    0     1G  0 lvm   /tmp
    │   ├─onn_vmh-var                                      253:9    0    15G  0 lvm   /var
    │   ├─onn_vmh-var_log                                  253:10   0     8G  0 lvm   /var/log
    │   ├─onn_vmh-var_log_audit                            253:11   0     2G  0 lvm   /var/log/audit
    │   └─onn_vmh-var_crash                                253:12   0    10G  0 lvm   /var/crash
    └─onn_vmh-pool00_tdata                                 253:2    0 173.6G  0 lvm
      └─onn_vmh-pool00-tpool                               253:3    0 173.6G  0 lvm
        ├─onn_vmh-ovirt--node--ng--4.2.7.1--0.20181216.0+1 253:4    0 146.6G  0 lvm   /
        ├─onn_vmh-pool00                                   253:5    0 173.6G  0 lvm
        ├─onn_vmh-root                                     253:6    0 146.6G  0 lvm
        ├─onn_vmh-home                                     253:7    0     1G  0 lvm   /home
        ├─onn_vmh-tmp                                      253:8    0     1G  0 lvm   /tmp
        ├─onn_vmh-var                                      253:9    0    15G  0 lvm   /var
        ├─onn_vmh-var_log                                  253:10   0     8G  0 lvm   /var/log
        ├─onn_vmh-var_log_audit                            253:11   0     2G  0 lvm   /var/log/audit
        └─onn_vmh-var_crash                                253:12   0    10G  0 lvm   /var/crash
sdc                                                          8:32   0 223.6G  0 disk
└─sdc1                                                       8:33   0 222.6G  0 part
  └─md127                                                    9:127  0 222.5G  0 raid1
    ├─onn_vmh-swap                                         253:0    0     4G  0 lvm   [SWAP]
    ├─onn_vmh-pool00_tmeta                                 253:1    0     1G  0 lvm
    │ └─onn_vmh-pool00-tpool                               253:3    0 173.6G  0 lvm
    │   ├─onn_vmh-ovirt--node--ng--4.2.7.1--0.20181216.0+1 253:4    0 146.6G  0 lvm   /
    │   ├─onn_vmh-pool00                                   253:5    0 173.6G  0 lvm
    │   ├─onn_vmh-root                                     253:6    0 146.6G  0 lvm
    │   ├─onn_vmh-home                                     253:7    0     1G  0 lvm   /home
    │   ├─onn_vmh-tmp                                      253:8    0     1G  0 lvm   /tmp
    │   ├─onn_vmh-var                                      253:9    0    15G  0 lvm   /var
    │   ├─onn_vmh-var_log                                  253:10   0     8G  0 lvm   /var/log
    │   ├─onn_vmh-var_log_audit                            253:11   0     2G  0 lvm   /var/log/audit
    │   └─onn_vmh-var_crash                                253:12   0    10G  0 lvm   /var/crash
    └─onn_vmh-pool00_tdata                                 253:2    0 173.6G  0 lvm
      └─onn_vmh-pool00-tpool                               253:3    0 173.6G  0 lvm
        ├─onn_vmh-ovirt--node--ng--4.2.7.1--0.20181216.0+1 253:4    0 146.6G  0 lvm   /
        ├─onn_vmh-pool00                                   253:5    0 173.6G  0 lvm
        ├─onn_vmh-root                                     253:6    0 146.6G  0 lvm
        ├─onn_vmh-home                                     253:7    0     1G  0 lvm   /home
        ├─onn_vmh-tmp                                      253:8    0     1G  0 lvm   /tmp
        ├─onn_vmh-var                                      253:9    0    15G  0 lvm   /var
        ├─onn_vmh-var_log                                  253:10   0     8G  0 lvm   /var/log
        ├─onn_vmh-var_log_audit                            253:11   0     2G  0 lvm   /var/log/audit
        └─onn_vmh-var_crash                                253:12   0    10G  0 lvm   /var/crash
sdd                                                          8:48   0 596.2G  0 disk
└─sdd1                                                       8:49   0     4G  0 part
  └─gluster_vg3-tmpLV                                      253:13   0     2G  0 lvm
sde                                                          8:64   1   7.5G  0 disk
└─sde1                                                       8:65   1   7.5G  0 part  /mnt

# blkid
/dev/sda1: UUID="f026a2dc-201a-4b43-974e-2419a8783bce" TYPE="xfs" PARTLABEL="Linux filesystem" PARTUUID="4bca8a3a-42f0-4877-aa60-f544bf1fdce7"
/dev/sdc1: UUID="e5f4acf5-a4bc-6470-7b6f-415e3f4077ff" UUID_SUB="a895900e-5585-8f31-7515-1ff7534e39d7" LABEL="vmh.cyber-range.lan:pv00" TYPE="linux_raid_member"
/dev/sdb1: UUID="9b9546f9-25d2-42a6-835b-303f32aee4b1" TYPE="ext4"
/dev/sdb2: UUID="e5f4acf5-a4bc-6470-7b6f-415e3f4077ff" UUID_SUB="6e20b5dd-0152-7f42-22a7-c17133fbce45" LABEL="vmh.cyber-range.lan:pv00" TYPE="linux_raid_member"
/dev/sdd1: UUID="2nLjVF-sh3N-0qkm-aUQ1-jnls-3e8W-tUkBw5" TYPE="LVM2_member"
/dev/md127: UUID="Mq1chn-6XhF-WCwF-LYhl-tZEz-Y8lq-8R2Ifq" TYPE="LVM2_member"
/dev/mapper/onn_vmh-swap: UUID="1b0b9c91-22ed-41d1-aebf-e22fd9aa05d9" TYPE="swap"
/dev/mapper/onn_vmh-ovirt--node--ng--4.2.7.1--0.20181216.0+1: UUID="b0e1c479-9696-4e19-b799-7f81236026b7" TYPE="ext4"
/dev/mapper/onn_vmh-root: UUID="60905f5d-ed91-4ca9-9729-9a72a4678ddd" TYPE="ext4"
/dev/mapper/onn_vmh-home: UUID="82a1d567-f8af-4b96-bfbf-5f79dff7384f" TYPE="ext4"
/dev/mapper/onn_vmh-tmp: UUID="7dd9d3ae-3af7-4763-9683-19f583d8d15b" TYPE="ext4"
/dev/mapper/onn_vmh-var: UUID="f206e030-876b-45a9-8a90-a0e54005b85c" TYPE="ext4"
/dev/mapper/onn_vmh-var_log: UUID="b8a12f56-0818-416c-9fb7-33b48ef29eed" TYPE="ext4"
/dev/mapper/onn_vmh-var_log_audit: UUID="bc78ad0c-9ab6-4f57-a69f-5b1ddf898552" TYPE="ext4"
/dev/mapper/onn_vmh-var_crash: UUID="a941d416-4d7d-41ae-bcd4-8c1ec9d0f744" TYPE="ext4"
/dev/sde1: UUID="44aa40d0-6c82-4e8e-8218-177e5c8474f4" TYPE="ext4"
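
So the 5.5T disk and its partition are visible to the kernel, but that doesn't
tell me whether LVM itself is allowed to use them.  For the record, these are
the read-only checks I'm planning to run next from emergency mode; they are
just generic LVM/multipath commands (nothing oVirt-specific), with the device
names taken from the lsblk output above:

# pvs -a -o pv_name,pv_uuid,vg_name,dev_size    # does LVM see a PV label anywhere on sda?
# pvdisplay /dev/sda /dev/sda1
# grep -E '^[[:space:]]*(global_)?filter' /etc/lvm/lvm.conf    # any filter hiding the disk from LVM?
# multipath -ll                                 # in case multipath has claimed the local disk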

# pvscan 
  PV /dev/md127   VG onn_vmh         lvm2 [222.44 GiB / 43.66 GiB free]
  PV /dev/sdd1    VG gluster_vg3     lvm2 [<4.00 GiB / <2.00 GiB free]
  Total: 2 [<226.44 GiB] / in use: 2 [<226.44 GiB] / in no VG: 0 [0   ]
  Reading all physical volumes.  This may take a while...
  Found volume group "onn_vmh" using metadata type lvm2
  Found volume group "gluster_vg3" using metadata type lvm2

# lvscan
  ACTIVE            '/dev/onn_vmh/pool00' [173.60 GiB] inherit
  ACTIVE            '/dev/onn_vmh/root' [146.60 GiB] inherit
  ACTIVE            '/dev/onn_vmh/home' [1.00 GiB] inherit
  ACTIVE            '/dev/onn_vmh/tmp' [1.00 GiB] inherit
  ACTIVE            '/dev/onn_vmh/var' [15.00 GiB] inherit
  ACTIVE            '/dev/onn_vmh/var_log' [8.00 GiB] inherit
  ACTIVE            '/dev/onn_vmh/var_log_audit' [2.00 GiB] inherit
  ACTIVE            '/dev/onn_vmh/swap' [4.00 GiB] inherit
  inactive          '/dev/onn_vmh/ovirt-node-ng-4.2.7.1-0.20181216.0' [146.60 GiB] inherit
  ACTIVE            '/dev/onn_vmh/ovirt-node-ng-4.2.7.1-0.20181216.0+1' [146.60 GiB] inherit
  ACTIVE            '/dev/onn_vmh/var_crash' [10.00 GiB] inherit
  ACTIVE            '/dev/gluster_vg3/tmpLV' [2.00 GiB] inherit
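
If I'm reading the pvscan/lvscan output right, LVM only finds the onn_vmh and
gluster_vg3 PVs; there is no sign of the gluster_vg1 PV at all, even though
the disk shows up in lsblk and blkid.  A crude read-only check I'm considering
(my assumption, not something from the output above) is to look for the LVM
label, the LABELONE header that lives in the first few 512-byte sectors of a
PV, directly on the disk:

# dd if=/dev/sda  bs=512 count=4 2>/dev/null | strings | grep -A1 LABELONE
# dd if=/dev/sda1 bs=512 count=4 2>/dev/null | strings | grep -A1 LABELONE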

[/etc/lvm/backup/gluster_vg1]
# Generated by LVM2 version 2.02.180(2)-RHEL7 (2018-07-20): Sat Dec 22 10:18:46 2018

contents = "Text Format Volume Group"
version = 1

description = "Created *after* executing '/usr/sbin/lvcreate --virtualsize 
500GB --name lv_datadisks -T gluster_vg1/lvthinpool'"

creation_host = "vmh.cyber-range.lan"   # Linux vmh.cyber-range.lan 
3.10.0-957.1.3.el7.x86_64 #1 SMP Thu Nov 29 14:49:43 UTC 2018 x86_64
creation_time = 1545495526      # Sat Dec 22 10:18:46 2018

gluster_vg1 {
        id = "TfNqtn-2eX6-i5gC-w4ye-h29n-5Zfy-UFHSvU"
        seqno = 9
        format = "lvm2"                 # informational
        status = ["RESIZEABLE", "READ", "WRITE"]
        flags = []
        extent_size = 2048              # 1024 Kilobytes
        max_lv = 0
        max_pv = 0
        metadata_copies = 0

        physical_volumes {

                pv0 {
                        id = "DRfoKl-TUhb-cirx-Oz9P-mZEY-XoiB-mdLy6v"
                        device = "/dev/sda"     # Hint only

                        status = ["ALLOCATABLE"]
                        flags = []
                        dev_size = 11718885376  # 5.45703 Terabytes
                        pe_start = 2048
                        pe_count = 5722111      # 5.45703 Terabytes
                }
        }

        logical_volumes {

                engine_lv {
                        id = "P2ScEB-ws3V-iqVv-XTWh-Y2Jh-pg0c-6KnJXC"
                        status = ["READ", "WRITE", "VISIBLE"]
                        flags = []
                        creation_time = 1545495460      # 2018-12-22 10:17:40 -0600
                        creation_host = "vmh.cyber-range.lan"
                        segment_count = 1

                        segment1 {
                                start_extent = 0
                                extent_count = 102400   # 100 Gigabytes

                                type = "striped"
                                stripe_count = 1        # linear

                                stripes = [
                                        "pv0", 0
                                ]
                        }
                }

                lvthinpool {
                        id = "c0yaNn-DcaB-cYjj-9ZRv-M2Em-rzLW-qY9WpI"
                        status = ["READ", "WRITE", "VISIBLE"]
                        flags = []
                        creation_time = 1545495487      # 2018-12-22 10:18:07 -0600
                        creation_host = "vmh.cyber-range.lan"
                        segment_count = 1

                        segment1 {
                                start_extent = 0
                                extent_count = 512000   # 500 Gigabytes

                                type = "thin-pool"
                                metadata = "lvthinpool_tmeta"
                                pool = "lvthinpool_tdata"
                                transaction_id = 2
                                chunk_size = 2048       # 1024 Kilobytes
                                discards = "passdown"
                                zero_new_blocks = 1
                        }
                }

                lv_vmdisks {
                        id = "erpXRi-nPUq-mCf2-ga2J-3a0l-OWiC-i0xr8M"
                        status = ["READ", "WRITE", "VISIBLE"]
                        flags = []
                        creation_time = 1545495493      # 2018-12-22 10:18:13 -0600
                        creation_host = "vmh.cyber-range.lan"
                        segment_count = 1

                        segment1 {
                                start_extent = 0
                                extent_count = 4613735  # 4.4 Terabytes

                                type = "thin"
                                thin_pool = "lvthinpool"
                                transaction_id = 0
                                device_id = 1
                        }
                }

                lv_datadisks {
                        id = "hKim3z-1QCh-dwhU-st2O-t4tG-wIss-UpLMZw"
                        status = ["READ", "WRITE", "VISIBLE"]
                        flags = []
                        creation_time = 1545495526      # 2018-12-22 10:18:46 -0600
                        creation_host = "vmh.cyber-range.lan"
                        segment_count = 1

                        segment1 {
                                start_extent = 0
                                extent_count = 512000   # 500 Gigabytes

                                type = "thin"
                                thin_pool = "lvthinpool"
                                transaction_id = 1
                                device_id = 2
                        }
                }

                lvol0_pmspare {
                        id = "bHc0eC-Z4Ed-mV47-QTU4-SCvo-FWbE-L8NV7Q"
                        status = ["READ", "WRITE"]
                        flags = []
                        creation_time = 1545495487      # 2018-12-22 10:18:07 -0600
                        creation_host = "vmh.cyber-range.lan"
                        segment_count = 1

                        segment1 {
                                start_extent = 0
                                extent_count = 16192    # 15.8125 Gigabytes

                                type = "striped"
                                stripe_count = 1        # linear

                                stripes = [
                                        "pv0", 102400
                                ]
                        }
                }

                lvthinpool_tmeta {
                        id = "WBut10-rAOP-FzA7-bJvr-ZdxL-lB70-jzz1Tv"
                        status = ["READ", "WRITE"]
                        flags = []
                        creation_time = 1545495487      # 2018-12-22 10:18:07 -0600
                        creation_host = "vmh.cyber-range.lan"
                        segment_count = 1

                        segment1 {
                                start_extent = 0
                                extent_count = 16192    # 15.8125 Gigabytes

                                type = "striped"
                                stripe_count = 1        # linear

                                stripes = [
                                        "pv0", 630592
                                ]
                        }
                }

                lvthinpool_tdata {
                        id = "rwNZux-1fz1-dv8J-yN2j-LcES-f6ml-231td5"
                        status = ["READ", "WRITE"]
                        flags = []
                        creation_time = 1545495487      # 2018-12-22 10:18:07 -0600
                        creation_host = "vmh.cyber-range.lan"
                        segment_count = 1

                        segment1 {
                                start_extent = 0
                                extent_count = 512000   # 500 Gigabytes

                                type = "striped"
                                stripe_count = 1        # linear

                                stripes = [
                                        "pv0", 118592
                                ]
                        }
                }
        }

}
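
Since that /etc/lvm/backup/gluster_vg1 file looks intact, my understanding is
that the usual recovery path, once LVM can see the PV again, would be roughly
the following.  I have NOT run any of this yet; the UUID is pv0's id from the
backup above, and I gather vgcfgrestore refuses to restore a VG that contains
thin pools unless it is forced, so I'd really appreciate a sanity check before
I touch anything:

# pvcreate --test --uuid DRfoKl-TUhb-cirx-Oz9P-mZEY-XoiB-mdLy6v \
      --restorefile /etc/lvm/backup/gluster_vg1 /dev/sda
      # only if the PV label really is gone; --test = dry run.  The backup's
      # device hint says /dev/sda, my notes say sda1 -- one more thing to confirm.
# vgcfgrestore --test -f /etc/lvm/backup/gluster_vg1 gluster_vg1
# vgchange -ay gluster_vg1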


# cd /var/log
# grep -ri gluster_vg1-lvthinpool-tpool 
messages-20190922:Sep 15 03:58:15 vmh lvm[14072]: Failed command for gluster_vg1-lvthinpool-tpool.
messages:Sep 22 03:44:05 vmh lvm[14072]: Failed command for gluster_vg1-lvthinpool-tpool.
messages-20190908:Sep  1 21:27:14 vmh lvm[14062]: Monitoring thin pool gluster_vg1-lvthinpool-tpool.
messages-20190908:Sep  1 21:27:24 vmh lvm[14062]: WARNING: Thin pool gluster_vg1-lvthinpool-tpool data is now 100.00% full.
messages-20190908:Sep  2 00:19:05 vmh lvm[14072]: Monitoring thin pool gluster_vg1-lvthinpool-tpool.
messages-20190908:Sep  2 00:19:15 vmh lvm[14072]: WARNING: Thin pool gluster_vg1-lvthinpool-tpool data is now 100.00% full.
messages-20190908:Sep  2 20:16:34 vmh lvm[14072]: Failed command for gluster_vg1-lvthinpool-tpool.
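
The 100% full warnings make sense to me now: per the backup above, the
lvthinpool data LV is only 500 GiB while lv_vmdisks alone has a 4.4 TiB
virtual size, so the pool is heavily overcommitted.  By my math the 5.5 TiB PV
still has roughly 4.8 TiB unallocated, so once the VG is back my rough plan is
something like this (the sizes are placeholders, not anything I've settled on):

# lvs -a -o lv_name,lv_size,data_percent,metadata_percent gluster_vg1   # confirm how full data/metadata really are
# lvextend -L +1T gluster_vg1/lvthinpool                                # grow the pool's data device
# lvextend --poolmetadatasize +1G gluster_vg1/lvthinpool                # and give the pool metadata some headroom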
