Hi,
I am creating a new Ceph cluster using Reef.
This is my hosts spec file:
[root@controllera config]# cat hosts-specs2.yml
service_type: host
hostname: computehci01
addr: 20.1.0.2
location:
  chassis: chassis1
---
service_type: host
hostname: computehci02
addr: 20.1.0.3
location:
  chassis: chassis1
---
service_type: host
hostname: computehci03
addr: 20.1.0.4
location:
  chassis: chassis1
---
service_type: host
hostname: computehci04
addr: 20.1.0.5
location:
  chassis: chassis2
---
service_type: host
hostname: computehci05
addr: 20.1.0.6
location:
  chassis: chassis2
---
service_type: host
hostname: computehci06
addr: 20.1.0.7
location:
  chassis: chassis2
---
service_type: host
hostname: computehci07
addr: 20.1.0.8
location:
  chassis: chassis3
---
service_type: host
hostname: computehci08
addr: 20.1.0.9
location:
  chassis: chassis3
---
service_type: host
hostname: computehci09
addr: 20.1.0.10
location:
  chassis: chassis3
---
service_type: host
hostname: computehci10
addr: 20.1.0.11
location:
  chassis: chassis3
---
service_type: host
hostname: computehci11
addr: 20.1.0.12
location:
  chassis: chassis4
---
service_type: host
hostname: computehci12
addr: 20.1.0.13
location:
  chassis: chassis4
---
service_type: host
hostname: computehci13
addr: 20.1.0.14
location:
  chassis: chassis4
---
service_type: host
hostname: computehci14
addr: 20.1.0.15
location:
  chassis: chassis4
---
service_type: host
hostname: computehci15
addr: 20.1.0.16
location:
  chassis: chassis5
---
service_type: host
hostname: computehci16
addr: 20.1.0.17
location:
  chassis: chassis5
---
service_type: host
hostname: computehci17
addr: 20.1.0.18
location:
  chassis: chassis5
---
service_type: host
hostname: computehci18
addr: 20.1.0.19
location:
  chassis: chassis5
---
service_type: host
hostname: computehci19
addr: 20.1.0.20
location:
  chassis: chassis6
---
service_type: host
hostname: computehci20
addr: 20.1.0.21
location:
  chassis: chassis6
---
service_type: host
hostname: computehci21
addr: 20.1.0.22
location:
  chassis: chassis6
---
service_type: host
hostname: computehci22
addr: 20.1.0.24
location:
  chassis: chassis7
---
service_type: host
hostname: computehci23
addr: 20.1.0.25
location:
  chassis: chassis7
---
service_type: host
hostname: computehci24
addr: 20.1.0.26
location:
  chassis: chassis7
---
service_type: host
hostname: computehci25
addr: 20.1.0.28
location:
  chassis: chassis8
---
service_type: host
hostname: computehci26
addr: 20.1.0.29
location:
  chassis: chassis8
---
service_type: host
hostname: computehci27
addr: 20.1.0.30
location:
  chassis: chassis8
---
service_type: host
hostname: controllera
addr: 20.1.0.23
---
service_type: host
hostname: controllerb
addr: 20.1.0.27
---
service_type: host
hostname: controllerc
addr: 20.1.0.31
---
service_type: mon
placement:
  hosts:
    - controllera
    - controllerb
    - controllerc
---
service_type: mgr
placement:
  hosts:
    - controllera
    - controllerb
    - controllerc
---
service_type: osd
service_id: default_drive_group
placement:
  hosts:
    - computehci01
    - computehci02
    - computehci03
    - computehci04
    - computehci05
    - computehci06
    - computehci07
    - computehci08
    - computehci09
    - computehci10
    - computehci11
    - computehci12
    - computehci13
    - computehci14
    - computehci15
    - computehci16
    - computehci17
    - computehci18
    - computehci19
    - computehci20
    - computehci21
    - computehci22
    - computehci23
    - computehci24
    - computehci25
    - computehci26
    - computehci27
spec:
  data_devices:
    rotational: 0
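
For reference, I applied this spec with ceph orch, roughly as follows (reconstructed from memory, so the exact invocation may differ slightly):

# preview what cephadm would schedule, then apply the spec
ceph orch apply -i hosts-specs2.yml --dry-run
ceph orch apply -i hosts-specs2.yml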
All OSDs were added, but the PGs are still in the unknown state.
I created a pool, but it didn't change anything.
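
The pool was created with something like this (the pool name is just a placeholder, not necessarily what I used; 32 PGs plus the .mgr pool would account for the 33 PGs shown below):

# "testpool" is a placeholder name
ceph osd pool create testpool 32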
[root@controllerb ~]# ceph -s
  cluster:
    id:     be250ade-e1f2-11ee-a6ff-3cecef2872f0
    health: HEALTH_WARN
            Reduced data availability: 1 pg inactive

  services:
    mon: 3 daemons, quorum controllera,controllerc,controllerb (age 3h)
    mgr: controllerc.jevbkl(active, since 21s), standbys: controllera.zwlolp, controllerb.vqkdga
    osd: 108 osds: 108 up (since 2m), 108 in (since 24m)

  data:
    pools:   2 pools, 33 pgs
    objects: 0 objects, 0 B
    usage:   5.1 GiB used, 330 TiB / 330 TiB avail
    pgs:     100.000% pgs unknown
             33 unknown
Did I miss something?
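I can post more output if it helps, for example from:

# commands I can run on the cluster and share the output of
ceph osd tree
ceph osd crush rule dump
ceph osd pool ls detail
ceph pg dump_stuck inactive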
Regards.