Hi,
I have upgraded my hardware and installed Ceph completely fresh, as described in http://docs.ceph.com/docs/master/rados/deployment/ . The last step was creating the OSDs, following http://docs.ceph.com/docs/master/rados/deployment/ceph-deploy-osd/ . I used the create command, and after that the OSDs should be in and up, but they are all down and out.
An additional osd activate command does not help.

Ubuntu 14.04.4 kernel 4.2.1
ceph 10.0.2

What should I do, and where is my mistake?

This is ceph.conf:

[global]
fsid = 122e929a-111b-4067-80e4-3fef39e66ecf
mon_initial_members = bd-0, bd-1, bd-2
mon_host = xxx.xxx.xxx.20,xxx.xxx.xxx.21,xxx.xxx.xxx.22
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public network = xxx.xxx.xxx.0/24
cluster network = 192.168.1.0/24
osd_journal_size = 10240
osd pool default size = 2
osd pool default min size = 1
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
osd_mkfs_type = btrfs
osd_mkfs_options_btrfs = -f -n 32k -l 32k
osd_mount_options_btrfs = rw,noatime,nodiratime,autodefrag
mds_max_file_size = 50000000000000


This is the log of the last osd:
##########
bd-2:/dev/sdaf:/dev/sdaf2
ceph-deploy disk zap bd-2:/dev/sdaf
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf [ceph_deploy.cli][INFO ] Invoked (1.5.31): /usr/bin/ceph-deploy osd create --fs-type btrfs bd-2:/dev/sdaf:/dev/sdaf2
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO ] disk : [('bd-2', '/dev/sdaf', '/dev/sdaf2')]
[ceph_deploy.cli][INFO  ]  dmcrypt                       : False
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO ] dmcrypt_key_dir : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f944e197488>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  fs_type                       : btrfs
[ceph_deploy.cli][INFO ] func : <function osd at 0x7f944e16b500>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  zap_disk                      : False
[ceph_deploy.osd][DEBUG ] Preparing cluster ceph disks bd-2:/dev/sdaf:/dev/sdaf2
[bd-2][DEBUG ] connected to host: bd-2
[bd-2][DEBUG ] detect platform information from remote host
[bd-2][DEBUG ] detect machine type
[bd-2][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: Ubuntu 14.04 trusty
[ceph_deploy.osd][DEBUG ] Deploying osd to bd-2
[bd-2][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph_deploy.osd][DEBUG ] Preparing host bd-2 disk /dev/sdaf journal /dev/sdaf2 activate True [bd-2][INFO ] Running command: ceph-disk -v prepare --cluster ceph --fs-type btrfs -- /dev/sdaf /dev/sdaf2 [bd-2][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --check-allows-journal -i 0 --cluster ceph [bd-2][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --check-wants-journal -i 0 --cluster ceph [bd-2][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --check-needs-journal -i 0 --cluster ceph [bd-2][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdaf uuid path is /sys/dev/block/65:240/dm/uuid [bd-2][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdaf uuid path is /sys/dev/block/65:240/dm/uuid [bd-2][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdaf uuid path is /sys/dev/block/65:240/dm/uuid [bd-2][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdaf2 uuid path is /sys/dev/block/65:242/dm/uuid [bd-2][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdaf2 uuid path is /sys/dev/block/65:242/dm/uuid [bd-2][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=fsid [bd-2][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mkfs_options_btrfs [bd-2][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_btrfs [bd-2][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=osd_journal_size [bd-2][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_cryptsetup_parameters [bd-2][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_dmcrypt_key_size [bd-2][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. 
--lookup osd_dmcrypt_type [bd-2][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdaf uuid path is /sys/dev/block/65:240/dm/uuid [bd-2][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdaf2 uuid path is /sys/dev/block/65:242/dm/uuid
[bd-2][WARNIN] DEBUG:ceph-disk:Journal /dev/sdaf2 is a partition
[bd-2][WARNIN] WARNING:ceph-disk:OSD will not be hot-swappable if journal is not the same device as the osd data [bd-2][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdaf2 uuid path is /sys/dev/block/65:242/dm/uuid
[bd-2][WARNIN] INFO:ceph-disk:Running command: /sbin/sgdisk -i 2 /dev/sdaf
[bd-2][WARNIN] WARNING:ceph-disk:Journal /dev/sdaf2 was not prepared with ceph-disk. Symlinking directly. [bd-2][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdaf uuid path is /sys/dev/block/65:240/dm/uuid [bd-2][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdaf uuid path is /sys/dev/block/65:240/dm/uuid
[bd-2][WARNIN] DEBUG:ceph-disk:Creating osd partition on /dev/sdaf
[bd-2][WARNIN] INFO:ceph-disk:Running command: /sbin/sgdisk --largest-new=1 --change-name=1:ceph data --partition-guid=1:c9486257-e53d-40b8-b7f6-3d228d0cb1f7 --typecode=1:89c57f98-2fe5-4dc0-89c1-f3ad0ceff2be -- /dev/sdaf
[bd-2][DEBUG ] The operation has completed successfully.
[bd-2][WARNIN] DEBUG:ceph-disk:Calling partprobe on created device /dev/sdaf
[bd-2][WARNIN] INFO:ceph-disk:Running command: /sbin/udevadm settle
[bd-2][WARNIN] INFO:ceph-disk:Running command: /sbin/partprobe /dev/sdaf
[bd-2][WARNIN] INFO:ceph-disk:Running command: /sbin/udevadm settle
[bd-2][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdaf uuid path is /sys/dev/block/65:240/dm/uuid
[bd-2][WARNIN] DEBUG:ceph-disk:Creating btrfs fs on /dev/sdaf1
[bd-2][WARNIN] INFO:ceph-disk:Running command: /sbin/mkfs -t btrfs -f -n 32k -l 32k -- /dev/sdaf1 [bd-2][WARNIN] Turning ON incompat feature 'extref': increased hardlink limit per file to 65536
[bd-2][DEBUG ]
[bd-2][DEBUG ] WARNING! - Btrfs v3.12 IS EXPERIMENTAL
[bd-2][DEBUG ] WARNING! - see http://btrfs.wiki.kernel.org before using
[bd-2][DEBUG ]
[bd-2][DEBUG ] fs created label (null) on /dev/sdaf1
[bd-2][DEBUG ]  nodesize 32768 leafsize 32768 sectorsize 4096 size 3.63TiB
[bd-2][DEBUG ] Btrfs v3.12
[bd-2][WARNIN] DEBUG:ceph-disk:Mounting /dev/sdaf1 on /var/lib/ceph/tmp/mnt.lW5X6l with options rw,noatime,nodiratime,autodefrag [bd-2][WARNIN] INFO:ceph-disk:Running command: /bin/mount -t btrfs -o rw,noatime,nodiratime,autodefrag -- /dev/sdaf1 /var/lib/ceph/tmp/mnt.lW5X6l [bd-2][WARNIN] DEBUG:ceph-disk:Preparing osd data dir /var/lib/ceph/tmp/mnt.lW5X6l [bd-2][WARNIN] DEBUG:ceph-disk:Creating symlink /var/lib/ceph/tmp/mnt.lW5X6l/journal -> /dev/sdaf2 [bd-2][WARNIN] INFO:ceph-disk:Running command: /bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.lW5X6l/ceph_fsid.35649.tmp [bd-2][WARNIN] INFO:ceph-disk:Running command: /bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.lW5X6l/fsid.35649.tmp [bd-2][WARNIN] INFO:ceph-disk:Running command: /bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.lW5X6l/magic.35649.tmp [bd-2][WARNIN] INFO:ceph-disk:Running command: /bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.lW5X6l
[bd-2][WARNIN] DEBUG:ceph-disk:Unmounting /var/lib/ceph/tmp/mnt.lW5X6l
[bd-2][WARNIN] INFO:ceph-disk:Running command: /bin/umount -- /var/lib/ceph/tmp/mnt.lW5X6l [bd-2][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdaf uuid path is /sys/dev/block/65:240/dm/uuid [bd-2][WARNIN] INFO:ceph-disk:Running command: /sbin/sgdisk --typecode=1:4fbd7e29-9d25-41b8-afd0-062c0ceff05d -- /dev/sdaf
[bd-2][DEBUG ] The operation has completed successfully.
[bd-2][WARNIN] DEBUG:ceph-disk:Calling partprobe on prepared device /dev/sdaf
[bd-2][WARNIN] INFO:ceph-disk:Running command: /sbin/udevadm settle
[bd-2][WARNIN] INFO:ceph-disk:Running command: /sbin/partprobe /dev/sdaf
[bd-2][WARNIN] INFO:ceph-disk:Running command: /sbin/udevadm settle
[bd-2][WARNIN] INFO:ceph-disk:Running command: /sbin/udevadm trigger --action=add --sysname-match sdaf1
[bd-2][INFO  ] checking OSD status...
[bd-2][INFO  ] Running command: ceph --cluster=ceph osd stat --format=json
[bd-2][WARNIN] there are 90 OSDs down
[bd-2][WARNIN] there are 90 OSDs out
[ceph_deploy.osd][DEBUG ] Host bd-2 is now ready for osd use.
root@bd-a:/etc/ceph#


root@bd-a:/etc/ceph# ceph -s
    cluster 122e929a-111b-4067-80e4-3fef39e66ecf
     health HEALTH_WARN
            64 pgs stuck inactive
            64 pgs stuck unclean
monmap e1: 3 mons at {bd-0=xxx.xxx.xxx.20:6789/0,bd-1=xxx.xxx.xxx.21:6789/0,bd-2=xxx.xxx.xxx.22:6789/0}
            election epoch 6, quorum 0,1,2 bd-0,bd-1,bd-2
     osdmap e91: 90 osds: 0 up, 0 in
            flags sortbitwise
      pgmap v92: 64 pgs, 1 pools, 0 bytes data, 0 objects
            0 kB used, 0 kB / 0 kB avail
                  64 creating
root@bd-a:/etc/ceph#

--
MfG,
  Markus Goldberg

--------------------------------------------------------------------------
Markus Goldberg       Universität Hildesheim
                      Rechenzentrum
Tel +49 5121 88392822 Universitätsplatz 1, D-31141 Hildesheim, Germany
Fax +49 5121 88392823 email goldb...@uni-hildesheim.de
--------------------------------------------------------------------------

_______________________________________________
ceph-users mailing list
ceph-users@lists.ceph.com
http://lists.ceph.com/listinfo.cgi/ceph-users-ceph.com

Reply via email to