Hi Tom,


            Yes, it is mounted. I am using CentOS 7 with kernel version 
3.10.0-229.el7.x86_64.



      /dev/xvda3     xfs       138G   33M  138G   1% /home
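
            In case it is useful, a couple of additional checks on the OSD node 
(plain mount and ownership checks only; ceph:ceph is the ownership the 
activation log below chowns files to):

findmnt /home        # confirm the xfs mount shown by df above
ls -ld /home/osd1    # the OSD data directory should exist and be owned by ceph:ceph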





Regards

Prabu GJ





---- On Thu, 13 Apr 2017 17:20:34 +0530 Tom Verhaeg 
<t.verh...@real-websolutions.nl> wrote ----




Hi,



Is your OSD mounted correctly on the OS? 



Tom




From: ceph-users <ceph-users-boun...@lists.ceph.com> on behalf of gjprabu 
<gjpr...@zohocorp.com>
 Sent: Thursday, April 13, 2017 1:13:34 PM
 To: ceph-users
 Subject: Re: [ceph-users] ceph activation error
 


Hi All,



          Is anybody else facing a similar issue?



Regards

Prabu GJ





---- On Sat, 04 Mar 2017 09:50:35 +0530 gjprabu <gjpr...@zohocorp.com> 
wrote ----




Hi Team,



          I am installing a new Ceph setup (Jewel), and while activating the 
OSDs it is throwing the error below.

          I am using partition-based OSDs (a directory such as /home/osd1 on an 
existing partition), not an entire disk. An earlier installation about a month 
ago worked fine, but this time I am getting the error shown below.
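
          For reference, the workflow is the usual ceph-deploy prepare/activate 
pair for directory-backed OSDs (the prepare invocation here is only for 
illustration; the activate step is the one failing, as shown below):

ceph-deploy osd prepare  cphosd1:/home/osd1 cphosd2:/home/osd2 cphosd3:/home/osd3
ceph-deploy osd activate cphosd1:/home/osd1 cphosd2:/home/osd2 cphosd3:/home/osd3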





[root@cphadmin mycluster]# ceph-deploy osd activate cphosd1:/home/osd1 
cphosd2:/home/osd2 cphosd3:/home/osd3

[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf

[ceph_deploy.cli][INFO  ] Invoked (1.5.37): /usr/bin/ceph-deploy osd activate 
cphosd1:/home/osd1 cphosd2:/home/osd2 cphosd3:/home/osd3

[ceph_deploy.cli][INFO  ] ceph-deploy options:

[ceph_deploy.cli][INFO  ]  username                      : None

[ceph_deploy.cli][INFO  ]  verbose                       : False

[ceph_deploy.cli][INFO  ]  overwrite_conf                : False

[ceph_deploy.cli][INFO  ]  subcommand                    : activate

[ceph_deploy.cli][INFO  ]  quiet                         : False

[ceph_deploy.cli][INFO  ]  cd_conf                       : 
<ceph_deploy.conf.cephdeploy.Conf instance at 0x7ff270353fc8>

[ceph_deploy.cli][INFO  ]  cluster                       : ceph

[ceph_deploy.cli][INFO  ]  func                          : <function osd at 
0x7ff2703492a8>

[ceph_deploy.cli][INFO  ]  ceph_conf                     : None

[ceph_deploy.cli][INFO  ]  default_release               : False

[ceph_deploy.cli][INFO  ]  disk                          : [('cphosd1', 
'/home/osd1', None), ('cphosd2', '/home/osd2', None), ('cphosd3', '/home/osd3', 
None)]

[ceph_deploy.osd][DEBUG ] Activating cluster ceph disks cphosd1:/home/osd1: 
cphosd2:/home/osd2: cphosd3:/home/osd3:

[cphosd1][DEBUG ] connected to host: cphosd1

[cphosd1][DEBUG ] detect platform information from remote host

[cphosd1][DEBUG ] detect machine type

[cphosd1][DEBUG ] find the location of an executable

[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.1.1503 Core

[ceph_deploy.osd][DEBUG ] activating host cphosd1 disk /home/osd1

[ceph_deploy.osd][DEBUG ] will use init type: systemd

[cphosd1][DEBUG ] find the location of an executable

[cphosd1][INFO  ] Running command: /usr/sbin/ceph-disk -v activate --mark-init 
systemd --mount /home/osd1

[cphosd1][WARNIN] main_activate: path = /home/osd1

[cphosd1][WARNIN] activate: Cluster uuid is 62b4f8c7-c00c-48d0-8262-549c9ef6074c

[cphosd1][WARNIN] command: Running command: /usr/bin/ceph-osd --cluster=ceph 
--show-config-value=fsid

[cphosd1][WARNIN] activate: Cluster name is ceph

[cphosd1][WARNIN] activate: OSD uuid is 241b30d8-b2ba-4380-81f8-2e30e6913bb2

[cphosd1][WARNIN] allocate_osd_id: Allocating OSD id...

[cphosd1][WARNIN] command: Running command: /usr/bin/ceph --cluster ceph --name 
client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd 
create --concise 241b30d8-b2ba-4380-81f8-2e30e6913bb2

[cphosd1][WARNIN] command: Running command: /usr/sbin/restorecon -R 
/home/osd1/whoami.22462.tmp

[cphosd1][WARNIN] command: Running command: /usr/bin/chown -R ceph:ceph 
/home/osd1/whoami.22462.tmp

[cphosd1][WARNIN] activate: OSD id is 0

[cphosd1][WARNIN] activate: Initializing OSD...

[cphosd1][WARNIN] command_check_call: Running command: /usr/bin/ceph --cluster 
ceph --name client.bootstrap-osd --keyring 
/var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o 
/home/osd1/activate.monmap

[cphosd1][WARNIN] got monmap epoch 2

[cphosd1][WARNIN] command: Running command: /usr/bin/timeout 300 ceph-osd 
--cluster ceph --mkfs --mkkey -i 0 --monmap /home/osd1/activate.monmap 
--osd-data /home/osd1 --osd-journal /home/osd1/journal --osd-uuid 
241b30d8-b2ba-4380-81f8-2e30e6913bb2 --keyring /home/osd1/keyring --setuser 
ceph --setgroup ceph

[cphosd1][WARNIN] activate: Marking with init system systemd

[cphosd1][WARNIN] command: Running command: /usr/sbin/restorecon -R 
/home/osd1/systemd

[cphosd1][WARNIN] command: Running command: /usr/bin/chown -R ceph:ceph 
/home/osd1/systemd

[cphosd1][WARNIN] activate: Authorizing OSD key...

[cphosd1][WARNIN] command_check_call: Running command: /usr/bin/ceph --cluster 
ceph --name client.bootstrap-osd --keyring 
/var/lib/ceph/bootstrap-osd/ceph.keyring auth add osd.0 -i /home/osd1/keyring 
osd allow * mon allow profile osd

[cphosd1][WARNIN] added key for osd.0

[cphosd1][WARNIN] command: Running command: /usr/sbin/restorecon -R 
/home/osd1/active.22462.tmp

[cphosd1][WARNIN] command: Running command: /usr/bin/chown -R ceph:ceph 
/home/osd1/active.22462.tmp

[cphosd1][WARNIN] activate: ceph osd.0 data dir is ready at /home/osd1

[cphosd1][WARNIN] activate_dir: Creating symlink /var/lib/ceph/osd/ceph-0 -> 
/home/osd1

[cphosd1][WARNIN] start_daemon: Starting ceph osd.0...

[cphosd1][WARNIN] command_check_call: Running command: /usr/bin/systemctl 
enable ceph-osd@0

[cphosd1][WARNIN] Created symlink from 
/etc/systemd/system/ceph-osd.target.wants/ceph-osd@0.service to 
/usr/lib/systemd/system/ceph-osd@.service.

[cphosd1][WARNIN] command_check_call: Running command: /usr/bin/systemctl start 
ceph-osd@0

[cphosd1][WARNIN] Job for  ceph-osd@0.service failed because the control 
process exited with error code. See "systemctl status ceph-osd@0.service" and 
"journalctl -xe" for details.

[cphosd1][WARNIN] Traceback (most recent call last):

[cphosd1][WARNIN]   File "/usr/sbin/ceph-disk", line 9, in <module>

[cphosd1][WARNIN]     load_entry_point('ceph-disk==1.0.0', 'console_scripts', 
'ceph-disk')()

[cphosd1][WARNIN]   File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", 
line 5009, in run

[cphosd1][WARNIN]     main(sys.argv[1:])

[cphosd1][WARNIN]   File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", 
line 4960, in main

[cphosd1][WARNIN]     args.func(args)

[cphosd1][WARNIN]   File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", 
line 3359, in main_activate

[cphosd1][WARNIN]     osd_id=osd_id,

[cphosd1][WARNIN]   File "/usr/lib/python2.7/site-packages/ceph_disk/main.py", 
line 2906, in start_daemon

[cphosd1][WARNIN]     raise Error('ceph osd start failed', e)

[cphosd1][WARNIN] ceph_disk.main.Error

[cphosd1][ERROR ] RuntimeError: command returned non-zero exit status: 1

[ceph_deploy][ERROR ] RuntimeError: Failed to execute command: 
/usr/sbin/ceph-disk -v activate --mark-init systemd --mount /home/osd1
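
As the failure message itself suggests, the unit status and journal on the OSD 
node can be checked with:

systemctl status ceph-osd@0.service
journalctl -xe -u ceph-osd@0.service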







The logs show this error:



Mar  4 09:26:33 localhost systemd: 
[/usr/lib/systemd/system/ceph-osd@.service:18] Unknown lvalue 'TasksMax' in 
section 'Service'

Mar  4 09:26:33 localhost systemd: Reloading.

Mar  4 09:26:33 localhost systemd: 
[/usr/lib/systemd/system/ceph-osd@.service:18] Unknown lvalue 'TasksMax' in 
section 'Service'

Mar  4 09:26:33 localhost systemd: Configuration file 
/usr/lib/systemd/system/auditd.service is marked world-inaccessible. This has 
no effect as configuration data is accessible via APIs without restrictions. 
Proceeding anyway.

Mar  4 09:26:33 localhost systemd: Created slice system-ceph\x2dosd.slice.

Mar  4 09:26:33 localhost systemd: Starting system-ceph\x2dosd.slice.

Mar  4 09:26:33 localhost systemd: Starting Network Manager Wait Online...

Mar  4 09:26:33 localhost systemd: Reached target System Time Synchronized.

Mar  4 09:26:33 localhost systemd: Starting System Time Synchronized.

Mar  4 09:26:33 localhost systemd: Started Network Manager Wait Online.

Mar  4 09:26:33 localhost systemd: Reached target Network is Online.

Mar  4 09:26:33 localhost systemd: Starting Network is Online.

Mar  4 09:26:33 localhost systemd: Starting Ceph object storage daemon...

Mar  4 09:26:33 localhost ceph-osd-prestart.sh: OSD data directory 
/var/lib/ceph/osd/ceph-0 does not exist; bailing out.

Mar  4 09:26:33 localhost systemd:  ceph-osd@0.service: control process exited, 
code=exited status=1

Mar  4 09:26:33 localhost systemd: Failed to start Ceph object storage daemon.

Mar  4 09:26:33 localhost systemd: Unit  ceph-osd@0.service entered failed 
state.

Mar  4 09:26:33 localhost systemd:  ceph-osd@0.service failed.

Mar  4 09:26:34 localhost python: detected unhandled Python exception in 
'/usr/sbin/ceph-disk'

Mar  4 09:26:34 localhost systemd:  ceph-osd@0.service holdoff time over, 
scheduling restart.

Mar  4 09:26:34 localhost systemd: Starting Ceph object storage daemon...





Regards

Prabu GJ