Dear Admins,
   During the last day I have been trying to deploy a new radosgw,
following the Jewel guide. The ceph cluster is healthy (3 mon and 2 osd
servers).
   [root@cephrgw ceph]# ceph -v
ceph version 10.2.3 (ecc23778eb545d8dd55e2e4735b53cc93f92e65b)
   [root@cephrgw ceph]# rpm -qa | grep ceph
ceph-common-10.2.3-0.el7.x86_64
libcephfs1-10.2.3-0.el7.x86_64
ceph-deploy-1.5.36-0.noarch
ceph-release-1-1.el7.noarch
ceph-base-10.2.3-0.el7.x86_64
ceph-radosgw-10.2.3-0.el7.x86_64
python-cephfs-10.2.3-0.el7.x86_64
ceph-selinux-10.2.3-0.el7.x86_64
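
For reference, the gateway itself was created following the Jewel
quick-start, roughly like this (from memory, so the exact invocation is
an approximation):

ceph-deploy rgw create cephrgw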

Civetweb is running on the default port:
[root@cephrgw ceph]# systemctl status ceph-radosgw@rgw.cephrgw.service
● ceph-radosgw@rgw.cephrgw.service - Ceph rados gateway
   Loaded: loaded (/usr/lib/systemd/system/ceph-radosgw@.service; enabled;
vendor preset: disabled)
   Active: active (running) since Wed 2016-09-28 10:20:34 CEST; 2s ago
 Main PID: 29311 (radosgw)
   CGroup:
/system.slice/system-ceph\x2dradosgw.slice/ceph-radosgw@rgw.cephrgw.service
           └─29311 /usr/bin/radosgw -f --cluster ceph --name
client.rgw.cephrgw --setuser ceph --setgroup ceph

Sep 28 10:20:34 cephrgw.ifca.es systemd[1]: Started Ceph rados gateway.
Sep 28 10:20:34 cephrgw.ifca.es systemd[1]: Starting Ceph rados gateway...
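
As a quick sanity check that civetweb really answers on the default
port (7480 in Jewel, unless overridden), something like this should
return the anonymous S3 ListAllMyBuckets XML (sketch, output omitted):

curl http://cephrgw:7480/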

And these pools were created on ceph storage (rados df output,
reformatted; columns: KB, objects, clones, degraded, unfound, rd,
rd KB, wr, wr KB):

.rgw.root                 2    4   0   0   0    252    195     4   5
default.rgw.control       0    8   0   0   0      0      0     0   0
default.rgw.data.root     0    0   0   0   0      0      0     0   0
default.rgw.gc            0   32   0   0   0   2112   2080  1408   0
default.rgw.log           0  127   0   0   0  47625  47498 31750   0
default.rgw.users.uid     0    0   0   0   0      0      0     0   0

But it seems that the zones are not well defined.

radosgw-admin zone get --zone-id=default
2016-09-28 10:24:07.142478 7fd810b219c0  0 failed reading obj info from
.rgw.root:zone_info.default: (2) No such file or directory
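
To check which objects actually exist in the root pool, it can be
listed directly (just a sketch, output omitted here):

rados -p .rgw.root ls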


[root@cephrgw ~]# radosgw-admin zone get
2016-09-28 10:25:41.740162 7f18072799c0  1 -- :/0 messenger.start
2016-09-28 10:25:41.741262 7f18072799c0  1 -- :/945549824 -->
10.10.3.3:6789/0 -- auth(proto 0 30 bytes epoch 0) v1 -- ?+0 0x7f18085c9850
con 0x7f18085c85a0
2016-09-28 10:25:41.742048 7f180726f700  1 -- 10.10.3.4:0/945549824 learned
my addr 10.10.3.4:0/945549824
2016-09-28 10:25:41.743168 7f17eae03700  1 -- 10.10.3.4:0/945549824 <==
mon.2 10.10.3.3:6789/0 1 ==== mon_map magic: 0 v1 ==== 495+0+0 (2693174994
0 0) 0x7f17d4000b90 con 0x7f18085c85a0
2016-09-28 10:25:41.743380 7f17eae03700  1 -- 10.10.3.4:0/945549824 <==
mon.2 10.10.3.3:6789/0 2 ==== auth_reply(proto 2 0 (0) Success) v1 ====
33+0+0 (3801669063 0 0) 0x7f17d4001010 con 0x7f18085c85a0
2016-09-28 10:25:41.743696 7f17eae03700  1 -- 10.10.3.4:0/945549824 -->
10.10.3.3:6789/0 -- auth(proto 2 32 bytes epoch 0) v1 -- ?+0 0x7f17e0001730
con 0x7f18085c85a0
2016-09-28 10:25:41.744541 7f17eae03700  1 -- 10.10.3.4:0/945549824 <==
mon.2 10.10.3.3:6789/0 3 ==== auth_reply(proto 2 0 (0) Success) v1 ====
206+0+0 (1705741500 0 0) 0x7f17d4001010 con 0x7f18085c85a0
2016-09-28 10:25:41.744765 7f17eae03700  1 -- 10.10.3.4:0/945549824 -->
10.10.3.3:6789/0 -- auth(proto 2 165 bytes epoch 0) v1 -- ?+0
0x7f17e0001bf0 con 0x7f18085c85a0
2016-09-28 10:25:41.745619 7f17eae03700  1 -- 10.10.3.4:0/945549824 <==
mon.2 10.10.3.3:6789/0 4 ==== auth_reply(proto 2 0 (0) Success) v1 ====
393+0+0 (482591267 0 0) 0x7f17d40008c0 con 0x7f18085c85a0
2016-09-28 10:25:41.745783 7f17eae03700  1 -- 10.10.3.4:0/945549824 -->
10.10.3.3:6789/0 -- mon_subscribe({monmap=0+}) v2 -- ?+0 0x7f18085cd560 con
0x7f18085c85a0
2016-09-28 10:25:41.745967 7f18072799c0  1 -- 10.10.3.4:0/945549824 -->
10.10.3.3:6789/0 -- mon_subscribe({osdmap=0}) v2 -- ?+0 0x7f18085c9850 con
0x7f18085c85a0
2016-09-28 10:25:41.746521 7f17eae03700  1 -- 10.10.3.4:0/945549824 <==
mon.2 10.10.3.3:6789/0 5 ==== mon_map magic: 0 v1 ==== 495+0+0 (2693174994
0 0) 0x7f17d40012b0 con 0x7f18085c85a0
2016-09-28 10:25:41.746669 7f17daffd700  2
RGWDataChangesLog::ChangesRenewThread: start
2016-09-28 10:25:41.746882 7f18072799c0 20 get_system_obj_state:
rctx=0x7ffe0afd57e0 obj=.rgw.root:default.realm state=0x7f18085cf4e8
s->prefetch_data=0
2016-09-28 10:25:41.746962 7f17eae03700  1 -- 10.10.3.4:0/945549824 <==
mon.2 10.10.3.3:6789/0 6 ==== osd_map(5792..5792 src has 5225..5792) v3
==== 13145+0+0 (1223904398 0 0) 0x7f17d40008c0 con 0x7f18085c85a0
2016-09-28 10:25:41.747661 7f18072799c0  1 -- 10.10.3.4:0/945549824 -->
10.10.3.12:6810/8166 -- osd_op(client.2974205.0:1 26.85fca992 default.realm
[getxattrs,stat] snapc 0=[] ack+read+known_if_redirected e5792) v7 -- ?+0
0x7f18085d3410 con 0x7f18085d1eb0
2016-09-28 10:25:41.749762 7f17e83fc700  1 -- 10.10.3.4:0/945549824 <==
osd.10 10.10.3.12:6810/8166 1 ==== osd_op_reply(1 default.realm
[getxattrs,stat] v0'0 uv0 ack = -2 ((2) No such file or directory)) v7 ====
175+0+0 (2651345336 0 0) 0x7f17cc000a50 con 0x7f18085d1eb0
unable to initialize zone: (2) No such file or directory
2016-09-28 10:25:41.749933 7f18072799c0 10 could not read realm id: (2) No
such file or directory
2016-09-28 10:25:41.750290 7f18072799c0  1 -- 10.10.3.4:0/945549824
mark_down 0x7f18085d1eb0 -- 0x7f18085d0ba0
2016-09-28 10:25:41.750375 7f18072799c0  1 -- 10.10.3.4:0/945549824
mark_down 0x7f18085c85a0 -- 0x7f18085c72e0
2016-09-28 10:25:41.750575 7f18072799c0  1 -- 10.10.3.4:0/945549824
mark_down_all
2016-09-28 10:25:41.750888 7f18072799c0  1 -- 10.10.3.4:0/945549824
shutdown complete.
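
From what I have read about the Jewel multisite rework, I suspect the
default realm/zonegroup/zone objects were never written to .rgw.root.
What I am considering running next is something like this (only a
sketch pieced together from the multisite docs; the names and exact
flags are my assumption, I have not applied it yet):

radosgw-admin realm create --rgw-realm=default --default
radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=default --master --default
radosgw-admin period update --commit
systemctl restart ceph-radosgw@rgw.cephrgw.service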



This is my conf:

[global]
fsid = 6f5a65a7-316c-4825-afcb-428608941dd1
mon_initial_members = cephadm, cephmon02, cephmon03
mon_host = 10.10.3.1,10.10.3.2,10.10.3.3
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
filestore_xattr_use_omap = true
osd_pool_default_size = 2
public_network = 10.10.0.0/16
cluster_network = 192.168.254.0/27
debug ms = 1
debug rgw = 20

[osd]
osd_journal_size = 20000

[client.cinder]
keyring = /etc/ceph/ceph.client.cinder.keyring

[client.rgw.cephrgw]
host = cephrwg
keyring = /etc/ceph/ceph.client.rgw.cephrgw.keyring
log_file = /var/log/ceph/client.rgw.cephrgw.log
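
Note that nothing in the conf pins a zone or zonegroup. If the names
turn out to matter, I suppose something like this could be added to the
rgw section (just a sketch, assuming everything is called "default"):

rgw_zone = default
rgw_zonegroup = default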

Auth list:
client.bootstrap-rgw
key: AQALRwFWPBPDIxAABXXXXcwUsGPn0fMhhP19wg==
caps: [mon] allow profile bootstrap-rgw

client.rgw.cephrgw
key: AQAfsWNXUIv9MBAAXXXXzGk4strXE0UbWW4yzg==
caps: [mon] allow rw
caps: [osd] allow rwx

[root@cephrgw ~]# cat /etc/ceph/ceph.client.rgw.cephrgw.keyring
[client.rgw.cephrgw]
key = AQAfsWNXUIv9MBAAXXXXzGk4strXE0UbWW4yzg==
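
To rule out a credentials mismatch between the cluster and the local
keyring, I believe the key can be exercised directly (sketch):

ceph -s --name client.rgw.cephrgw --keyring /etc/ceph/ceph.client.rgw.cephrgw.keyring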

Any idea?

Regards, Iban


-- 
############################################################################
Iban Cabrillo Bartolome
Instituto de Fisica de Cantabria (IFCA)
Santander, Spain
Tel: +34942200969
PGP PUBLIC KEY:
http://pgp.mit.edu/pks/lookup?op=get&search=0xD9DF0B3D6C8C08AC
############################################################################
Bertrand Russell:
*"El problema con el mundo es que los estúpidos están seguros de todo y los
inteligentes están llenos de dudas*"