OS: CentOS 6.5
Version: Ceph 0.79

Hi, everybody!
I have installed a Ceph cluster on 10 servers and tested its throughput within the same datacenter.
Whether I upload 1 GB files from one or from several client servers, to one or to several of the
cluster servers, the total throughput is about 30 MB/s.
In other words, the whole cluster gives me no more upload throughput than a single server does.
How can I optimize the performance of Ceph object storage?
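
(For what it's worth, one way to see whether the ~30 MB/s ceiling comes from the RGW/S3 layer
or from the OSDs underneath is to write straight to RADOS and compare; a minimal sketch,
assuming a test pool named "data":)

    rados -p data bench 60 write -t 16              # 60 s of 4 MB object writes, 16 in flight
    rados -p data bench 60 write -t 16 -b 1048576   # the same test with 1 MB objects

If raw RADOS writes also top out around 30 MB/s, the limit is below RGW (network, journals,
replication); if RADOS is much faster, the S3 path itself is the bottleneck.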
Thanks!


------------------------------------------------------------------------------------------------------------
Info about the ceph cluster:
4 MONs, on the first 4 nodes of the cluster.
11 OSDs on each server, 109 OSDs in total (one disk was bad).
4 TB per disk, 391 TB reported in total (109 * 4 - 391 = 45 TB; where did that 45 TB go? See the sketch after this list.)
1 RGW on each server, 10 RGWs in total, so I can use the S3 API on every server.
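
(A rough sketch of the capacity arithmetic, assuming the disks are 4 TB in decimal units
(4 * 10^12 bytes) while ceph reports sizes in binary units:)

    echo $(( 109 * 4 * 10**12 / 2**40 ))   # ~396 TiB of raw space before any overhead
    ceph df                                # how the cluster itself accounts for SIZE/AVAIL

So most of the apparent 45 TB gap is likely just the decimal-vs-binary unit difference, with
the remainder going to XFS metadata and the per-OSD journal files.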


ceph.conf:
[global]
        auth supported = none


        ;auth_service_required = cephx
        ;auth_client_required = cephx
        ;auth_cluster_required = cephx
        filestore_xattr_use_omap = true


        max open files = 131072
        log file = /var/log/ceph/$name.log
        pid file = /var/run/ceph/$name.pid
        keyring = /etc/ceph/keyring.admin
        
        mon_clock_drift_allowed = 2 ;clock skew detected


[mon]
        mon data = /data/mon$id
        keyring = /etc/ceph/keyring.$name
[osd]
        osd data = /data/osd$id
        osd journal = /data/osd$id/journal
        osd journal size = 1024
        keyring = /etc/ceph/keyring.$name
        osd mkfs type = xfs    
        osd mount options xfs = rw,noatime
        osd mkfs options xfs = -f


[client.radosgw.cn-bj-1]
        rgw region = cn
        rgw region root pool = .cn.rgw.root
        rgw zone = cn-bj
        rgw zone root pool = .cn-wz.rgw.root
        host = yun168
        public_addr = 192.168.10.115
        rgw dns name = s3.domain.com
        keyring = /etc/ceph/ceph.client.radosgw.keyring
        rgw socket path = /var/run/ceph/$name.sock
        log file = /var/log/ceph/radosgw.log
        debug rgw = 20
        rgw print continue = true
        rgw should log = true
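
(One thing that stands out in the [osd] section above: each journal is a 1 GB file on the same
spinning disk as its data, so every write hits the same spindle twice. A sketch of how those two
settings could look with journals on dedicated devices instead; the partition labels below are
hypothetical:)

[osd]
        ; hypothetical layout: one small SSD partition per OSD for the journal
        osd journal = /dev/disk/by-partlabel/journal-$id
        ; with a whole block device, a size of 0 means "use the entire partition"
        osd journal size = 0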

[root@yun168 ~]# ceph -s
    cluster e48b0d5b-ff08-4a8e-88aa-4acd3f5a6204
     health HEALTH_OK
     monmap e7: 4 mons at {... ... .... ...}, election epoch 78, quorum 0,1,2,3 0,1,2,3
     mdsmap e49: 0/0/1 up
     osdmap e3722: 109 osds: 109 up, 109 in
      pgmap v106768: 29432 pgs, 19 pools, 12775 GB data, 12786 kobjects
            640 GB used, 390 TB / 391 TB avail
               29432 active+clean
  client io 1734 kB/s rd, 29755 kB/s wr, 443 op/s
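
(For scale, the pgmap above spreads 29432 PGs over 109 OSDs; the load per OSD depends on the
replica count, which ceph -s does not show. A quick check, assuming size 3 purely for the
arithmetic; the upstream guideline is on the order of 100 PG copies per OSD:)

    ceph osd dump | grep ^pool      # pg_num and replica size for each of the 19 pools
    echo $(( 29432 * 3 / 109 ))     # ~810 PG copies per OSD if every pool had size 3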

