Hi,
After reading the thread
http://lists.ceph.com/pipermail/ceph-users-ceph.com/2013-June/002358.html
We have created this CRUSH map to make things work.
srv1 and srv1ssd are the same physical server (likewise for srv2, srv3 and srv4);
we split each server in the CRUSH map to create two parallel hierarchies.
This example is working;
I was just wondering whether it is the best way to achieve this.
Thanks
# begin crush map
# devices
# 16 OSDs total. Per the bucket definitions below, osd.0, osd.4,
# osd.6 and osd.10 (weight 0.230) belong to the *ssd host buckets,
# while the remaining OSDs (weight 1.820) belong to the plain
# srv1..srv4 host buckets.
device 0 osd.0
device 1 osd.1
device 2 osd.2
device 3 osd.3
device 4 osd.4
device 5 osd.5
device 6 osd.6
device 7 osd.7
device 8 osd.8
device 9 osd.9
device 10 osd.10
device 11 osd.11
device 12 osd.12
device 13 osd.13
device 14 osd.14
device 15 osd.15
# types
# Bucket type hierarchy, from leaf (osd) to top (root).
# This map only uses types osd, host and root; rack/row/room/
# datacenter are declared but unused.
type 0 osd
type 1 host
type 2 rack
type 3 row
type 4 room
type 5 datacenter
type 6 root
# buckets
# Spinning-disk OSDs of physical server srv1. The SSD OSD of the
# same machine is kept in the separate srv1ssd bucket below so the
# two device classes form parallel hierarchies.
host srv1 {
id -2 # do not change unnecessarily
# weight 5.690
alg straw
hash 0 # rjenkins1
item osd.1 weight 1.820
item osd.2 weight 1.820
item osd.3 weight 1.820
}
# Spinning-disk OSDs of physical server srv2 (SSD OSD is in srv2ssd).
host srv2 {
id -3 # do not change unnecessarily
# weight 5.690
alg straw
hash 0 # rjenkins1
item osd.5 weight 1.820
item osd.14 weight 1.820
item osd.15 weight 1.820
}
# Spinning-disk OSDs of physical server srv3 (SSD OSD is in srv3ssd).
host srv3 {
id -4 # do not change unnecessarily
# weight 5.690
alg straw
hash 0 # rjenkins1
item osd.7 weight 1.820
item osd.8 weight 1.820
item osd.9 weight 1.820
}
# Spinning-disk OSDs of physical server srv4 (SSD OSD is in srv4ssd).
host srv4 {
id -5 # do not change unnecessarily
# weight 5.690
alg straw
hash 0 # rjenkins1
item osd.11 weight 1.820
item osd.12 weight 1.820
item osd.13 weight 1.820
}
# SSD OSD of physical server srv1 (same machine as host bucket srv1);
# kept in its own bucket so the ssd root/rule can target SSDs only.
host srv1ssd {
id -100 # do not change unnecessarily
# weight 0.230
alg straw
hash 0 # rjenkins1
item osd.0 weight 0.230
}
# SSD OSD of physical server srv2 (same machine as host bucket srv2).
host srv2ssd {
id -101 # do not change unnecessarily
# weight 0.230
alg straw
hash 0 # rjenkins1
item osd.4 weight 0.230
}
# SSD OSD of physical server srv3 (same machine as host bucket srv3).
host srv3ssd {
id -102 # do not change unnecessarily
# weight 0.230
alg straw
hash 0 # rjenkins1
item osd.6 weight 0.230
}
# SSD OSD of physical server srv4 (same machine as host bucket srv4).
host srv4ssd {
id -103 # do not change unnecessarily
# weight 0.230
alg straw
hash 0 # rjenkins1
item osd.10 weight 0.230
}
# Root of the spinning-disk hierarchy: the four srv* host buckets.
# Targeted by rules 0 (data), 1 (metadata) and 2 (sata).
root default {
id -1 # do not change unnecessarily
# weight 22.760
alg straw
hash 0 # rjenkins1
item srv1 weight 5.690
item srv2 weight 5.690
item srv3 weight 5.690
item srv4 weight 5.690
}
# Root of the parallel SSD hierarchy: the four srv*ssd host buckets.
# Targeted only by rule 3 (ssd).
root ssd {
id -99 # do not change unnecessarily
# weight 0.920
alg straw
hash 0 # rjenkins1
item srv1ssd weight 0.230
item srv2ssd weight 0.230
item srv3ssd weight 0.230
item srv4ssd weight 0.230
}
# rules
# Default rule for the data pool: place replicas on the spinning-disk
# hierarchy. "chooseleaf firstn 0 type host" selects one OSD under
# each of pool-size distinct hosts, so replicas never share a server.
rule data {
ruleset 0
type replicated
min_size 1
max_size 10
step take default
step chooseleaf firstn 0 type host
step emit
}
# Rule for the metadata pool; identical placement logic to rule 0
# (data) — replicas spread across distinct hosts under root default.
rule metadata {
ruleset 1
type replicated
min_size 1
max_size 10
step take default
step chooseleaf firstn 0 type host
step emit
}
# Rule for SATA-backed pools. Functionally identical to rules 0 and 1
# (same root, same chooseleaf step); it exists as a separate ruleset
# number so pools can be pointed at it explicitly.
rule sata {
ruleset 2
type replicated
min_size 1
max_size 10
step take default
step chooseleaf firstn 0 type host
step emit
}
# Rule for SSD-backed pools: identical chooseleaf logic, but starts
# from root ssd, so replicas land only on the srv*ssd buckets
# (one SSD OSD per distinct host).
rule ssd {
ruleset 3
type replicated
min_size 1
max_size 10
step take ssd
step chooseleaf firstn 0 type host
step emit
}
# end crush map
cyril
--
probeSys - spécialiste GNU/Linux
site web : http://www.probesys.com
_______________________________________________
ceph-users mailing list
[email protected]
http://lists.ceph.com/listinfo.cgi/ceph-users-ceph.com