Hi,
you're looking for the custom location hooks [0] described in the CRUSH location docs [1]. Note that the docs don't cover the containerized case; for that you'll need a workaround as described in [3].
You can either disable the automatic CRUSH location update on OSD startup (default: true):

ceph config set osd osd_crush_update_on_start false
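
If you only want to pin those three OSDs rather than change the cluster-wide default, the option can also be set per daemon; a minimal sketch, assuming the osd.3/4/5 IDs from your output:

ceph config set osd.3 osd_crush_update_on_start false
ceph config set osd.4 osd_crush_update_on_start false
ceph config set osd.5 osd_crush_update_on_start false
ceph config get osd.5 osd_crush_update_on_start    # verify the override took effect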
Or you add a custom location hook as described in this thread [2]; a rough sketch follows below.
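
For reference, such a hook is just an executable that the OSD runs on startup (passing roughly --cluster, --id and --type) and that prints the desired CRUSH location on stdout. A minimal sketch, assuming a hypothetical path /usr/local/bin/custom-crush-location and the 'custom' bucket from your map; with cephadm the script additionally has to be reachable from inside the OSD container, which is the part that [3] works around:

#!/bin/sh
# Minimal CRUSH location hook (sketch): ignore the arguments Ceph
# passes in and always report the same location for this node's OSDs.
echo "host=custom root=default"

Then point the OSDs at it, e.g. something like:

ceph config set osd.5 crush_location_hook /usr/local/bin/custom-crush-location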
Regards,
Eugen
[0] https://docs.ceph.com/en/latest/rados/operations/crush-map/#custom-location-hooks
[1] https://docs.ceph.com/en/latest/rados/operations/crush-map/#crush-location
[2] https://lists.ceph.io/hyperkitty/list/ceph-users@ceph.io/thread/2WM7HF4THJJWNWBPZMGCZBWFYQ2TEDTQ/#UDMTJXCCOCZORYJYOITYVAKJ7ZQIAEJB
[3] https://tracker.ceph.com/issues/53562
Quoting Devender Singh <deven...@netskrt.io>:
Hello all,
Why are my buckets moving back when I restart an OSD? How do I make them persistent? After the move command, the CRUSH map shows the following:
host custom {
        id -9              # do not change unnecessarily
        id -10 class hdd   # do not change unnecessarily
        # weight 0.01469
        alg straw2
        hash 0  # rjenkins1
        item osd.3 weight 0.00490
        item osd.4 weight 0.00490
        item osd.5 weight 0.00490
}
root@ceph-01:/var/lib/ceph/51b1bcd8-3527-11f0-b51b-dd58cdde2818/osd.5# ceph osd crush move osd.3 host=custom root=default
ceph osd crush move osd.4 host=custom root=default
ceph osd crush move osd.5 host=custom root=default
no need to move item id 3 name 'osd.3' to location {host=custom,root=default} in crush map
no need to move item id 4 name 'osd.4' to location {host=custom,root=default} in crush map
no need to move item id 5 name 'osd.5' to location {host=custom,root=default} in crush map
root@ceph-01:/var/lib/ceph/51b1bcd8-3527-11f0-b51b-dd58cdde2818/osd.5# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME         STATUS  REWEIGHT  PRI-AFF
-9         0.01469  host custom
 3    hdd  0.00490      osd.3             up         0  1.00000
 4    hdd  0.00490      osd.4             up         0  1.00000
 5    hdd  0.00490      osd.5             up         0  1.00000
-1         0.08789  root default
-5         0.02930      host ceph-01
 2    hdd  0.02930          osd.2         up   1.00000  1.00000
-3         0.02930      host ceph-02
 1    hdd  0.02930          osd.1         up   1.00000  1.00000
-7         0.02930      host ceph-03
 0    hdd  0.02930          osd.0         up   1.00000  1.00000
root@ceph-01:/var/lib/ceph/51b1bcd8-3527-11f0-b51b-dd58cdde2818/osd.5# ceph orch daemon restart osd.5
Scheduled to restart osd.5 on host 'ceph-01.tinihub.com'
root@ceph-01:/var/lib/ceph/51b1bcd8-3527-11f0-b51b-dd58cdde2818/osd.5# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME         STATUS  REWEIGHT  PRI-AFF
-9         0.01469  host custom
 3    hdd  0.00490      osd.3             up         0  1.00000
 4    hdd  0.00490      osd.4             up         0  1.00000
 5    hdd  0.00490      osd.5             up         0  1.00000
-1         0.08789  root default
-5         0.02930      host ceph-01
 2    hdd  0.02930          osd.2         up   1.00000  1.00000
-3         0.02930      host ceph-02
 1    hdd  0.02930          osd.1         up   1.00000  1.00000
-7         0.02930      host ceph-03
 0    hdd  0.02930          osd.0         up   1.00000  1.00000
root@ceph-01:/var/lib/ceph/51b1bcd8-3527-11f0-b51b-dd58cdde2818/osd.5# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME         STATUS  REWEIGHT  PRI-AFF
-9         0.01469  host custom
 3    hdd  0.00490      osd.3             up         0  1.00000
 4    hdd  0.00490      osd.4             up         0  1.00000
 5    hdd  0.00490      osd.5           down         0  1.00000
-1         0.08789  root default
-5         0.02930      host ceph-01
 2    hdd  0.02930          osd.2         up   1.00000  1.00000
-3         0.02930      host ceph-02
 1    hdd  0.02930          osd.1         up   1.00000  1.00000
-7         0.02930      host ceph-03
 0    hdd  0.02930          osd.0         up   1.00000  1.00000
root@ceph-01:/var/lib/ceph/51b1bcd8-3527-11f0-b51b-dd58cdde2818/osd.5# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME         STATUS  REWEIGHT  PRI-AFF
-9         0.00980  host custom
 3    hdd  0.00490      osd.3             up         0  1.00000
 4    hdd  0.00490      osd.4             up         0  1.00000
-1         0.09279  root default
-5         0.03419      host ceph-01
 2    hdd  0.02930          osd.2         up   1.00000  1.00000
 5    hdd  0.00490          osd.5       down         0  1.00000
-3         0.02930      host ceph-02
 1    hdd  0.02930          osd.1         up   1.00000  1.00000
-7         0.02930      host ceph-03
 0    hdd  0.02930          osd.0         up   1.00000  1.00000
root@ceph-01:/var/lib/ceph/51b1bcd8-3527-11f0-b51b-dd58cdde2818/osd.5# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME         STATUS  REWEIGHT  PRI-AFF
-9         0.00980  host custom
 3    hdd  0.00490      osd.3             up         0  1.00000
 4    hdd  0.00490      osd.4             up         0  1.00000
-1         0.09279  root default
-5         0.03419      host ceph-01
 2    hdd  0.02930          osd.2         up   1.00000  1.00000
 5    hdd  0.00490          osd.5         up         0  1.00000
-3         0.02930      host ceph-02
 1    hdd  0.02930          osd.1         up   1.00000  1.00000
-7         0.02930      host ceph-03
 0    hdd  0.02930          osd.0         up   1.00000  1.00000
_______________________________________________
ceph-users mailing list -- ceph-users@ceph.io
To unsubscribe send an email to ceph-users-le...@ceph.io