When reclaiming zones we should arbitrate between the zoned
devices to get better throughput, so implement a simple
round-robin load balancer between the zoned devices.

Signed-off-by: Hannes Reinecke <h...@suse.de>
---
 drivers/md/dm-zoned-metadata.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)
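
For reference, a minimal userspace sketch (not the dm-zoned code itself)
of the round-robin scheme the hunks below add: remember the index of the
device used for the previous allocation and start the next search at the
following device, wrapping with a modulo. The names struct balancer and
pick_dev() are illustrative only; the actual change keeps the cursor in
zmd->last_alloc_idx and only advances it for non-cache allocations.

	#include <stdio.h>

	struct balancer {
		unsigned int nr_devs;		/* number of zoned devices */
		unsigned int last_alloc_idx;	/* device to try first */
	};

	/* Pick the starting device and advance the round-robin cursor. */
	static unsigned int pick_dev(struct balancer *b)
	{
		unsigned int dev_idx = b->last_alloc_idx;

		b->last_alloc_idx = (dev_idx + 1) % b->nr_devs;
		return dev_idx;
	}

	int main(void)
	{
		struct balancer b = { .nr_devs = 3, .last_alloc_idx = 0 };

		/* Allocations cycle through devices 0, 1, 2, 0, 1, 2, ... */
		for (int i = 0; i < 6; i++)
			printf("allocation %d -> dev %u\n", i, pick_dev(&b));
		return 0;
	}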

diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 87784e7785bc..25dcad2a565f 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -171,6 +171,8 @@ struct dmz_metadata {
        unsigned int            nr_reserved_seq;
        unsigned int            nr_chunks;
 
+       unsigned int            last_alloc_idx;
+
        /* Zone information array */
        struct xarray           zones;
 
@@ -2178,7 +2180,7 @@ struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags)
 {
        struct list_head *list;
        struct dm_zone *zone;
-       unsigned int dev_idx = 0;
+       unsigned int dev_idx = zmd->last_alloc_idx;
 
 again:
        if (flags & DMZ_ALLOC_CACHE)
@@ -2214,6 +2216,9 @@ struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags)
        zone = list_first_entry(list, struct dm_zone, link);
        list_del_init(&zone->link);
 
+       if (!(flags & DMZ_ALLOC_CACHE))
+               zmd->last_alloc_idx = (dev_idx + 1) % zmd->nr_devs;
+
        if (dmz_is_cache(zone))
                atomic_dec(&zmd->unmap_nr_cache);
        else if (dmz_is_rnd(zone))
@@ -2839,6 +2844,7 @@ int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
        zmd->dev = dev;
        zmd->nr_devs = num_dev;
        zmd->mblk_rbtree = RB_ROOT;
+       zmd->last_alloc_idx = 0;
        init_rwsem(&zmd->mblk_sem);
        mutex_init(&zmd->mblk_flush_lock);
        spin_lock_init(&zmd->mblk_lock);
-- 
2.16.4
