Re: [dm-devel] [PATCH 11/12] dm-zoned: round-robin load balancer for reclaiming zones

2020-05-25 Thread Hannes Reinecke

On 5/25/20 4:42 AM, Damien Le Moal wrote:

On 2020/05/23 0:39, Hannes Reinecke wrote:

When reclaiming zones we should arbitrate between the zoned
devices to get a better throughput. So implement a simple
round-robin load balancer between the zoned devices.

Signed-off-by: Hannes Reinecke 
---
  drivers/md/dm-zoned-metadata.c | 8 +++-
  1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 87784e7785bc..25dcad2a565f 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -171,6 +171,8 @@ struct dmz_metadata {
	unsigned int		nr_reserved_seq;
	unsigned int		nr_chunks;
  
+	unsigned int		last_alloc_idx;

+
/* Zone information array */
struct xarray   zones;
  
@@ -2178,7 +2180,7 @@ struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags)

  {
struct list_head *list;
struct dm_zone *zone;
-   unsigned int dev_idx = 0;
+   unsigned int dev_idx = zmd->last_alloc_idx;
  
  again:

if (flags & DMZ_ALLOC_CACHE)
@@ -2214,6 +2216,9 @@ struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, 
unsigned long flags)
zone = list_first_entry(list, struct dm_zone, link);
	list_del_init(&zone->link);
  
+	if (!(flags & DMZ_ALLOC_CACHE))

+   zmd->last_alloc_idx = (dev_idx + 1) % zmd->nr_devs;
+
if (dmz_is_cache(zone))
	atomic_dec(&zmd->unmap_nr_cache);
else if (dmz_is_rnd(zone))
@@ -2839,6 +2844,7 @@ int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
zmd->dev = dev;
zmd->nr_devs = num_dev;
zmd->mblk_rbtree = RB_ROOT;
+   zmd->last_alloc_idx = 0;
	init_rwsem(&zmd->mblk_sem);
	mutex_init(&zmd->mblk_flush_lock);
	spin_lock_init(&zmd->mblk_lock);




OK. So my comment on patch 8 is already addressed. Or at least partly... Where
is last_alloc_idx actually used ? It looks like this only sets last_alloc_idx
but do not use that value on entry to dmz_alloc_zone() to allocate the zone.


Aw, fsck. Something went astray when generating the patches.
Will be fixing it up for the next round.

Cheers,

Hannes
--
Dr. Hannes Reinecke		Teamlead Storage & Networking
h...@suse.de   +49 911 74053 688
SUSE Software Solutions GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), Geschäftsführer: Felix Imendörffer


--
dm-devel mailing list
dm-devel@redhat.com
https://www.redhat.com/mailman/listinfo/dm-devel

Re: [dm-devel] [PATCH 11/12] dm-zoned: round-robin load balancer for reclaiming zones

2020-05-24 Thread Damien Le Moal
On 2020/05/23 0:39, Hannes Reinecke wrote:
> When reclaiming zones we should arbitrate between the zoned
> devices to get a better throughput. So implement a simple
> round-robin load balancer between the zoned devices.
> 
> Signed-off-by: Hannes Reinecke 
> ---
>  drivers/md/dm-zoned-metadata.c | 8 +++-
>  1 file changed, 7 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
> index 87784e7785bc..25dcad2a565f 100644
> --- a/drivers/md/dm-zoned-metadata.c
> +++ b/drivers/md/dm-zoned-metadata.c
> @@ -171,6 +171,8 @@ struct dmz_metadata {
>   unsigned int		nr_reserved_seq;
>   unsigned int		nr_chunks;
>  
> + unsigned int		last_alloc_idx;
> +
>   /* Zone information array */
>   struct xarray   zones;
>  
> @@ -2178,7 +2180,7 @@ struct dm_zone *dmz_alloc_zone(struct dmz_metadata 
> *zmd, unsigned long flags)
>  {
>   struct list_head *list;
>   struct dm_zone *zone;
> - unsigned int dev_idx = 0;
> + unsigned int dev_idx = zmd->last_alloc_idx;
>  
>  again:
>   if (flags & DMZ_ALLOC_CACHE)
> @@ -2214,6 +2216,9 @@ struct dm_zone *dmz_alloc_zone(struct dmz_metadata 
> *zmd, unsigned long flags)
>   zone = list_first_entry(list, struct dm_zone, link);
> 	list_del_init(&zone->link);
>  
> + if (!(flags & DMZ_ALLOC_CACHE))
> + zmd->last_alloc_idx = (dev_idx + 1) % zmd->nr_devs;
> +
>   if (dmz_is_cache(zone))
> 	atomic_dec(&zmd->unmap_nr_cache);
>   else if (dmz_is_rnd(zone))
> @@ -2839,6 +2844,7 @@ int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
>   zmd->dev = dev;
>   zmd->nr_devs = num_dev;
>   zmd->mblk_rbtree = RB_ROOT;
> + zmd->last_alloc_idx = 0;
> 	init_rwsem(&zmd->mblk_sem);
> 	mutex_init(&zmd->mblk_flush_lock);
> 	spin_lock_init(&zmd->mblk_lock);
> 


OK. So my comment on patch 8 is already addressed. Or at least partly... Where
is last_alloc_idx actually used ? It looks like this only sets last_alloc_idx
but do not use that value on entry to dmz_alloc_zone() to allocate the zone.

-- 
Damien Le Moal
Western Digital Research



--
dm-devel mailing list
dm-devel@redhat.com
https://www.redhat.com/mailman/listinfo/dm-devel



[dm-devel] [PATCH 11/12] dm-zoned: round-robin load balancer for reclaiming zones

2020-05-22 Thread Hannes Reinecke
When reclaiming zones we should arbitrate between the zoned
devices to get a better throughput. So implement a simple
round-robin load balancer between the zoned devices.

Signed-off-by: Hannes Reinecke 
---
 drivers/md/dm-zoned-metadata.c | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 87784e7785bc..25dcad2a565f 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -171,6 +171,8 @@ struct dmz_metadata {
	unsigned int		nr_reserved_seq;
	unsigned int		nr_chunks;
 
+	unsigned int		last_alloc_idx;
+
/* Zone information array */
struct xarray   zones;
 
@@ -2178,7 +2180,7 @@ struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, 
unsigned long flags)
 {
struct list_head *list;
struct dm_zone *zone;
-   unsigned int dev_idx = 0;
+   unsigned int dev_idx = zmd->last_alloc_idx;
 
 again:
if (flags & DMZ_ALLOC_CACHE)
@@ -2214,6 +2216,9 @@ struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, 
unsigned long flags)
zone = list_first_entry(list, struct dm_zone, link);
	list_del_init(&zone->link);
 
+   if (!(flags & DMZ_ALLOC_CACHE))
+   zmd->last_alloc_idx = (dev_idx + 1) % zmd->nr_devs;
+
if (dmz_is_cache(zone))
	atomic_dec(&zmd->unmap_nr_cache);
else if (dmz_is_rnd(zone))
@@ -2839,6 +2844,7 @@ int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
zmd->dev = dev;
zmd->nr_devs = num_dev;
zmd->mblk_rbtree = RB_ROOT;
+   zmd->last_alloc_idx = 0;
	init_rwsem(&zmd->mblk_sem);
	mutex_init(&zmd->mblk_flush_lock);
	spin_lock_init(&zmd->mblk_lock);
-- 
2.16.4

--
dm-devel mailing list
dm-devel@redhat.com
https://www.redhat.com/mailman/listinfo/dm-devel