Re: [dm-devel] [PATCH 08/12] dm-zoned: move random and sequential zones into struct dmz_dev

2020-05-25 Thread Hannes Reinecke

On 5/25/20 4:27 AM, Damien Le Moal wrote:

On 2020/05/23 0:39, Hannes Reinecke wrote:

Random and sequential zones should be part of the respective
device structure to make arbitration between devices possible.

Signed-off-by: Hannes Reinecke 
---
  drivers/md/dm-zoned-metadata.c | 143 +
  drivers/md/dm-zoned.h  |  10 +++
  2 files changed, 99 insertions(+), 54 deletions(-)
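(The dm-zoned.h hunk is cut off further down in this archive, so here is a
sketch of the per-device fields implied by the dm-zoned-metadata.c changes;
their exact placement and ordering inside struct dmz_dev are assumptions,
not taken from the truncated header diff:

struct dmz_dev {
	/* ... existing fields (bdev, capacity, nr_zones, metadata, ...) ... */

	/* Zone allocation management, now kept per device */
	unsigned int		nr_rnd;		/* random zones on this device */
	atomic_t		unmap_nr_rnd;	/* thereof unmapped */
	struct list_head	unmap_rnd_list;
	struct list_head	map_rnd_list;

	unsigned int		nr_seq;		/* sequential zones on this device */
	atomic_t		unmap_nr_seq;	/* thereof unmapped */
	struct list_head	unmap_seq_list;
	struct list_head	map_seq_list;
};)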

diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 1b9da698a812..5f44970a6187 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -192,21 +192,12 @@ struct dmz_metadata {
 	/* Zone allocation management */
 	struct mutex		map_lock;
 	struct dmz_mblock	**map_mblk;
-	unsigned int		nr_rnd;
-	atomic_t		unmap_nr_rnd;
-	struct list_head	unmap_rnd_list;
-	struct list_head	map_rnd_list;
 
 	unsigned int		nr_cache;
 	atomic_t		unmap_nr_cache;
 	struct list_head	unmap_cache_list;
 	struct list_head	map_cache_list;
 
-	unsigned int		nr_seq;
-	atomic_t		unmap_nr_seq;
-	struct list_head	unmap_seq_list;
-	struct list_head	map_seq_list;
-
 	atomic_t		nr_reserved_seq_zones;
 	struct list_head	reserved_seq_zones_list;
 
@@ -281,12 +272,22 @@ unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
 
 unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd)
 {
-	return zmd->nr_rnd;
+	unsigned int nr_rnd_zones = 0;
+	int i;
+
+	for (i = 0; i < zmd->nr_devs; i++)
+		nr_rnd_zones += zmd->dev[i].nr_rnd;

We could keep the total nr_rnd_zones in dmz_metadata to avoid this one, since the
value will never change at run time.

Yeah, we could, but in the end this is only used for logging, so it's
hardly performance critical.

And I have an aversion to having two counters for the same thing;
they inevitably tend to get out of sync.
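For illustration, the two options side by side (a sketch only; the
total_nr_rnd field and the _cached helper name are hypothetical, not part
of the patch):

/* This patch: sum the per-device counts on demand. The loop over
 * zmd->nr_devs runs only for logging, and there is no second copy
 * of the count that could go stale. */
unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd)
{
	unsigned int nr_rnd_zones = 0;
	int i;

	for (i = 0; i < zmd->nr_devs; i++)
		nr_rnd_zones += zmd->dev[i].nr_rnd;
	return nr_rnd_zones;
}

/* Damien's suggestion: cache the total in dmz_metadata once, while the
 * zones are first counted; the value never changes at runtime. */
unsigned int dmz_nr_rnd_zones_cached(struct dmz_metadata *zmd)
{
	return zmd->total_nr_rnd;	/* hypothetical extra field */
}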


+	return nr_rnd_zones;
 }
 
 unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd)
 {
-	return atomic_read(&zmd->unmap_nr_rnd);
+	unsigned int nr_unmap_rnd_zones = 0;
+	int i;
+
+	for (i = 0; i < zmd->nr_devs; i++)
+		nr_unmap_rnd_zones += atomic_read(&zmd->dev[i].unmap_nr_rnd);
+	return nr_unmap_rnd_zones;
 }
 
 unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd)
@@ -301,12 +302,22 @@ unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd)
 
 unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd)
 {
-	return zmd->nr_seq;
+	unsigned int nr_seq_zones = 0;
+	int i;
+
+	for (i = 0; i < zmd->nr_devs; i++)
+		nr_seq_zones += zmd->dev[i].nr_seq;

Same here. This value does not change at runtime.


+	return nr_seq_zones;
 }
 
 unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd)
 {
-	return atomic_read(&zmd->unmap_nr_seq);
+	unsigned int nr_unmap_seq_zones = 0;
+	int i;
+
+	for (i = 0; i < zmd->nr_devs; i++)
+		nr_unmap_seq_zones += atomic_read(&zmd->dev[i].unmap_nr_seq);
+	return nr_unmap_seq_zones;
 }
 
 static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
@@ -1485,6 +1496,14 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
 
 		dev->metadata = zmd;
 		zmd->nr_zones += dev->nr_zones;
+
+		atomic_set(&dev->unmap_nr_rnd, 0);
+		INIT_LIST_HEAD(&dev->unmap_rnd_list);
+		INIT_LIST_HEAD(&dev->map_rnd_list);
+
+		atomic_set(&dev->unmap_nr_seq, 0);
+		INIT_LIST_HEAD(&dev->unmap_seq_list);
+		INIT_LIST_HEAD(&dev->map_seq_list);
 	}
 
 	if (!zmd->nr_zones) {
@@ -1702,9 +1721,9 @@ static int dmz_load_mapping(struct dmz_metadata *zmd)
 		if (dmz_is_cache(dzone))
 			list_add_tail(&dzone->link, &zmd->map_cache_list);
 		else if (dmz_is_rnd(dzone))
-			list_add_tail(&dzone->link, &zmd->map_rnd_list);
+			list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
 		else
-			list_add_tail(&dzone->link, &zmd->map_seq_list);
+			list_add_tail(&dzone->link, &dzone->dev->map_seq_list);
 
 		/* Check buffer zone */
 		bzone_id = le32_to_cpu(dmap[e].bzone_id);
@@ -1738,7 +1757,7 @@ static int dmz_load_mapping(struct dmz_metadata *zmd)
 		if (dmz_is_cache(bzone))
 			list_add_tail(&bzone->link, &zmd->map_cache_list);
 		else
-			list_add_tail(&bzone->link, &zmd->map_rnd_list);
+			list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
 next:
 		chunk++;
 		e++;
@@ -1763,9 +1782,9 @@ static int dmz_load_mapping(struct dmz_metadata *zmd)
 		if (dmz_is_cache(dzone))
 			zmd->nr_cache++;
 		else if (dmz_is_rnd(dzone))
-			zmd->nr_rnd++;
+			dzone->dev->nr_rnd++;
 		else
-			zmd->nr_seq++;
+			dzone->dev->nr_seq++;

Re: [dm-devel] [PATCH 08/12] dm-zoned: move random and sequential zones into struct dmz_dev

2020-05-24 Thread Damien Le Moal
On 2020/05/23 0:39, Hannes Reinecke wrote:
> Random and sequential zones should be part of the respective
> device structure to make arbitration between devices possible.
> 
> Signed-off-by: Hannes Reinecke 
> ---
>  drivers/md/dm-zoned-metadata.c | 143 +
>  drivers/md/dm-zoned.h  |  10 +++
>  2 files changed, 99 insertions(+), 54 deletions(-)
> 
> diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
> index 1b9da698a812..5f44970a6187 100644
> --- a/drivers/md/dm-zoned-metadata.c
> +++ b/drivers/md/dm-zoned-metadata.c
> @@ -192,21 +192,12 @@ struct dmz_metadata {
>  	/* Zone allocation management */
>  	struct mutex		map_lock;
>  	struct dmz_mblock	**map_mblk;
> -	unsigned int		nr_rnd;
> -	atomic_t		unmap_nr_rnd;
> -	struct list_head	unmap_rnd_list;
> -	struct list_head	map_rnd_list;
>  
>  	unsigned int		nr_cache;
>  	atomic_t		unmap_nr_cache;
>  	struct list_head	unmap_cache_list;
>  	struct list_head	map_cache_list;
>  
> -	unsigned int		nr_seq;
> -	atomic_t		unmap_nr_seq;
> -	struct list_head	unmap_seq_list;
> -	struct list_head	map_seq_list;
> -
>  	atomic_t		nr_reserved_seq_zones;
>  	struct list_head	reserved_seq_zones_list;
>  
> @@ -281,12 +272,22 @@ unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
>  
>  unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd)
>  {
> -	return zmd->nr_rnd;
> +	unsigned int nr_rnd_zones = 0;
> +	int i;
> +
> +	for (i = 0; i < zmd->nr_devs; i++)
> +		nr_rnd_zones += zmd->dev[i].nr_rnd;

We could keep the total nr_rnd_zones in dmz_metadata to avoid this one since the
value will never change at run time.

> +	return nr_rnd_zones;
>  }
>  
>  unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd)
>  {
> -	return atomic_read(&zmd->unmap_nr_rnd);
> +	unsigned int nr_unmap_rnd_zones = 0;
> +	int i;
> +
> +	for (i = 0; i < zmd->nr_devs; i++)
> +		nr_unmap_rnd_zones += atomic_read(&zmd->dev[i].unmap_nr_rnd);
> +	return nr_unmap_rnd_zones;
>  }
>  
>  unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd)
> @@ -301,12 +302,22 @@ unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd)
>  
>  unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd)
>  {
> -	return zmd->nr_seq;
> +	unsigned int nr_seq_zones = 0;
> +	int i;
> +
> +	for (i = 0; i < zmd->nr_devs; i++)
> +		nr_seq_zones += zmd->dev[i].nr_seq;

Same here. This value does not change at runtime.

> +	return nr_seq_zones;
>  }
>  
>  unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd)
>  {
> -	return atomic_read(&zmd->unmap_nr_seq);
> +	unsigned int nr_unmap_seq_zones = 0;
> +	int i;
> +
> +	for (i = 0; i < zmd->nr_devs; i++)
> +		nr_unmap_seq_zones += atomic_read(&zmd->dev[i].unmap_nr_seq);
> +	return nr_unmap_seq_zones;
>  }
>  
>  static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
> @@ -1485,6 +1496,14 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
>  
>  		dev->metadata = zmd;
>  		zmd->nr_zones += dev->nr_zones;
> +
> +		atomic_set(&dev->unmap_nr_rnd, 0);
> +		INIT_LIST_HEAD(&dev->unmap_rnd_list);
> +		INIT_LIST_HEAD(&dev->map_rnd_list);
> +
> +		atomic_set(&dev->unmap_nr_seq, 0);
> +		INIT_LIST_HEAD(&dev->unmap_seq_list);
> +		INIT_LIST_HEAD(&dev->map_seq_list);
>  	}
>  
>  	if (!zmd->nr_zones) {
> @@ -1702,9 +1721,9 @@ static int dmz_load_mapping(struct dmz_metadata *zmd)
>  		if (dmz_is_cache(dzone))
>  			list_add_tail(&dzone->link, &zmd->map_cache_list);
>  		else if (dmz_is_rnd(dzone))
> -			list_add_tail(&dzone->link, &zmd->map_rnd_list);
> +			list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
>  		else
> -			list_add_tail(&dzone->link, &zmd->map_seq_list);
> +			list_add_tail(&dzone->link, &dzone->dev->map_seq_list);
>  
>  		/* Check buffer zone */
>  		bzone_id = le32_to_cpu(dmap[e].bzone_id);
> @@ -1738,7 +1757,7 @@ static int dmz_load_mapping(struct dmz_metadata *zmd)
>  		if (dmz_is_cache(bzone))
>  			list_add_tail(&bzone->link, &zmd->map_cache_list);
>  		else
> -			list_add_tail(&bzone->link, &zmd->map_rnd_list);
> +			list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
>  next:
>  		chunk++;
>  		e++;
> @@ -1763,9 +1782,9 @@ static int dmz_load_mapping(struct dmz_metadata *zmd)
>  		if (dmz_is_cache(dzone))
>  			zmd->nr_cache++;
>  		else if (dmz_is_rnd(dzone))
> -			zmd->nr_rnd++;
> +			dzone->dev->nr_rnd++;
>  		else
> -			zmd->nr_seq++;
> +			dzone->dev->nr_seq++;

[dm-devel] [PATCH 08/12] dm-zoned: move random and sequential zones into struct dmz_dev

2020-05-22 Thread Hannes Reinecke
Random and sequential zones should be part of the respective
device structure to make arbitration between devices possible.

Signed-off-by: Hannes Reinecke 
---
 drivers/md/dm-zoned-metadata.c | 143 +
 drivers/md/dm-zoned.h  |  10 +++
 2 files changed, 99 insertions(+), 54 deletions(-)

diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 1b9da698a812..5f44970a6187 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -192,21 +192,12 @@ struct dmz_metadata {
 	/* Zone allocation management */
 	struct mutex		map_lock;
 	struct dmz_mblock	**map_mblk;
-	unsigned int		nr_rnd;
-	atomic_t		unmap_nr_rnd;
-	struct list_head	unmap_rnd_list;
-	struct list_head	map_rnd_list;
 
 	unsigned int		nr_cache;
 	atomic_t		unmap_nr_cache;
 	struct list_head	unmap_cache_list;
 	struct list_head	map_cache_list;
 
-	unsigned int		nr_seq;
-	atomic_t		unmap_nr_seq;
-	struct list_head	unmap_seq_list;
-	struct list_head	map_seq_list;
-
 	atomic_t		nr_reserved_seq_zones;
 	struct list_head	reserved_seq_zones_list;
 
@@ -281,12 +272,22 @@ unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
 
 unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd)
 {
-	return zmd->nr_rnd;
+	unsigned int nr_rnd_zones = 0;
+	int i;
+
+	for (i = 0; i < zmd->nr_devs; i++)
+		nr_rnd_zones += zmd->dev[i].nr_rnd;
+	return nr_rnd_zones;
 }
 
 unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd)
 {
-	return atomic_read(&zmd->unmap_nr_rnd);
+	unsigned int nr_unmap_rnd_zones = 0;
+	int i;
+
+	for (i = 0; i < zmd->nr_devs; i++)
+		nr_unmap_rnd_zones += atomic_read(&zmd->dev[i].unmap_nr_rnd);
+	return nr_unmap_rnd_zones;
 }
 
 unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd)
@@ -301,12 +302,22 @@ unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd)
 
 unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd)
 {
-	return zmd->nr_seq;
+	unsigned int nr_seq_zones = 0;
+	int i;
+
+	for (i = 0; i < zmd->nr_devs; i++)
+		nr_seq_zones += zmd->dev[i].nr_seq;
+	return nr_seq_zones;
 }
 
 unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd)
 {
-	return atomic_read(&zmd->unmap_nr_seq);
+	unsigned int nr_unmap_seq_zones = 0;
+	int i;
+
+	for (i = 0; i < zmd->nr_devs; i++)
+		nr_unmap_seq_zones += atomic_read(&zmd->dev[i].unmap_nr_seq);
+	return nr_unmap_seq_zones;
 }
 
 static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
@@ -1485,6 +1496,14 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
 
 		dev->metadata = zmd;
 		zmd->nr_zones += dev->nr_zones;
+
+		atomic_set(&dev->unmap_nr_rnd, 0);
+		INIT_LIST_HEAD(&dev->unmap_rnd_list);
+		INIT_LIST_HEAD(&dev->map_rnd_list);
+
+		atomic_set(&dev->unmap_nr_seq, 0);
+		INIT_LIST_HEAD(&dev->unmap_seq_list);
+		INIT_LIST_HEAD(&dev->map_seq_list);
 	}
 
 	if (!zmd->nr_zones) {
@@ -1702,9 +1721,9 @@ static int dmz_load_mapping(struct dmz_metadata *zmd)
 		if (dmz_is_cache(dzone))
 			list_add_tail(&dzone->link, &zmd->map_cache_list);
 		else if (dmz_is_rnd(dzone))
-			list_add_tail(&dzone->link, &zmd->map_rnd_list);
+			list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
 		else
-			list_add_tail(&dzone->link, &zmd->map_seq_list);
+			list_add_tail(&dzone->link, &dzone->dev->map_seq_list);
 
 		/* Check buffer zone */
 		bzone_id = le32_to_cpu(dmap[e].bzone_id);
@@ -1738,7 +1757,7 @@ static int dmz_load_mapping(struct dmz_metadata *zmd)
 		if (dmz_is_cache(bzone))
 			list_add_tail(&bzone->link, &zmd->map_cache_list);
 		else
-			list_add_tail(&bzone->link, &zmd->map_rnd_list);
+			list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
 next:
 		chunk++;
 		e++;
@@ -1763,9 +1782,9 @@ static int dmz_load_mapping(struct dmz_metadata *zmd)
 		if (dmz_is_cache(dzone))
 			zmd->nr_cache++;
 		else if (dmz_is_rnd(dzone))
-			zmd->nr_rnd++;
+			dzone->dev->nr_rnd++;
 		else
-			zmd->nr_seq++;
+			dzone->dev->nr_seq++;
 
 		if (dmz_is_data(dzone)) {
 			/* Already initialized */
@@ -1779,16 +1798,18 @@ static int dmz_load_mapping(struct dmz_metadata *zmd)
 			list_add_tail(&dzone->link, &zmd->unmap_cache_list);