Re: [PATCH 4/7] lightnvm: drop reserve and release LUN callbacks

2016-10-31 Thread Matias Bjørling
On 10/31/2016 09:22 PM, Jens Axboe wrote:
> On 10/31/2016 07:08 AM, Matias Bjørling wrote:
>> On 10/27/2016 08:01 PM, Javier González wrote:
>>> From: Javier González 
>>>
>>> On target initialization, targets use callbacks to the media manager to
>>> configure the LUNs they use. In order to simplify the flow, drop these
>>> callbacks and manage everything internally on the media manager.
>>>
>>> By making use of the newly introduced LUN management structure, the media
>>> manager knows which target exclusively owns each LUN and can
>>> therefore allocate and free all the necessary structures before
>>> initializing the target. Not exclusively owned LUNs belong to the media
>>> manager in any case.
>>>
>>> Adapt rrpc to not use the reserve_lun/release_lun callback functions.
>>>
>>> Signed-off-by: Javier González 
>>> ---
>>>  drivers/lightnvm/gennvm.c | 68
>>> ---
>>>  drivers/lightnvm/rrpc.c   | 12 +
>>>  include/linux/lightnvm.h  |  5 +---
>>>  3 files changed, 55 insertions(+), 30 deletions(-)
>>>
>>> diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
>>> index 8bff725..a340685 100644
>>> --- a/drivers/lightnvm/gennvm.c
>>> +++ b/drivers/lightnvm/gennvm.c
>>> @@ -35,6 +35,50 @@ static const struct block_device_operations
>>> gen_fops = {
>>>  .owner= THIS_MODULE,
>>>  };
>>>
>>> +static int gen_reserve_luns(struct nvm_dev *dev, int lun_begin, int
>>> lun_end,
>>> +struct nvm_target *t)
>>> +{
>>> +struct gen_dev *gn = dev->mp;
>>> +struct gen_lun *lun;
>>> +int i;
>>> +
>>> +for (i = lun_begin; i <= lun_end; i++) {
>>> +if (test_and_set_bit(i, dev->lun_map)) {
>>> +pr_err("gennvm: lun %d is already allocated\n", i);
>>> +goto fail;
>>> +}
>>> +
>>> +lun = &gn->luns[i];
>>> +lun->tgt = t;
>>> +lun->vlun.priv = lun->mgmt;
>>> +}
>>> +
>>> +return 0;
>>> +fail:
>>> +while (--i > lun_begin)
>>> +clear_bit(i, dev->lun_map);
>>> +
>>> +return 1;
>>
>> return -EINVAL;
> 
> -EBUSY?
> 
>> Lad os lige snakke lidt om dette her senere også :)
> 
> And probably keep the public emails in English :-)
> 

Haha, will do. Good to spice up the mailing list a bit :)
--
To unsubscribe from this list: send the line "unsubscribe linux-block" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 4/7] lightnvm: drop reserve and release LUN callbacks

2016-10-31 Thread Matias Bjørling
On 10/27/2016 08:01 PM, Javier González wrote:
> From: Javier González 
> 
> On target initialization, targets use callbacks to the media manager to
> configure the LUNs they use. In order to simplify the flow, drop these
> callbacks and manage everything internally on the media manager.
> 
> By making use of the newly introduced LUN management structure, the media
> manager knows which target exclusively owns each LUN and can
> therefore allocate and free all the necessary structures before
> initializing the target. Not exclusively owned LUNs belong to the media
> manager in any case.
> 
> Adapt rrpc to not use the reserve_lun/release_lun callback functions.
> 
> Signed-off-by: Javier González 
> ---
>  drivers/lightnvm/gennvm.c | 68 
> ---
>  drivers/lightnvm/rrpc.c   | 12 +
>  include/linux/lightnvm.h  |  5 +---
>  3 files changed, 55 insertions(+), 30 deletions(-)
> 
> diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
> index 8bff725..a340685 100644
> --- a/drivers/lightnvm/gennvm.c
> +++ b/drivers/lightnvm/gennvm.c
> @@ -35,6 +35,50 @@ static const struct block_device_operations gen_fops = {
>   .owner  = THIS_MODULE,
>  };
>  
> +static int gen_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end,
> + struct nvm_target *t)
> +{
> + struct gen_dev *gn = dev->mp;
> + struct gen_lun *lun;
> + int i;
> +
> + for (i = lun_begin; i <= lun_end; i++) {
> + if (test_and_set_bit(i, dev->lun_map)) {
> + pr_err("gennvm: lun %d is already allocated\n", i);
> + goto fail;
> + }
> +
> + lun = &gn->luns[i];
> + lun->tgt = t;
> + lun->vlun.priv = lun->mgmt;
> + }
> +
> + return 0;
> +fail:
> + while (--i > lun_begin)
> + clear_bit(i, dev->lun_map);
> +
> + return 1;

return -EINVAL;

Lad os lige snakke lidt om dette her senere også :)
> +}
> +
> +static void gen_release_luns(struct nvm_dev *dev, struct nvm_target *t)
> +{
> + struct gen_dev *gn = dev->mp;
> + struct gen_lun *lun;
> + int lunid;
> + int i;
> +
> + gen_for_each_lun(gn, lun, i) {
> + if (lun->tgt != t)
> + continue;
> +
> + lunid = lun->vlun.id;
> + WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
> + lun->vlun.priv = NULL;
> + lun->tgt = NULL;
> + }
> +}
> +
>  static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create 
> *create)
>  {
>   struct gen_dev *gn = dev->mp;
> @@ -80,6 +124,9 @@ static int gen_create_tgt(struct nvm_dev *dev, struct 
> nvm_ioctl_create *create)
>   tdisk->fops = &gen_fops;
>   tdisk->queue = tqueue;
>  
> + if (tt->exclusive && gen_reserve_luns(dev, s->lun_begin, s->lun_end, t))
> + goto err_reserve;
> +
>   targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
>   if (IS_ERR(targetdata))
>   goto err_init;
> @@ -102,6 +149,8 @@ static int gen_create_tgt(struct nvm_dev *dev, struct 
> nvm_ioctl_create *create)
>  
>   return 0;
>  err_init:
> + gen_release_luns(dev, t);
> +err_reserve:
>   put_disk(tdisk);
>  err_queue:
>   blk_cleanup_queue(tqueue);
> @@ -110,7 +159,7 @@ static int gen_create_tgt(struct nvm_dev *dev, struct 
> nvm_ioctl_create *create)
>   return -ENOMEM;
>  }
>  
> -static void __gen_remove_target(struct nvm_target *t)
> +static void __gen_remove_target(struct nvm_dev *dev, struct nvm_target *t)
>  {
>   struct nvm_tgt_type *tt = t->type;
>   struct gendisk *tdisk = t->disk;
> @@ -122,6 +171,7 @@ static void __gen_remove_target(struct nvm_target *t)
>   if (tt->exit)
>   tt->exit(tdisk->private_data);
>  
> + gen_release_luns(dev, t);
>   put_disk(tdisk);
>  
>   list_del(&t->list);
> @@ -152,7 +202,7 @@ static int gen_remove_tgt(struct nvm_dev *dev, struct 
> nvm_ioctl_remove *remove)
>   mutex_unlock(&gn->lock);
>   return 1;
>   }
> - __gen_remove_target(t);
> + __gen_remove_target(dev, t);
>   mutex_unlock(&gn->lock);
>  
>   return 0;
> @@ -474,7 +524,7 @@ static void gen_unregister(struct nvm_dev *dev)
>   list_for_each_entry_safe(t, tmp, &gn->targets, list) {
>   if (t->dev != dev)
>   continue;
> - __gen_remove_target(t);
> + __gen_remove_target(dev, t);
>   }
>   mutex_unlock(&gn->lock);
>  
> @@ -618,16 +668,6 @@ static int gen_erase_blk(struct nvm_dev *dev, struct 
> nvm_block *blk, int flags)
>   return nvm_erase_ppa(dev, &ppa, 1, flags);
>  }
>  
> -static int gen_reserve_lun(struct nvm_dev *dev, int lunid)
> -{
> - return test_and_set_bit(lunid, dev->lun_map);
> -}
> -
> -static void gen_release_lun(struct nvm_dev *dev, int lunid)
> -{
> - WARN_ON(!test_and_clear_bit(lunid, 

[PATCH 4/7] lightnvm: drop reserve and release LUN callbacks

2016-10-27 Thread Javier González
From: Javier González 

On target initialization, targets use callbacks to the media manager to
configure the LUNs they use. In order to simplify the flow, drop these
callbacks and manage everything internally on the media manager.

By making use of the newly introduced LUN management structure, the media
manager knows which target exclusively owns each LUN and can
therefore allocate and free all the necessary structures before
initializing the target. Not exclusively owned LUNs belong to the media
manager in any case.

Adapt rrpc to not use the reserve_lun/release_lun callback functions.

Signed-off-by: Javier González 
---
 drivers/lightnvm/gennvm.c | 68 ---
 drivers/lightnvm/rrpc.c   | 12 +
 include/linux/lightnvm.h  |  5 +---
 3 files changed, 55 insertions(+), 30 deletions(-)

diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 8bff725..a340685 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -35,6 +35,50 @@ static const struct block_device_operations gen_fops = {
.owner  = THIS_MODULE,
 };
 
+static int gen_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end,
+   struct nvm_target *t)
+{
+   struct gen_dev *gn = dev->mp;
+   struct gen_lun *lun;
+   int i;
+
+   for (i = lun_begin; i <= lun_end; i++) {
+   if (test_and_set_bit(i, dev->lun_map)) {
+   pr_err("gennvm: lun %d is already allocated\n", i);
+   goto fail;
+   }
+
+   lun = &gn->luns[i];
+   lun->tgt = t;
+   lun->vlun.priv = lun->mgmt;
+   }
+
+   return 0;
+fail:
+   while (--i > lun_begin)
+   clear_bit(i, dev->lun_map);
+
+   return 1;
+}
+
+static void gen_release_luns(struct nvm_dev *dev, struct nvm_target *t)
+{
+   struct gen_dev *gn = dev->mp;
+   struct gen_lun *lun;
+   int lunid;
+   int i;
+
+   gen_for_each_lun(gn, lun, i) {
+   if (lun->tgt != t)
+   continue;
+
+   lunid = lun->vlun.id;
+   WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+   lun->vlun.priv = NULL;
+   lun->tgt = NULL;
+   }
+}
+
 static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 {
struct gen_dev *gn = dev->mp;
@@ -80,6 +124,9 @@ static int gen_create_tgt(struct nvm_dev *dev, struct 
nvm_ioctl_create *create)
	tdisk->fops = &gen_fops;
tdisk->queue = tqueue;
 
+   if (tt->exclusive && gen_reserve_luns(dev, s->lun_begin, s->lun_end, t))
+   goto err_reserve;
+
targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
if (IS_ERR(targetdata))
goto err_init;
@@ -102,6 +149,8 @@ static int gen_create_tgt(struct nvm_dev *dev, struct 
nvm_ioctl_create *create)
 
return 0;
 err_init:
+   gen_release_luns(dev, t);
+err_reserve:
put_disk(tdisk);
 err_queue:
blk_cleanup_queue(tqueue);
@@ -110,7 +159,7 @@ static int gen_create_tgt(struct nvm_dev *dev, struct 
nvm_ioctl_create *create)
return -ENOMEM;
 }
 
-static void __gen_remove_target(struct nvm_target *t)
+static void __gen_remove_target(struct nvm_dev *dev, struct nvm_target *t)
 {
struct nvm_tgt_type *tt = t->type;
struct gendisk *tdisk = t->disk;
@@ -122,6 +171,7 @@ static void __gen_remove_target(struct nvm_target *t)
if (tt->exit)
tt->exit(tdisk->private_data);
 
+   gen_release_luns(dev, t);
put_disk(tdisk);
 
	list_del(&t->list);
@@ -152,7 +202,7 @@ static int gen_remove_tgt(struct nvm_dev *dev, struct 
nvm_ioctl_remove *remove)
	mutex_unlock(&gn->lock);
return 1;
}
-   __gen_remove_target(t);
+   __gen_remove_target(dev, t);
	mutex_unlock(&gn->lock);
 
return 0;
@@ -474,7 +524,7 @@ static void gen_unregister(struct nvm_dev *dev)
	list_for_each_entry_safe(t, tmp, &gn->targets, list) {
if (t->dev != dev)
continue;
-   __gen_remove_target(t);
+   __gen_remove_target(dev, t);
}
	mutex_unlock(&gn->lock);
 
@@ -618,16 +668,6 @@ static int gen_erase_blk(struct nvm_dev *dev, struct 
nvm_block *blk, int flags)
	return nvm_erase_ppa(dev, &ppa, 1, flags);
 }
 
-static int gen_reserve_lun(struct nvm_dev *dev, int lunid)
-{
-   return test_and_set_bit(lunid, dev->lun_map);
-}
-
-static void gen_release_lun(struct nvm_dev *dev, int lunid)
-{
-   WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
-}
-
 static struct nvm_lun *gen_get_lun(struct nvm_dev *dev, int lunid)
 {
struct gen_dev *gn = dev->mp;
@@ -674,8 +714,6 @@ static struct nvmm_type gen = {
.mark_blk   = gen_mark_blk,
 
.get_lun= gen_get_lun,
-   

[PATCH 4/7] lightnvm: drop reserve and release LUN callbacks

2016-10-27 Thread Javier González
On target initialization, targets use callbacks to the media manager to
configure the LUNs they use. In order to simplify the flow, drop this
callbacks and manage everything internally on the media manager.

By making use of the newly introduce LUN management structure, the media
manager knows which target exclusively owns each target and can
therefore allocate and free all the necessary structures before
initializing the target. Not exclusively owned LUNs belong to the media
manager in any case.

Adapt rrpc to not use the reserve_lun/release_lun callback functions.

Signed-off-by: Javier González 
---
 drivers/lightnvm/gennvm.c | 62 +++
 drivers/lightnvm/rrpc.c   | 12 +
 include/linux/lightnvm.h  |  5 +---
 3 files changed, 49 insertions(+), 30 deletions(-)

diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 8bff725..575afc4 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -35,6 +35,30 @@ static const struct block_device_operations gen_fops = {
.owner  = THIS_MODULE,
 };
 
+static int gen_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end,
+   struct nvm_target *t)
+{
+   struct gen_dev *gn = dev->mp;
+   struct gen_lun *lun;
+   int i;
+
+   for (i = lun_begin; i <= lun_end; i++) {
+   if (test_and_set_bit(i, dev->lun_map)) {
+   pr_err("gennvm: lun %d is already allocated\n", i);
+   goto fail;
+   }
+
+   lun = &gn->luns[i];
+   }
+
+   return 0;
+fail:
+   while (--i > lun_begin)
+   clear_bit(i, dev->lun_map);
+
+   return 1;
+}
+
 static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 {
struct gen_dev *gn = dev->mp;
@@ -80,6 +104,9 @@ static int gen_create_tgt(struct nvm_dev *dev, struct 
nvm_ioctl_create *create)
	tdisk->fops = &gen_fops;
tdisk->queue = tqueue;
 
+   if (tt->exclusive && gen_reserve_luns(dev, s->lun_begin, s->lun_end, t))
+   goto err_init;
+
targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
if (IS_ERR(targetdata))
goto err_init;
@@ -110,7 +137,23 @@ err_t:
return -ENOMEM;
 }
 
-static void __gen_remove_target(struct nvm_target *t)
+static void gen_release_luns(struct nvm_dev *dev, struct nvm_target *t)
+{
+   struct gen_dev *gn = dev->mp;
+   struct gen_lun *lun;
+   int lunid;
+   int i;
+
+   gen_for_each_lun(gn, lun, i) {
+   if (lun->tgt != t)
+   continue;
+
+   lunid = lun->vlun.id;
+   WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+   }
+}
+
+static void __gen_remove_target(struct nvm_dev *dev, struct nvm_target *t)
 {
struct nvm_tgt_type *tt = t->type;
struct gendisk *tdisk = t->disk;
@@ -122,6 +165,7 @@ static void __gen_remove_target(struct nvm_target *t)
if (tt->exit)
tt->exit(tdisk->private_data);
 
+   gen_release_luns(dev, t);
put_disk(tdisk);
 
	list_del(&t->list);
@@ -152,7 +196,7 @@ static int gen_remove_tgt(struct nvm_dev *dev, struct 
nvm_ioctl_remove *remove)
	mutex_unlock(&gn->lock);
return 1;
}
-   __gen_remove_target(t);
+   __gen_remove_target(dev, t);
	mutex_unlock(&gn->lock);
 
return 0;
@@ -474,7 +518,7 @@ static void gen_unregister(struct nvm_dev *dev)
	list_for_each_entry_safe(t, tmp, &gn->targets, list) {
if (t->dev != dev)
continue;
-   __gen_remove_target(t);
+   __gen_remove_target(dev, t);
}
	mutex_unlock(&gn->lock);
 
@@ -618,16 +662,6 @@ static int gen_erase_blk(struct nvm_dev *dev, struct 
nvm_block *blk, int flags)
	return nvm_erase_ppa(dev, &ppa, 1, flags);
 }
 
-static int gen_reserve_lun(struct nvm_dev *dev, int lunid)
-{
-   return test_and_set_bit(lunid, dev->lun_map);
-}
-
-static void gen_release_lun(struct nvm_dev *dev, int lunid)
-{
-   WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
-}
-
 static struct nvm_lun *gen_get_lun(struct nvm_dev *dev, int lunid)
 {
struct gen_dev *gn = dev->mp;
@@ -674,8 +708,6 @@ static struct nvmm_type gen = {
.mark_blk   = gen_mark_blk,
 
.get_lun= gen_get_lun,
-   .reserve_lun= gen_reserve_lun,
-   .release_lun= gen_release_lun,
.lun_info_print = gen_lun_info_print,
 
.get_area   = gen_get_area,
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index f293d00..cb30ccf 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -1167,8 +1167,6 @@ static void rrpc_core_free(struct rrpc *rrpc)
 
 static void rrpc_luns_free(struct rrpc *rrpc)
 {
-   struct nvm_dev *dev = rrpc->dev;
-