Re: [PATCH 4/5] lightnvm: pblk: Support for packed metadata in pblk.

2018-06-20 Thread Javier Gonzalez

> On 20 Jun 2018, at 00.20, Igor Konopko  wrote:
> 
> On 19.06.2018 05:47, Javier Gonzalez wrote:
>>> On 19 Jun 2018, at 14.42, Matias Bjørling  wrote:
>>> 
>>> On Tue, Jun 19, 2018 at 1:08 PM, Javier Gonzalez wrote:
> On 16 Jun 2018, at 00.27, Igor Konopko  wrote:
> [quoted patch trimmed; see the original posting below]

Re: [PATCH 4/5] lightnvm: pblk: Support for packed metadata in pblk.

2018-06-19 Thread Igor Konopko




On 19.06.2018 05:47, Javier Gonzalez wrote:

On 19 Jun 2018, at 14.42, Matias Bjørling  wrote:

On Tue, Jun 19, 2018 at 1:08 PM, Javier Gonzalez  wrote:

On 16 Jun 2018, at 00.27, Igor Konopko  wrote:

[quoted patch trimmed; see the original posting below]

Re: [PATCH 4/5] lightnvm: pblk: Support for packed metadata in pblk.

2018-06-19 Thread Javier Gonzalez
> On 19 Jun 2018, at 14.42, Matias Bjørling  wrote:
> 
> On Tue, Jun 19, 2018 at 1:08 PM, Javier Gonzalez  wrote:
>>> On 16 Jun 2018, at 00.27, Igor Konopko  wrote:
>>> 
>>> [quoted patch trimmed; see the original posting below]

Re: [PATCH 4/5] lightnvm: pblk: Support for packed metadata in pblk.

2018-06-19 Thread Matias Bjørling
On Tue, Jun 19, 2018 at 1:08 PM, Javier Gonzalez  wrote:
>> On 16 Jun 2018, at 00.27, Igor Konopko  wrote:
>>
>> [quoted patch trimmed; see the original posting below]

Re: [PATCH 4/5] lightnvm: pblk: Support for packed metadata in pblk.

2018-06-19 Thread Javier Gonzalez
> On 16 Jun 2018, at 00.27, Igor Konopko  wrote:
> 
> [quoted patch trimmed; see the original posting below]

Re: [PATCH 4/5] lightnvm: pblk: Support for packed metadata in pblk.

2018-06-16 Thread Matias Bjørling

On 06/16/2018 12:27 AM, Igor Konopko wrote:

[quoted patch trimmed; only the pblk_get_packed_meta() lines from
drivers/lightnvm/pblk-core.c, which the comment below refers to, are kept.
See the original posting below for the full patch.]
+void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
+{
+   void *meta_list = rqd->meta_list;
+   void *page;
+   int i = 0;
+
+   if (pblk_is_oob_meta_supported(pblk))
+   return;
+
+   page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
+   /* We need to fill oob meta buffer with data from packed metadata */
+   for (; i < rqd->nr_ppas; i++)


Initialize i here. i = 0;


+   memcpy(pblk_get_meta_at(pblk, meta_list, i),
+   page + (i * sizeof(struct pblk_sec_meta)),
+   sizeof(struct pblk_sec_meta));
+}
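
A minimal sketch of the suggested change (only the initialization moves into
the for statement; the loop body stays as in the patch, and the
"int i = 0;" declaration above would then become "int i;"):

	for (i = 0; i < rqd->nr_ppas; i++)
		memcpy(pblk_get_meta_at(pblk, meta_list, i),
			page + (i * sizeof(struct pblk_sec_meta)),
			sizeof(struct pblk_sec_meta));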

[PATCH 4/5] lightnvm: pblk: Support for packed metadata in pblk.

2018-06-15 Thread Igor Konopko
In the current pblk implementation, the l2p mapping for lines that are not
yet closed is stored only in OOB metadata and recovered from it.

Such a solution does not provide data integrity when the drive does not
have OOB metadata space.

The goal of this patch is to add support for so-called packed metadata,
which stores the l2p mapping for open lines in the last sector of every
write unit.
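
For illustration only (this sketch is not part of the patch; the 8-sector
write unit, the 8-byte per-sector entry and the single metadata sector per
unit are assumptions made here), the resulting write-unit layout is roughly:

  /* Sketch: one write unit when the device has no OOB metadata area.
   * Sectors 0..N-2 carry user data; sector N-1 carries one lba entry per
   * sector of the unit, so the l2p mapping of an open line can be
   * recovered from the data area itself.
   */
  #include <stdint.h>

  #define WRITE_UNIT_SECS	8			/* assumed ws_opt */
  #define DATA_SECS		(WRITE_UNIT_SECS - 1)	/* assumed: one meta sector per unit */
  #define ADDR_EMPTY_LBA	(~0ULL)			/* stand-in for ADDR_EMPTY */

  struct sec_meta {					/* stand-in for struct pblk_sec_meta */
  	uint64_t lba;
  };

  /* Build the packed-metadata sector for one write unit: entry i holds the
   * lba written in data sector i; the metadata sector's own entry stays
   * empty.
   */
  static void build_packed_meta(const uint64_t lbas[DATA_SECS],
  				struct sec_meta packed[WRITE_UNIT_SECS])
  {
  	int i;

  	for (i = 0; i < DATA_SECS; i++)
  		packed[i].lba = lbas[i];
  	packed[WRITE_UNIT_SECS - 1].lba = ADDR_EMPTY_LBA;
  }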

Signed-off-by: Igor Konopko 
---
 drivers/lightnvm/pblk-core.c     | 52 
 drivers/lightnvm/pblk-init.c     | 37 ++--
 drivers/lightnvm/pblk-rb.c       |  3 +++
 drivers/lightnvm/pblk-recovery.c | 25 +++
 drivers/lightnvm/pblk-sysfs.c    |  7 ++
 drivers/lightnvm/pblk-write.c    | 14 +++
 drivers/lightnvm/pblk.h          |  5 +++-
 7 files changed, 128 insertions(+), 15 deletions(-)

diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index c092ee93a18d..375c6430612e 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -340,7 +340,7 @@ void pblk_write_should_kick(struct pblk *pblk)
 {
	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
 
-   if (secs_avail >= pblk->min_write_pgs)
+   if (secs_avail >= pblk->min_write_pgs_data)
pblk_write_kick(pblk);
 }
 
@@ -371,7 +371,9 @@ struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;
-	int vsc = le32_to_cpu(*line->vsc);
+	int packed_meta = (le32_to_cpu(*line->vsc) / pblk->min_write_pgs_data)
+		* (pblk->min_write_pgs - pblk->min_write_pgs_data);
+	int vsc = le32_to_cpu(*line->vsc) + packed_meta;

	lockdep_assert_held(&line->lock);
 
@@ -540,12 +542,15 @@ struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
 }
 
 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
-  unsigned long secs_to_flush)
+  unsigned long secs_to_flush, bool skip_meta)
 {
int max = pblk->sec_per_write;
int min = pblk->min_write_pgs;
int secs_to_sync = 0;
 
+   if (skip_meta)
+   min = max = pblk->min_write_pgs_data;
+
if (secs_avail >= max)
secs_to_sync = max;
else if (secs_avail >= min)
@@ -663,7 +668,7 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
 next_rq:
	memset(&rqd, 0, sizeof(struct nvm_rq));
 
-   rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
+   rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
rq_len = rq_ppas * geo->csecs;
 
bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
@@ -2091,3 +2096,42 @@ void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
}
	spin_unlock(&pblk->trans_lock);
 }
+
+void pblk_set_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
+{
+   void *meta_list = rqd->meta_list;
+   void *page;
+   int i = 0;
+
+   if (pblk_is_oob_meta_supported(pblk))
+   return;
+
+   /* We need to zero out metadata corresponding to packed meta page */
+   pblk_get_meta_at(pblk, meta_list, rqd->nr_ppas - 1)->lba = ADDR_EMPTY;
+
+   page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
+   /* We need to fill last page of request (packed metadata)
+* with data from oob meta buffer.
+*/
+   for (; i < rqd->nr_ppas; i++)
+   memcpy(page + (i * sizeof(struct pblk_sec_meta)),
+   pblk_get_meta_at(pblk, meta_list, i),
+   sizeof(struct pblk_sec_meta));
+}
+
+void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
+{
+   void *meta_list = rqd->meta_list;
+   void *page;
+   int i = 0;
+
+   if (pblk_is_oob_meta_supported(pblk))
+   return;
+
+   page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
+   /* We need to fill oob meta buffer with data from packed metadata */
+   for (; i < rqd->nr_ppas; i++)
+   memcpy(pblk_get_meta_at(pblk, meta_list, i),
+   page + (i * sizeof(struct pblk_sec_meta)),
+   sizeof(struct pblk_sec_meta));
+}
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index f05112230a52..5eb641da46ed 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -372,8 +372,40 @@ static int pblk_core_init(struct pblk *pblk)
pblk->min_write_pgs = geo->ws_opt * (geo->csecs / PAGE_SIZE);
max_write_ppas = pblk->min_write_pgs * geo->all_luns;
pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
+   pblk->min_write_pgs_data = pblk->min_write_pgs;
pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
 
+   if (!pblk_is_oob_meta_supported(pblk)) {
+   /* For drives which does not