Re: [RFC v2 16/83] Initialize block map and free lists in nova_init().

2018-03-11 Thread Andiry Xu
On Sun, Mar 11, 2018 at 5:12 AM, Nikolay Borisov wrote:
>
>
> On 10.03.2018 20:17, Andiry Xu wrote:
>> From: Andiry Xu 
>>
>> NOVA divides the pmem range equally among per-CPU free lists,
>> and formats the red-black trees by inserting the initial free range.
>>
>> Signed-off-by: Andiry Xu 
>> ---
>>  fs/nova/balloc.c | 161 +++
>>  fs/nova/balloc.h |  13 -
>>  fs/nova/super.c  |   2 +
>>  3 files changed, 175 insertions(+), 1 deletion(-)
>>
>> diff --git a/fs/nova/balloc.c b/fs/nova/balloc.c
>> index 450c942..cb627db 100644
>> --- a/fs/nova/balloc.c
>> +++ b/fs/nova/balloc.c
>> @@ -55,4 +55,165 @@ void nova_delete_free_lists(struct super_block *sb)
>>   sbi->free_lists = NULL;
>>  }
>>
>> +// Initialize a free list.  Each CPU gets an equal share of the block space to
>> +// manage.
>> +static void nova_init_free_list(struct super_block *sb,
>> + struct free_list *free_list, int index)
>> +{
>> + struct nova_sb_info *sbi = NOVA_SB(sb);
>> + unsigned long per_list_blocks;
>> +
>> + per_list_blocks = sbi->num_blocks / sbi->cpus;
>
> nit: You've already initialised per_list_blocks in nova_init_blockmap,
> which calls this function. So just reference it, rather than performing
> the division every time.
>

Thanks for catching this.
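
A minimal sketch of the fix, assuming nova_init_blockmap() keeps
setting sbi->per_list_blocks before calling this helper (as it does in
this patch):

static void nova_init_free_list(struct super_block *sb,
	struct free_list *free_list, int index)
{
	struct nova_sb_info *sbi = NOVA_SB(sb);
	unsigned long per_list_blocks = sbi->per_list_blocks;

	/* Equal share per CPU; the first and last lists give up the
	 * reserved head/tail blocks.
	 */
	free_list->block_start = per_list_blocks * index;
	free_list->block_end = free_list->block_start + per_list_blocks - 1;
	if (index == 0)
		free_list->block_start += sbi->head_reserved_blocks;
	if (index == sbi->cpus - 1)
		free_list->block_end -= sbi->tail_reserved_blocks;
}

For example, with num_blocks = 1024 and cpus = 4, per_list_blocks is
256: list 0 manages [head_reserved_blocks, 255] and list 3 manages
[768, 1023 - tail_reserved_blocks].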

>> +
>> + free_list->block_start = per_list_blocks * index;
>> + free_list->block_end = free_list->block_start +
>> + per_list_blocks - 1;
>> + if (index == 0)
>> + free_list->block_start += sbi->head_reserved_blocks;
>> + if (index == sbi->cpus - 1)
>> + free_list->block_end -= sbi->tail_reserved_blocks;
>> +}
>> +
>> +inline struct nova_range_node *nova_alloc_blocknode(struct super_block *sb)
>> +{
>> + return nova_alloc_range_node(sb);
>> +}
>> +
>> +inline void nova_free_blocknode(struct super_block *sb,
>> + struct nova_range_node *node)
>> +{
>> + nova_free_range_node(node);
>> +}
>> +
>> +void nova_init_blockmap(struct super_block *sb, int recovery)
>> +{
>> + struct nova_sb_info *sbi = NOVA_SB(sb);
>> + struct rb_root *tree;
>> + struct nova_range_node *blknode;
>> + struct free_list *free_list;
>> + int i;
>> + int ret;
>> +
>> + /* Divide the block range among per-CPU free lists */
>> + sbi->per_list_blocks = sbi->num_blocks / sbi->cpus;
>> + for (i = 0; i < sbi->cpus; i++) {
>> + free_list = nova_get_free_list(sb, i);
>> + tree = &(free_list->block_free_tree);
>> + nova_init_free_list(sb, free_list, i);
>> +
>> + /* For recovery, update these fields later */
>> + if (recovery == 0) {
>> + free_list->num_free_blocks = free_list->block_end -
>> + free_list->block_start + 1;
>> +
>> + blknode = nova_alloc_blocknode(sb);
>> + if (blknode == NULL)
>> + return;
>> + blknode->range_low = free_list->block_start;
>> + blknode->range_high = free_list->block_end;
>> + ret = nova_insert_blocktree(sbi, tree, blknode);
>> + if (ret) {
>> + nova_err(sb, "%s failed\n", __func__);
>> + nova_free_blocknode(sb, blknode);
>> + return;
>> + }
>> + free_list->first_node = blknode;
>> + free_list->last_node = blknode;
>> + free_list->num_blocknode = 1;
>> + }
>> +
>> + nova_dbgv("%s: free list %d: block start %lu, end %lu, %lu free blocks\n",
>> +   __func__, i,
>> +   free_list->block_start,
>> +   free_list->block_end,
>> +   free_list->num_free_blocks);
>> + }
>> +}
>> +
>> +static inline int nova_rbtree_compare_rangenode(struct nova_range_node *curr,
>> + unsigned long range_low)
>> +{
>> + if (range_low < curr->range_low)
>> + return -1;
>> + if (range_low > curr->range_high)
>> + return 1;
>>
>> + return 0;
>> +}
>> +
>> +int nova_find_range_node(struct nova_sb_info *sbi,
>> + struct rb_root *tree, unsigned long range_low,
>> + struct nova_range_node **ret_node)
>
> Instead of having a **ret_node pointer as an argument, just make the
> function return struct nova_range_node * and have callers check for NULL:
>
> struct nova_range_node *node = nova_find_range_node(sbi, tree, range_low);
>
> if (node) {
> //do stuff with *node
> }
>

I pass **ret_node as an argument because, when the target node is not
found, nova_find_range_node() still hands back the parent node, which
nova_find_free_slot() uses. So it may return 0 together with a non-NULL
ret_node. Having it as a parameter makes this clearer.
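
A sketch of the intended contract, with a hypothetical caller (the
variable names here are made up for illustration):

	struct nova_range_node *node = NULL;
	int found;

	found = nova_find_range_node(sbi, tree, range_low, &node);
	if (found) {
		/* node covers range_low */
	} else if (node) {
		/* node is the last node visited during the search;
		 * nova_find_free_slot() uses it as the parent when
		 * picking the insertion point for a new range.
		 */
	}

A plain pointer return would conflate "found" with "not found, but
here is the neighbour".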

Thanks,

Re: [RFC v2 16/83] Initialize block map and free lists in nova_init().

2018-03-11 Thread Nikolay Borisov


On 10.03.2018 20:17, Andiry Xu wrote:
> From: Andiry Xu 
> 
> NOVA divides the pmem range equally among per-CPU free lists,
> and formats the red-black trees by inserting the initial free range.
> 
> Signed-off-by: Andiry Xu 
> ---
>  fs/nova/balloc.c | 161 +++
>  fs/nova/balloc.h |  13 -
>  fs/nova/super.c  |   2 +
>  3 files changed, 175 insertions(+), 1 deletion(-)
> 
> diff --git a/fs/nova/balloc.c b/fs/nova/balloc.c
> index 450c942..cb627db 100644
> --- a/fs/nova/balloc.c
> +++ b/fs/nova/balloc.c
> @@ -55,4 +55,165 @@ void nova_delete_free_lists(struct super_block *sb)
>   sbi->free_lists = NULL;
>  }
>  
> +// Initialize a free list.  Each CPU gets an equal share of the block space to
> +// manage.
> +static void nova_init_free_list(struct super_block *sb,
> + struct free_list *free_list, int index)
> +{
> + struct nova_sb_info *sbi = NOVA_SB(sb);
> + unsigned long per_list_blocks;
> +
> + per_list_blocks = sbi->num_blocks / sbi->cpus;

nit: You've already initialised per_list_blocks in nova_init_blockmap,
which calls this function. So just reference it, rather than performing
the division every time.

> +
> + free_list->block_start = per_list_blocks * index;
> + free_list->block_end = free_list->block_start +
> + per_list_blocks - 1;
> + if (index == 0)
> + free_list->block_start += sbi->head_reserved_blocks;
> + if (index == sbi->cpus - 1)
> + free_list->block_end -= sbi->tail_reserved_blocks;
> +}
> +
> +inline struct nova_range_node *nova_alloc_blocknode(struct super_block *sb)
> +{
> + return nova_alloc_range_node(sb);
> +}
> +
> +inline void nova_free_blocknode(struct super_block *sb,
> + struct nova_range_node *node)
> +{
> + nova_free_range_node(node);
> +}
> +
> +void nova_init_blockmap(struct super_block *sb, int recovery)
> +{
> + struct nova_sb_info *sbi = NOVA_SB(sb);
> + struct rb_root *tree;
> + struct nova_range_node *blknode;
> + struct free_list *free_list;
> + int i;
> + int ret;
> +
> + /* Divide the block range among per-CPU free lists */
> + sbi->per_list_blocks = sbi->num_blocks / sbi->cpus;
> + for (i = 0; i < sbi->cpus; i++) {
> + free_list = nova_get_free_list(sb, i);
> + tree = &(free_list->block_free_tree);
> + nova_init_free_list(sb, free_list, i);
> +
> + /* For recovery, update these fields later */
> + if (recovery == 0) {
> + free_list->num_free_blocks = free_list->block_end -
> + free_list->block_start + 1;
> +
> + blknode = nova_alloc_blocknode(sb);
> + if (blknode == NULL)
> + return;
> + blknode->range_low = free_list->block_start;
> + blknode->range_high = free_list->block_end;
> + ret = nova_insert_blocktree(sbi, tree, blknode);
> + if (ret) {
> + nova_err(sb, "%s failed\n", __func__);
> + nova_free_blocknode(sb, blknode);
> + return;
> + }
> + free_list->first_node = blknode;
> + free_list->last_node = blknode;
> + free_list->num_blocknode = 1;
> + }
> +
> + nova_dbgv("%s: free list %d: block start %lu, end %lu, %lu free blocks\n",
> +   __func__, i,
> +   free_list->block_start,
> +   free_list->block_end,
> +   free_list->num_free_blocks);
> + }
> +}
> +
> +static inline int nova_rbtree_compare_rangenode(struct nova_range_node *curr,
> + unsigned long range_low)
> +{
> + if (range_low < curr->range_low)
> + return -1;
> + if (range_low > curr->range_high)
> + return 1;
>  
> + return 0;
> +}
> +
> +int nova_find_range_node(struct nova_sb_info *sbi,
> + struct rb_root *tree, unsigned long range_low,
> + struct nova_range_node **ret_node)

Instead of having a **ret_node pointer as an argument, just make the
function return struct nova_range_node * and have callers check for NULL:

struct nova_range_node *node = nova_find_range_node(sbi, tree, range_low);

if (node) {
//do stuff with *node
}
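
Something along these lines (an untested sketch, reusing the compare
loop from the patch below):

static struct nova_range_node *nova_find_range_node(struct nova_sb_info *sbi,
	struct rb_root *tree, unsigned long range_low)
{
	struct rb_node *temp = tree->rb_node;

	while (temp) {
		struct nova_range_node *curr =
			container_of(temp, struct nova_range_node, node);
		int compVal = nova_rbtree_compare_rangenode(curr, range_low);

		if (compVal == -1)
			temp = temp->rb_left;
		else if (compVal == 1)
			temp = temp->rb_right;
		else
			return curr;	/* exact containing range */
	}

	/* Not found; note the caller no longer gets the parent node. */
	return NULL;
}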

> +{
> + struct nova_range_node *curr = NULL;
> + struct rb_node *temp;
> + int compVal;
> + int ret = 0;
> +
> + temp = tree->rb_node;
> +
> + while (temp) {
> + curr = container_of(temp, struct nova_range_node, node);
> + compVal = nova_rbtree_compare_rangenode(curr, range_low);
> +
> + if (compVal == -1) {
> + temp = temp->rb_left;
> + } else if (compVal == 1) {
> + temp = temp->rb_right;
> + } else {
> + ret = 1;
> + break;
> + }
> + }
> +
> + *ret_node = curr;
> + return ret;
> +}