* Lai Jiangshan ([email protected]) wrote:
> Add min_alloc_size (minimum allocation size) for allocation.
> 
> We can now gain better cache locality by
> specifying a larger minimum allocation size.
> 
> Signed-off-by: Lai Jiangshan <[email protected]>
> ---
>  rculfhash.c            |   42 ++++++++++++++++++++++++++++++------------
>  tests/test_urcu_hash.c |    2 +-
>  urcu/rculfhash.h       |    5 ++++-
>  3 files changed, 35 insertions(+), 14 deletions(-)
> 
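Just to make sure I read the new prototype right: a caller that wants the
low-order bucket tables packed into one contiguous allocation would now do
something like the sketch below? (The 256/64 values are made up for the
example; test_hash/test_compare are the callbacks from test_urcu_hash.c.)

	struct cds_lfht *ht;

	ht = cds_lfht_new(test_hash, test_compare, 0x42UL,
			256,	/* init_size, power of two */
			64,	/* min_alloc_size, power of two */
			CDS_LFHT_AUTO_RESIZE, NULL);

I also see min_alloc_size is clamped up to MIN_TABLE_SIZE internally, so
passing 1 as the test program does keeps the old behavior.
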
> diff --git a/rculfhash.c b/rculfhash.c
> index a3dfbfe..32ea649 100644
> --- a/rculfhash.c
> +++ b/rculfhash.c
> @@ -244,6 +244,8 @@ struct cds_lfht {
>       struct rcu_table t;
>       cds_lfht_hash_fct hash_fct;
>       cds_lfht_compare_fct compare_fct;
> +     unsigned long min_alloc_order;
> +     unsigned long min_alloc_size;
>       unsigned long hash_seed;
>       int flags;
>       /*
> @@ -1076,7 +1078,7 @@ void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
>  {
>       unsigned long j;
>  
> -     assert(i > 0);
> +     assert(i > ht->min_alloc_order);
>       ht->cds_lfht_rcu_read_lock();
>       for (j = start; j < start + len; j++) {
>               struct cds_lfht_node *new_node =
> @@ -1114,7 +1116,7 @@ void init_table(struct cds_lfht *ht,
>  
>       dbg_printf("init table: first_order %lu end_order %lu\n",
>                  first_order, end_order);
> -     assert(first_order > 0);
> +     assert(first_order > ht->min_alloc_order);
>       for (i = first_order; i <= end_order; i++) {
>               unsigned long len;
>  
> @@ -1177,7 +1179,7 @@ void remove_table_partition(struct cds_lfht *ht, unsigned long i,
>  {
>       unsigned long j;
>  
> -     assert(i > 0);
> +     assert(i > ht->min_alloc_order);
>       ht->cds_lfht_rcu_read_lock();
>       for (j = start; j < start + len; j++) {
>               struct cds_lfht_node *fini_node =
> @@ -1215,7 +1217,7 @@ void fini_table(struct cds_lfht *ht,
>  
>       dbg_printf("fini table: first_order %lu end_order %lu\n",
>                  first_order, end_order);
> -     assert(first_order > 0);
> +     assert(first_order > ht->min_alloc_order);
>       for (i = end_order; i >= first_order; i--) {
>               unsigned long len;
>  
> @@ -1266,7 +1268,7 @@ void cds_lfht_create_dummy(struct cds_lfht *ht, unsigned long size)
>       struct _cds_lfht_node *prev, *node;
>       unsigned long order, len, i, j;
>  
> -     ht->t.tbl[0] = calloc(1, sizeof(struct _cds_lfht_node));
> +     ht->t.tbl[0] = calloc(1, ht->min_alloc_size * sizeof(struct _cds_lfht_node));
>       assert(ht->t.tbl[0]);
>  
>       dbg_printf("create dummy: order %lu index %lu hash %lu\n", 0, 0, 0);
> @@ -1275,8 +1277,12 @@ void cds_lfht_create_dummy(struct cds_lfht *ht, unsigned long size)
>  
>       for (order = 1; order < get_count_order_ulong(size) + 1; order++) {
>               len = 1UL << (order - 1);
> -             ht->t.tbl[order] = calloc(1, len * sizeof(struct _cds_lfht_node));
> -             assert(ht->t.tbl[order]);
> +             if (order <= ht->min_alloc_order) {
> +                     ht->t.tbl[order] = (void *)(ht->t.tbl[0]->nodes + len);

not sure why we need to cast to void * here. I'm probably missing
something. Can you enlighten me? Is there any other way we could do
this?
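
For reference, here is how I read the types involved -- a minimal sketch,
assuming struct rcu_level still only wraps the flexible nodes[] array
(please correct me if the layout changed):

	/* Assumed layout, for discussion only. */
	struct rcu_level {
		struct _cds_lfht_node nodes[0];
	};

	/*
	 * ht->t.tbl[order] is a struct rcu_level *, while
	 * ht->t.tbl[0]->nodes + len is a struct _cds_lfht_node *,
	 * so the assignment needs some cast to compile cleanly:
	 */
	ht->t.tbl[order] = (struct rcu_level *)(ht->t.tbl[0]->nodes + len);

If that is the only reason, would an explicit (struct rcu_level *) cast
be clearer than void *?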

Thanks,

Mathieu

> +             } else {
> +                     ht->t.tbl[order] = calloc(1, len * sizeof(struct _cds_lfht_node));
> +                     assert(ht->t.tbl[order]);
> +             }
>  
>               i = 0;
>               prev = ht->t.tbl[i]->nodes;
> @@ -1303,6 +1309,7 @@ struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
>                       cds_lfht_compare_fct compare_fct,
>                       unsigned long hash_seed,
>                       unsigned long init_size,
> +                     unsigned long min_alloc_size,
>                       int flags,
>                       void (*cds_lfht_call_rcu)(struct rcu_head *head,
>                                       void (*func)(struct rcu_head *head)),
> @@ -1318,9 +1325,14 @@ struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
>       struct cds_lfht *ht;
>       unsigned long order;
>  
> +     /* min_alloc_size must be power of two */
> +     if (!min_alloc_size || (min_alloc_size & (min_alloc_size - 1)))
> +             return NULL;
>       /* init_size must be power of two */
> -     if (init_size && (init_size & (init_size - 1)))
> +     if (!init_size || (init_size & (init_size - 1)))
>               return NULL;
> +     min_alloc_size = max(min_alloc_size, MIN_TABLE_SIZE);
> +     init_size = max(init_size, min_alloc_size);
>       ht = calloc(1, sizeof(struct cds_lfht));
>       assert(ht);
>       ht->hash_fct = hash_fct;
> @@ -1339,10 +1351,12 @@ struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
>       /* this mutex should not nest in read-side C.S. */
>       pthread_mutex_init(&ht->resize_mutex, NULL);
>       ht->flags = flags;
> -     order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE));
> +     order = get_count_order_ulong(init_size);
>       ht->t.resize_target = 1UL << order;
>       cds_lfht_create_dummy(ht, 1UL << order);
>       ht->t.size = 1UL << order;
> +     ht->min_alloc_size = min_alloc_size;
> +     ht->min_alloc_order = get_count_order_ulong(min_alloc_size);
>       return ht;
>  }
>  
> @@ -1560,7 +1574,11 @@ int cds_lfht_delete_dummy(struct cds_lfht *ht)
>                               bit_reverse_ulong(ht->t.tbl[order]->nodes[i].reverse_hash));
>                       assert(is_dummy(ht->t.tbl[order]->nodes[i].next));
>               }
> -             poison_free(ht->t.tbl[order]);
> +
> +             if (order == ht->min_alloc_order)
> +                     poison_free(ht->t.tbl[0]);
> +             else if (order > ht->min_alloc_order)
> +                     poison_free(ht->t.tbl[order]);
>       }
>       return 0;
>  }
> @@ -1661,7 +1679,7 @@ void _do_cds_lfht_shrink(struct cds_lfht *ht,
>  {
>       unsigned long old_order, new_order;
>  
> -     new_size = max(new_size, MIN_TABLE_SIZE);
> +     new_size = max(new_size, ht->min_alloc_size);
>       old_order = get_count_order_ulong(old_size);
>       new_order = get_count_order_ulong(new_size);
>       dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
> @@ -1711,7 +1729,7 @@ static
>  void resize_target_update_count(struct cds_lfht *ht,
>                               unsigned long count)
>  {
> -     count = max(count, MIN_TABLE_SIZE);
> +     count = max(count, ht->min_alloc_size);
>       uatomic_set(&ht->t.resize_target, count);
>  }
>  
> diff --git a/tests/test_urcu_hash.c b/tests/test_urcu_hash.c
> index db45c8c..70db8b3 100644
> --- a/tests/test_urcu_hash.c
> +++ b/tests/test_urcu_hash.c
> @@ -888,7 +888,7 @@ int main(int argc, char **argv)
>        */
>       rcu_register_thread();
>       test_ht = cds_lfht_new(test_hash, test_compare, 0x42UL,
> -                     init_hash_size,
> +                     init_hash_size, 1,
>                       opt_auto_resize ? CDS_LFHT_AUTO_RESIZE : 0, NULL);
>               ret = populate_hash();
>       assert(!ret);
> diff --git a/urcu/rculfhash.h b/urcu/rculfhash.h
> index 1376b5a..fd67b6b 100644
> --- a/urcu/rculfhash.h
> +++ b/urcu/rculfhash.h
> @@ -103,6 +103,7 @@ struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
>                       cds_lfht_compare_fct compare_fct,
>                       unsigned long hash_seed,
>                       unsigned long init_size,
> +                     unsigned long min_alloc_size,
>                       int flags,
>                       void (*cds_lfht_call_rcu)(struct rcu_head *head,
>                               void (*func)(struct rcu_head *head)),
> @@ -121,6 +122,7 @@ struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
>   * @compare_fct: the key comparison function.
>   * @hash_seed: the seed for hash function.
>   * @init_size: number of nodes to allocate initially. Must be power of two.
> + * @min_alloc_size: the smallest allocation size to use. Must be power of two.
>   * @flags: hash table creation flags (can be combined with bitwise or: '|').
>   *           0: no flags.
>   *           CDS_LFHT_AUTO_RESIZE: automatically resize hash table.
> @@ -143,11 +145,12 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
>                       cds_lfht_compare_fct compare_fct,
>                       unsigned long hash_seed,
>                       unsigned long init_size,
> +                     unsigned long min_alloc_size,
>                       int flags,
>                       pthread_attr_t *attr)
>  {
>       return _cds_lfht_new(hash_fct, compare_fct, hash_seed,
> -                     init_size, flags,
> +                     init_size, min_alloc_size, flags,
>                       call_rcu, synchronize_rcu, rcu_read_lock,
>                       rcu_read_unlock, rcu_thread_offline,
>                       rcu_thread_online, rcu_register_thread,
> -- 
> 1.7.4.4
> 

-- 
Mathieu Desnoyers
Operating System Efficiency R&D Consultant
EfficiOS Inc.
http://www.efficios.com
