The lock-free algorithm has caused a significant lookup
performance regression for certain use cases. The regression
is attributed to the use of non-relaxed memory orderings.
Create two versions of the lookup functions: one that uses
the RW lock and one that is lock-free. This restores the
performance for use cases that used the RW lock version of
the lookup function.

Fixes: e605a1d36 ("hash: add lock-free r/w concurrency")
Cc: honnappa.nagaraha...@arm.com

Suggested-by: Jerin Jacob <jerin.ja...@caviumnetworks.com>
Signed-off-by: Honnappa Nagarahalli <honnappa.nagaraha...@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljed...@arm.com>
Reviewed-by: Gavin Hu <gavin...@arm.com>
---
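For context on the regression, a minimal sketch of the
memory-ordering difference on the lookup hot path. The acquire
load mirrors what search_one_bucket_lf() does when reading
key_idx; the plain load is what the RW-lock variant can use,
since the reader lock already provides the required ordering.
The helper names are illustrative only, not part of this patch:

    #include <stdint.h>

    /* Lock-free path: the acquire load orders the key compare
     * after the key_idx read; on aarch64 this typically emits
     * LDAR, which is costlier than a plain load. */
    static inline uint32_t
    load_key_idx_lf(const uint32_t *slot)
    {
            return __atomic_load_n(slot, __ATOMIC_ACQUIRE);
    }

    /* RW-lock path: a plain load suffices because the reader
     * lock serializes readers against writers. */
    static inline uint32_t
    load_key_idx_l(const uint32_t *slot)
    {
            return *slot;
    }
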
 lib/librte_hash/rte_cuckoo_hash.c | 44 ++++++++++++++++++++++++-------
 1 file changed, 34 insertions(+), 10 deletions(-)
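
Whether a lookup takes the lock-free or the RW-lock path is
fixed at table creation time through extra_flag. A minimal
creation sketch, assuming an rte_jhash-based setup (the table
name and sizes are placeholders):

    #include <rte_hash.h>
    #include <rte_jhash.h>
    #include <rte_lcore.h>

    static struct rte_hash *
    create_lf_table(void)
    {
            struct rte_hash_parameters params = {
                    .name = "lf_table",      /* placeholder */
                    .entries = 1024,         /* placeholder */
                    .key_len = sizeof(uint32_t),
                    .hash_func = rte_jhash,
                    .socket_id = rte_socket_id(),
                    /* sets h->readwrite_concur_lf_support, so
                     * lookups dispatch to the _lf variants */
                    .extra_flag =
                            RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF,
            };
            return rte_hash_create(&params);
    }

Tables created without this flag dispatch to the new *_l
variants and get back the pre-e605a1d36 lookup performance.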

diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 9390dc5e4..7e1a9ac96 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1129,10 +1129,11 @@ rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
                return ret;
 }
 
-/* Search one bucket to find the match key */
+/* Search one bucket to find the match key - uses rw lock */
 static inline int32_t
-search_one_bucket(const struct rte_hash *h, const void *key, uint16_t sig,
-                       void **data, const struct rte_hash_bucket *bkt)
+search_one_bucket_l(const struct rte_hash *h, const void *key,
+               uint16_t sig, void **data,
+               const struct rte_hash_bucket *bkt)
 {
        int i;
        struct rte_hash_key *k, *keys = h->key_store;
@@ -1191,8 +1192,8 @@ search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
 }
 
 static inline int32_t
-__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
-                                       hash_sig_t sig, void **data)
+__rte_hash_lookup_with_hash_l(const struct rte_hash *h, const void *key,
+                               hash_sig_t sig, void **data)
 {
        uint32_t prim_bucket_idx, sec_bucket_idx;
        struct rte_hash_bucket *bkt, *cur_bkt;
@@ -1207,7 +1208,7 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
 
        /* Check if key is in primary location */
        bkt = &h->buckets[prim_bucket_idx];
-       ret = search_one_bucket(h, key, short_sig, data, bkt);
+       ret = search_one_bucket_l(h, key, short_sig, data, bkt);
        if (ret != -1) {
                __hash_rw_reader_unlock(h);
                return ret;
@@ -1217,7 +1218,7 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
 
        /* Check if key is in secondary location */
        FOR_EACH_BUCKET(cur_bkt, bkt) {
-               ret = search_one_bucket(h, key, short_sig,
+               ret = search_one_bucket_l(h, key, short_sig,
                                        data, cur_bkt);
                if (ret != -1) {
                        __hash_rw_reader_unlock(h);
@@ -1291,6 +1292,16 @@ __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
        return -ENOENT;
 }
 
+static inline int32_t
+__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
+                                       hash_sig_t sig, void **data)
+{
+       if (h->readwrite_concur_lf_support)
+               return __rte_hash_lookup_with_hash_lf(h, key, sig, data);
+       else
+               return __rte_hash_lookup_with_hash_l(h, key, sig, data);
+}
+
 int32_t
 rte_hash_lookup_with_hash(const struct rte_hash *h,
                        const void *key, hash_sig_t sig)
@@ -1592,7 +1603,7 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
 
 #define PREFETCH_OFFSET 4
 static inline void
-__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
+__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
                        int32_t num_keys, int32_t *positions,
                        uint64_t *hit_mask, void *data[])
 {
@@ -1762,10 +1773,10 @@ __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
                next_bkt = secondary_bkt[i]->next;
                FOR_EACH_BUCKET(cur_bkt, next_bkt) {
                        if (data != NULL)
-                               ret = search_one_bucket(h, keys[i],
+                               ret = search_one_bucket_l(h, keys[i],
                                                sig[i], &data[i], cur_bkt);
                        else
-                               ret = search_one_bucket(h, keys[i],
+                               ret = search_one_bucket_l(h, keys[i],
                                                sig[i], NULL, cur_bkt);
                        if (ret != -1) {
                                positions[i] = ret;
@@ -2005,6 +2016,19 @@ __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
                *hit_mask = hits;
 }
 
+static inline void
+__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
+                       int32_t num_keys, int32_t *positions,
+                       uint64_t *hit_mask, void *data[])
+{
+       if (h->readwrite_concur_lf_support)
+               return __rte_hash_lookup_bulk_lf(h, keys, num_keys,
+                                               positions, hit_mask, data);
+       else
+               return __rte_hash_lookup_bulk_l(h, keys, num_keys,
+                                               positions, hit_mask, data);
+}
+
 int
 rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
                      uint32_t num_keys, int32_t *positions)
-- 
2.17.1