This field is added for librte_distributor. Users of librte_distributor
should set mbuf->hash.usr before calling rte_distributor_process().
The value of usr is a user-defined tag that identifies the flow.

Signed-off-by: Qinglai Xiao <jigsaw at gmail.com>
---
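A minimal usage sketch on the distributor lcore, for illustration only and
not part of this patch; the distributor handle "d", the mbuf burst
"bufs"/"nb_rx" and the helper flow_tag_of() are placeholders standing in
for application code:

        unsigned int i;

        /* tag each packet with a user-defined flow identifier before
         * handing the burst to the distributor */
        for (i = 0; i < nb_rx; i++)
                bufs[i]->hash.usr = flow_tag_of(bufs[i]);

        rte_distributor_process(d, bufs, nb_rx);

Note that rte_distributor_process() ORs the tag with 1 internally (see the
rte_distributor.c hunk below), since the zero value is reserved to mean "no
packet in flight"; tags differing only in the lowest bit therefore map to
the same flow.
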
 app/test/test_distributor.c              |   18 +++++++++---------
 app/test/test_distributor_perf.c         |    4 ++--
 lib/librte_distributor/rte_distributor.c |    2 +-
 lib/librte_mbuf/rte_mbuf.h               |    1 +
 4 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/app/test/test_distributor.c b/app/test/test_distributor.c
index ce06436..9e8c06d 100644
--- a/app/test/test_distributor.c
+++ b/app/test/test_distributor.c
@@ -120,7 +120,7 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
        /* now set all hash values in all buffers to zero, so all pkts go to the
         * one worker thread */
        for (i = 0; i < BURST; i++)
-               bufs[i]->hash.rss = 0;
+               bufs[i]->hash.usr = 0;

        rte_distributor_process(d, bufs, BURST);
        rte_distributor_flush(d);
@@ -142,7 +142,7 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
        if (rte_lcore_count() >= 3) {
                clear_packet_count();
                for (i = 0; i < BURST; i++)
-                       bufs[i]->hash.rss = (i & 1) << 8;
+                       bufs[i]->hash.usr = (i & 1) << 8;

                rte_distributor_process(d, bufs, BURST);
                rte_distributor_flush(d);
@@ -167,7 +167,7 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
         * so load gets distributed */
        clear_packet_count();
        for (i = 0; i < BURST; i++)
-               bufs[i]->hash.rss = i;
+               bufs[i]->hash.usr = i;

        rte_distributor_process(d, bufs, BURST);
        rte_distributor_flush(d);
@@ -199,7 +199,7 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
                return -1;
        }
        for (i = 0; i < BIG_BATCH; i++)
-               many_bufs[i]->hash.rss = i << 2;
+               many_bufs[i]->hash.usr = i << 2;

        for (i = 0; i < BIG_BATCH/BURST; i++) {
                rte_distributor_process(d, &many_bufs[i*BURST], BURST);
@@ -280,7 +280,7 @@ sanity_test_with_mbuf_alloc(struct rte_distributor *d, struct rte_mempool *p)
                while (rte_mempool_get_bulk(p, (void *)bufs, BURST) < 0)
                        rte_distributor_process(d, NULL, 0);
                for (j = 0; j < BURST; j++) {
-                       bufs[j]->hash.rss = (i+j) << 1;
+                       bufs[j]->hash.usr = (i+j) << 1;
                        rte_mbuf_refcnt_set(bufs[j], 1);
                }

@@ -359,7 +359,7 @@ sanity_test_with_worker_shutdown(struct rte_distributor *d,
        /* now set all hash values in all buffers to zero, so all pkts go to the
         * one worker thread */
        for (i = 0; i < BURST; i++)
-               bufs[i]->hash.rss = 0;
+               bufs[i]->hash.usr = 0;

        rte_distributor_process(d, bufs, BURST);
        /* at this point, we will have processed some packets and have a full
@@ -372,7 +372,7 @@ sanity_test_with_worker_shutdown(struct rte_distributor *d,
                return -1;
        }
        for (i = 0; i < BURST; i++)
-               bufs[i]->hash.rss = 0;
+               bufs[i]->hash.usr = 0;

        /* get worker zero to quit */
        zero_quit = 1;
@@ -416,7 +416,7 @@ test_flush_with_worker_shutdown(struct rte_distributor *d,
        /* now set all hash values in all buffers to zero, so all pkts go to the
         * one worker thread */
        for (i = 0; i < BURST; i++)
-               bufs[i]->hash.rss = 0;
+               bufs[i]->hash.usr = 0;

        rte_distributor_process(d, bufs, BURST);
        /* at this point, we will have processed some packets and have a full
@@ -488,7 +488,7 @@ quit_workers(struct rte_distributor *d, struct rte_mempool *p)
        zero_quit = 0;
        quit = 1;
        for (i = 0; i < num_workers; i++)
-               bufs[i]->hash.rss = i << 1;
+               bufs[i]->hash.usr = i << 1;
        rte_distributor_process(d, bufs, num_workers);

        rte_mempool_put_bulk(p, (void *)bufs, num_workers);
diff --git a/app/test/test_distributor_perf.c b/app/test/test_distributor_perf.c
index b04864c..48ee344 100644
--- a/app/test/test_distributor_perf.c
+++ b/app/test/test_distributor_perf.c
@@ -159,7 +159,7 @@ perf_test(struct rte_distributor *d, struct rte_mempool *p)
        }
        /* ensure we have different hash value for each pkt */
        for (i = 0; i < BURST; i++)
-               bufs[i]->hash.rss = i;
+               bufs[i]->hash.usr = i;

        start = rte_rdtsc();
        for (i = 0; i < (1<<ITER_POWER); i++)
@@ -198,7 +198,7 @@ quit_workers(struct rte_distributor *d, struct rte_mempool *p)

        quit = 1;
        for (i = 0; i < num_workers; i++)
-               bufs[i]->hash.rss = i << 1;
+               bufs[i]->hash.usr = i << 1;
        rte_distributor_process(d, bufs, num_workers);

        rte_mempool_put_bulk(p, (void *)bufs, num_workers);
diff --git a/lib/librte_distributor/rte_distributor.c b/lib/librte_distributor/rte_distributor.c
index 656ee5c..3dfec4a 100644
--- a/lib/librte_distributor/rte_distributor.c
+++ b/lib/librte_distributor/rte_distributor.c
@@ -289,7 +289,7 @@ rte_distributor_process(struct rte_distributor *d,
                         * use the zero-value to indicate that no packet is
                         * being processed by a worker.
                         */
-                       new_tag = (next_mb->hash.rss | 1);
+                       new_tag = (next_mb->hash.usr | 1);

                        uint32_t match = 0;
                        unsigned i;
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index e8f9bfc..f5f8658 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -185,6 +185,7 @@ struct rte_mbuf {
                        uint16_t id;
                } fdir;           /**< Filter identifier if FDIR enabled */
                uint32_t sched;   /**< Hierarchical scheduler */
+               uint32_t usr;     /**< User defined tags. See @rte_distributor_process */
        } hash;                   /**< hash information */

        /* second cache line - fields only used in slow path or on TX */
-- 
1.7.1
