This is preparation for modifying 'struct dp_netdev_pmd_thread' in
upcoming commits. It is needed to avoid reordering and regrouping of
members while replacing old members and adding new ones.

Signed-off-by: Bhanuprakash Bodireddy <[email protected]>
Co-authored-by: Bhanuprakash Bodireddy <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]>
---
 lib/dpif-netdev.c | 65 ++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 40 insertions(+), 25 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 0a62630..7a7c6ce 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -549,29 +549,22 @@ struct tx_port {
 struct dp_netdev_pmd_thread {
     PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline0,
         struct dp_netdev *dp;
-        struct cmap_node node;          /* In 'dp->poll_threads'. */
-        pthread_cond_t cond;            /* For synchronizing pmd thread
-                                           reload. */
+        struct cmap_node node;     /* In 'dp->poll_threads'. */
+        pthread_cond_t cond;       /* For synchronizing pmd thread reload. */
     );
 
     PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline1,
         struct ovs_mutex cond_mutex;    /* Mutex for condition variable. */
         pthread_t thread;
-        unsigned core_id;               /* CPU core id of this pmd thread. */
-        int numa_id;                    /* numa node id of this pmd thread. */
     );
 
     /* Per thread exact-match cache.  Note, the instance for cpu core
      * NON_PMD_CORE_ID can be accessed by multiple threads, and thusly
      * need to be protected by 'non_pmd_mutex'.  Every other instance
      * will only be accessed by its own pmd thread. */
-    OVS_ALIGNED_VAR(CACHE_LINE_SIZE) struct emc_cache flow_cache;
-    struct ovs_refcount ref_cnt;    /* Every reference must be refcount'ed. */
-
-    /* Queue id used by this pmd thread to send packets on all netdevs if
-     * XPS disabled for this netdev. All static_tx_qid's are unique and less
-     * than 'cmap_count(dp->poll_threads)'. */
-    uint32_t static_tx_qid;
+    PADDED_MEMBERS(CACHE_LINE_SIZE,
+        OVS_ALIGNED_VAR(CACHE_LINE_SIZE) struct emc_cache flow_cache;
+    );
 
     /* Flow-Table and classifiers
      *
@@ -579,7 +572,10 @@ struct dp_netdev_pmd_thread {
      * changes to 'classifiers' must be made while still holding the
      * 'flow_mutex'.
      */
-    struct ovs_mutex flow_mutex;
+    PADDED_MEMBERS(CACHE_LINE_SIZE,
+        struct ovs_mutex flow_mutex;
+        /* 8 pad bytes. */
+    );
     PADDED_MEMBERS(CACHE_LINE_SIZE,
         struct cmap flow_table OVS_GUARDED; /* Flow table. */
 
@@ -596,35 +592,50 @@ struct dp_netdev_pmd_thread {
 
         /* Used to count cycles. See 'cycles_counter_end()'. */
         unsigned long long last_cycles;
-        struct latch exit_latch;        /* For terminating the pmd thread. */
-    );
 
+        /* 8 pad bytes. */
+    );
     PADDED_MEMBERS(CACHE_LINE_SIZE,
         /* Statistics. */
         struct dp_netdev_pmd_stats stats;
+        /* 8 pad bytes. */
+    );
 
+    PADDED_MEMBERS(CACHE_LINE_SIZE,
+        struct latch exit_latch;     /* For terminating the pmd thread. */
         struct seq *reload_seq;
         uint64_t last_reload_seq;
-        atomic_bool reload;             /* Do we need to reload ports? */
-        bool isolated;
-
+        atomic_bool reload;          /* Do we need to reload ports? */
         /* Set to true if the pmd thread needs to be reloaded. */
         bool need_reload;
-        /* 5 pad bytes. */
+        bool isolated;
+
+        struct ovs_refcount ref_cnt; /* Every reference must be refcount'ed. */
+
+        /* Queue id used by this pmd thread to send packets on all netdevs if
+         * XPS disabled for this netdev. All static_tx_qid's are unique and
+         * less than 'cmap_count(dp->poll_threads)'. */
+        uint32_t static_tx_qid;
+
+        unsigned core_id;            /* CPU core id of this pmd thread. */
+        int numa_id;                 /* numa node id of this pmd thread. */
+
+        /* 20 pad bytes. */
     );
 
     PADDED_MEMBERS(CACHE_LINE_SIZE,
-        struct ovs_mutex port_mutex;    /* Mutex for 'poll_list'
-                                           and 'tx_ports'. */
-        /* 16 pad bytes. */
+        /* Mutex for 'poll_list' and 'tx_ports'. */
+        struct ovs_mutex port_mutex;
     );
+
     PADDED_MEMBERS(CACHE_LINE_SIZE,
         /* List of rx queues to poll. */
         struct hmap poll_list OVS_GUARDED;
-        /* Map of 'tx_port's used for transmission.  Written by the main
-         * thread, read by the pmd thread. */
+        /* Map of 'tx_port's used for transmission.
+         * Written by the main thread, read by the pmd thread. */
         struct hmap tx_ports OVS_GUARDED;
     );
+
     PADDED_MEMBERS(CACHE_LINE_SIZE,
         /* These are thread-local copies of 'tx_ports'.  One contains only
          * tunnel ports (that support push_tunnel/pop_tunnel), the other
@@ -648,9 +659,13 @@ struct dp_netdev_pmd_thread {
          * values and subtracts them from 'stats' and 'cycles' before
          * reporting to the user */
         unsigned long long stats_zero[DP_N_STATS];
-        uint64_t cycles_zero[PMD_N_CYCLES];
         /* 8 pad bytes. */
     );
+
+    PADDED_MEMBERS(CACHE_LINE_SIZE,
+        uint64_t cycles_zero[PMD_N_CYCLES];
+        /* 48 pad bytes. */
+    );
 };
 
 /* Interface to netdev-based datapath. */
-- 
2.7.4

_______________________________________________
dev mailing list
[email protected]
https://mail.openvswitch.org/mailman/listinfo/ovs-dev

Reply via email to