From: Pieter Jansen van Vuuren <pieter.jansenvanvuu...@netronome.com>

Make use of an array stats instead of storing stats per flow which
would require a hash lookup at critical times.

Signed-off-by: Pieter Jansen van Vuuren <pieter.jansenvanvuu...@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicin...@netronome.com>
---
 .../net/ethernet/netronome/nfp/flower/main.h  |  6 +-
 .../ethernet/netronome/nfp/flower/metadata.c  | 56 +++++++++----------
 .../ethernet/netronome/nfp/flower/offload.c   | 19 ++++---
 3 files changed, 40 insertions(+), 41 deletions(-)

diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 8b2b656da7ca..21a167df90c1 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -139,6 +139,8 @@ struct nfp_fl_lag {
  * @mask_ids:          List of free mask ids
  * @mask_table:                Hash table used to store masks
  * @flow_table:                Hash table used to store flower rules
+ * @stats:             Stored stats updates for flower rules
+ * @stats_lock:                Lock for flower rule stats updates
  * @cmsg_work:         Workqueue for control messages processing
  * @cmsg_skbs_high:    List of higher priority skbs for control message
  *                     processing
@@ -173,6 +175,8 @@ struct nfp_flower_priv {
        struct nfp_fl_mask_id mask_ids;
        DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
        struct rhashtable flow_table;
+       struct nfp_fl_stats *stats;
+       spinlock_t stats_lock; /* lock stats */
        struct work_struct cmsg_work;
        struct sk_buff_head cmsg_skbs_high;
        struct sk_buff_head cmsg_skbs_low;
@@ -232,8 +236,6 @@ struct nfp_fl_payload {
        unsigned long tc_flower_cookie;
        struct rhash_head fl_node;
        struct rcu_head rcu;
-       spinlock_t lock; /* lock stats */
-       struct nfp_fl_stats stats;
        __be32 nfp_tun_ipv4_addr;
        struct net_device *ingress_dev;
        char *unmasked_data;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 2427c994c91d..f0db7f9122d2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -119,42 +119,26 @@ nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
                                      nfp_flower_table_params);
 }
 
-static void
-nfp_flower_update_stats(struct nfp_app *app, struct nfp_fl_stats_frame *stats)
-{
-       struct nfp_fl_payload *nfp_flow;
-       unsigned long flower_cookie;
-
-       flower_cookie = be64_to_cpu(stats->stats_cookie);
-
-       rcu_read_lock();
-       nfp_flow = nfp_flower_search_fl_table(app, flower_cookie, NULL,
-                                             stats->stats_con_id);
-       if (!nfp_flow)
-               goto exit_rcu_unlock;
-
-       spin_lock(&nfp_flow->lock);
-       nfp_flow->stats.pkts += be32_to_cpu(stats->pkt_count);
-       nfp_flow->stats.bytes += be64_to_cpu(stats->byte_count);
-       nfp_flow->stats.used = jiffies;
-       spin_unlock(&nfp_flow->lock);
-
-exit_rcu_unlock:
-       rcu_read_unlock();
-}
-
 void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
 {
        unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
-       struct nfp_fl_stats_frame *stats_frame;
+       struct nfp_flower_priv *priv = app->priv;
+       struct nfp_fl_stats_frame *stats;
        unsigned char *msg;
+       u32 ctx_id;
        int i;
 
        msg = nfp_flower_cmsg_get_data(skb);
 
-       stats_frame = (struct nfp_fl_stats_frame *)msg;
-       for (i = 0; i < msg_len / sizeof(*stats_frame); i++)
-               nfp_flower_update_stats(app, stats_frame + i);
+       spin_lock(&priv->stats_lock);
+       for (i = 0; i < msg_len / sizeof(*stats); i++) {
+               stats = (struct nfp_fl_stats_frame *)msg + i;
+               ctx_id = be32_to_cpu(stats->stats_con_id);
+               priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
+               priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
+               priv->stats[ctx_id].used = jiffies;
+       }
+       spin_unlock(&priv->stats_lock);
 }
 
 static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
@@ -348,9 +332,9 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
 
        /* Update flow payload with mask ids. */
        nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
-       nfp_flow->stats.pkts = 0;
-       nfp_flow->stats.bytes = 0;
-       nfp_flow->stats.used = jiffies;
+       priv->stats[stats_cxt].pkts = 0;
+       priv->stats[stats_cxt].bytes = 0;
+       priv->stats[stats_cxt].used = jiffies;
 
        check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev,
                                                 NFP_FL_STATS_CTX_DONT_CARE);
@@ -469,8 +453,17 @@ int nfp_flower_metadata_init(struct nfp_app *app)
 
        priv->stats_ids.init_unalloc = NFP_FL_REPEATED_HASH_MAX;
 
+       priv->stats = kvmalloc_array(NFP_FL_STATS_ENTRY_RS,
+                                    sizeof(struct nfp_fl_stats), GFP_KERNEL);
+       if (!priv->stats)
+               goto err_free_ring_buf;
+
+       spin_lock_init(&priv->stats_lock);
+
        return 0;
 
+err_free_ring_buf:
+       vfree(priv->stats_ids.free_list.buf);
 err_free_last_used:
        kfree(priv->mask_ids.last_used);
 err_free_mask_id:
@@ -489,6 +482,7 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
 
        rhashtable_free_and_destroy(&priv->flow_table,
                                    nfp_check_rhashtable_empty, NULL);
+       kvfree(priv->stats);
        kfree(priv->mask_ids.mask_id_free_list.buf);
        kfree(priv->mask_ids.last_used);
        vfree(priv->stats_ids.free_list.buf);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 3f3649acb78f..cad28b44b21a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -428,8 +428,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
 
        flow_pay->nfp_tun_ipv4_addr = 0;
        flow_pay->meta.flags = 0;
-       spin_lock_init(&flow_pay->lock);
-
        flow_pay->ingress_offload = !egress;
 
        return flow_pay;
@@ -604,8 +602,10 @@ static int
 nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
                     struct tc_cls_flower_offload *flow, bool egress)
 {
+       struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload *nfp_flow;
        struct net_device *ingr_dev;
+       u32 ctx_id;
 
        ingr_dev = egress ? NULL : netdev;
        nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
@@ -616,13 +616,16 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
        if (nfp_flow->ingress_offload && egress)
                return 0;
 
-       spin_lock_bh(&nfp_flow->lock);
-       tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
-                             nfp_flow->stats.pkts, nfp_flow->stats.used);
+       ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
+
+       spin_lock_bh(&priv->stats_lock);
+       tcf_exts_stats_update(flow->exts, priv->stats[ctx_id].bytes,
+                             priv->stats[ctx_id].pkts,
+                             priv->stats[ctx_id].used);
 
-       nfp_flow->stats.pkts = 0;
-       nfp_flow->stats.bytes = 0;
-       spin_unlock_bh(&nfp_flow->lock);
+       priv->stats[ctx_id].pkts = 0;
+       priv->stats[ctx_id].bytes = 0;
+       spin_unlock_bh(&priv->stats_lock);
 
        return 0;
 }
-- 
2.17.1

Reply via email to