Author: slavash
Date: Wed Dec  5 14:20:57 2018
New Revision: 341578
URL: https://svnweb.freebsd.org/changeset/base/341578

Log:
  mlx5en: Remove the DRBR and associated logic in the transmit path.
  
  The hardware queues are currently deep enough, and using the DRBR and its
  associated callbacks only leads to more task switching in the TX path.
  There is also a race when setting the queue_state which can lead to hung
  TX rings.
  
  Submitted by:   hselasky@
  Approved by:    hselasky (mentor)
  MFC after:      1 week
  Sponsored by:   Mellanox Technologies
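
One way the removed queue_state handshake can lose a wakeup (see the
mlx5_en_tx.c hunk below): mlx5e_xmit_locked() stores MLX5E_SQ_FULL after it
finds the ring full, while mlx5e_poll_tx_cq() re-enqueues the TX task only
when its cmpset from MLX5E_SQ_FULL to MLX5E_SQ_READY succeeds.  If the
completion handler runs in between, the wakeup can be lost and mbufs parked
in the drbr are not flushed until some later transmit takes the lock.  The
snippet below is a minimal user-space sketch of that interleaving; the names
and types are simplified stand-ins, not the driver's.

/*
 * Hypothetical model of the removed queue_state handshake, showing one
 * interleaving where the wakeup is lost: the completion path runs before
 * the TX path can publish SQ_FULL, so its cmpset fails and no task is
 * enqueued, yet the state still ends up as SQ_FULL.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { SQ_READY, SQ_FULL };

static atomic_int queue_state = SQ_READY;
static bool wakeup_scheduled = false;

/* completion path: space was freed; wake the TX task only if FULL is seen */
static void
tx_completion(void)
{
	int expected = SQ_FULL;

	if (atomic_compare_exchange_strong(&queue_state, &expected, SQ_READY))
		wakeup_scheduled = true;	/* taskqueue_enqueue() in the driver */
}

/* TX path, ring found full: record that a wakeup is needed */
static void
tx_ring_full(void)
{
	atomic_store(&queue_state, SQ_FULL);
}

int
main(void)
{
	/* completion fires between the "ring is full" decision and the store */
	tx_completion();	/* cmpset fails, no wakeup is scheduled */
	tx_ring_full();		/* state is FULL, but nothing will clear it */

	printf("queue_state=%s, wakeup scheduled=%s\n",
	    queue_state == SQ_FULL ? "SQ_FULL" : "SQ_READY",
	    wakeup_scheduled ? "yes" : "no");
	return (0);
}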

Modified:
  head/sys/dev/mlx5/mlx5_en/en.h
  head/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
  head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
  head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c

Modified: head/sys/dev/mlx5/mlx5_en/en.h
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/en.h      Wed Dec  5 14:20:26 2018        (r341577)
+++ head/sys/dev/mlx5/mlx5_en/en.h      Wed Dec  5 14:20:57 2018        (r341578)
@@ -473,7 +473,6 @@ struct mlx5e_params {
   m(+1, u64 tx_coalesce_usecs, "tx_coalesce_usecs", "Limit in usec for joining tx packets") \
   m(+1, u64 tx_coalesce_pkts, "tx_coalesce_pkts", "Maximum number of tx packets to join") \
   m(+1, u64 tx_coalesce_mode, "tx_coalesce_mode", "0: EQE mode 1: CQE mode") \
-  m(+1, u64 tx_bufring_disable, "tx_bufring_disable", "0: Enable bufring 1: Disable bufring") \
   m(+1, u64 tx_completion_fact, "tx_completion_fact", "1..MAX: Completion event ratio") \
   m(+1, u64 tx_completion_fact_max, "tx_completion_fact_max", "Maximum completion event ratio") \
   m(+1, u64 hw_lro, "hw_lro", "set to enable hw_lro") \
@@ -606,8 +605,6 @@ struct mlx5e_sq {
        struct  mlx5e_sq_stats stats;
 
        struct  mlx5e_cq cq;
-       struct  task sq_task;
-       struct  taskqueue *sq_tq;
 
        /* pointers to per packet info: write@xmit, read@completion */
        struct  mlx5e_sq_mbuf *mbuf;
@@ -628,7 +625,6 @@ struct mlx5e_sq {
        struct  mlx5_wq_ctrl wq_ctrl;
        struct  mlx5e_priv *priv;
        int     tc;
-       unsigned int queue_state;
 } __aligned(MLX5E_CACHELINE_SIZE);
 
 static inline bool
@@ -857,7 +853,6 @@ void        mlx5e_cq_error_event(struct mlx5_core_cq *mcq, in
 void   mlx5e_rx_cq_comp(struct mlx5_core_cq *);
 void   mlx5e_tx_cq_comp(struct mlx5_core_cq *);
 struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
-void   mlx5e_tx_que(void *context, int pending);
 
 int    mlx5e_open_flow_table(struct mlx5e_priv *priv);
 void   mlx5e_close_flow_table(struct mlx5e_priv *priv);

Modified: head/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c Wed Dec  5 14:20:26 2018        (r341577)
+++ head/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c Wed Dec  5 14:20:57 2018        (r341578)
@@ -703,18 +703,6 @@ mlx5e_ethtool_handler(SYSCTL_HANDLER_ARGS)
                        mlx5e_open_locked(priv->ifp);
                break;
 
-       case MLX5_PARAM_OFFSET(tx_bufring_disable):
-               /* rangecheck input value */
-               priv->params_ethtool.tx_bufring_disable =
-                   priv->params_ethtool.tx_bufring_disable ? 1 : 0;
-
-               /* reconfigure the sendqueues, if any */
-               if (was_opened) {
-                       mlx5e_close_locked(priv->ifp);
-                       mlx5e_open_locked(priv->ifp);
-               }
-               break;
-
        case MLX5_PARAM_OFFSET(tx_completion_fact):
                /* network interface must be down */
                if (was_opened)

Modified: head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c    Wed Dec  5 14:20:26 2018        (r341577)
+++ head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c    Wed Dec  5 14:20:57 2018        (r341578)
@@ -1,5 +1,5 @@
 /*-
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -1184,37 +1184,6 @@ mlx5e_create_sq(struct mlx5e_channel *c,
        sq->min_inline_mode = priv->params.tx_min_inline_mode;
        sq->vlan_inline_cap = MLX5_CAP_ETH(mdev, wqe_vlan_insert);
 
-       /* check if we should allocate a second packet buffer */
-       if (priv->params_ethtool.tx_bufring_disable == 0) {
-               sq->br = buf_ring_alloc(MLX5E_SQ_TX_QUEUE_SIZE, M_MLX5EN,
-                   M_WAITOK, &sq->lock);
-               if (sq->br == NULL) {
-                       if_printf(c->ifp, "%s: Failed allocating sq drbr buffer\n",
-                           __func__);
-                       err = -ENOMEM;
-                       goto err_free_sq_db;
-               }
-
-               sq->sq_tq = taskqueue_create_fast("mlx5e_que", M_WAITOK,
-                   taskqueue_thread_enqueue, &sq->sq_tq);
-               if (sq->sq_tq == NULL) {
-                       if_printf(c->ifp, "%s: Failed allocating taskqueue\n",
-                           __func__);
-                       err = -ENOMEM;
-                       goto err_free_drbr;
-               }
-
-               TASK_INIT(&sq->sq_task, 0, mlx5e_tx_que, sq);
-#ifdef RSS
-               cpu_id = rss_getcpu(c->ix % rss_getnumbuckets());
-               CPU_SETOF(cpu_id, &cpu_mask);
-               taskqueue_start_threads_cpuset(&sq->sq_tq, 1, PI_NET, &cpu_mask,
-                   "%s TX SQ%d.%d CPU%d", c->ifp->if_xname, c->ix, tc, cpu_id);
-#else
-               taskqueue_start_threads(&sq->sq_tq, 1, PI_NET,
-                   "%s TX SQ%d.%d", c->ifp->if_xname, c->ix, tc);
-#endif
-       }
        snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
        mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
            buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM,
@@ -1222,10 +1191,6 @@ mlx5e_create_sq(struct mlx5e_channel *c,
 
        return (0);
 
-err_free_drbr:
-       buf_ring_free(sq->br, M_MLX5EN);
-err_free_sq_db:
-       mlx5e_free_sq_db(sq);
 err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);
 
@@ -1247,12 +1212,6 @@ mlx5e_destroy_sq(struct mlx5e_sq *sq)
        mlx5e_free_sq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
        mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar);
-       if (sq->sq_tq != NULL) {
-               taskqueue_drain(sq->sq_tq, &sq->sq_task);
-               taskqueue_free(sq->sq_tq);
-       }
-       if (sq->br != NULL)
-               buf_ring_free(sq->br, M_MLX5EN);
 }
 
 int
@@ -1350,8 +1309,6 @@ mlx5e_open_sq(struct mlx5e_channel *c,
        err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
        if (err)
                goto err_disable_sq;
-
-       WRITE_ONCE(sq->queue_state, MLX5E_SQ_READY);
 
        return (0);
 

Modified: head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c      Wed Dec  5 14:20:26 2018        (r341577)
+++ head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c      Wed Dec  5 14:20:57 2018        (r341578)
@@ -249,10 +249,6 @@ mlx5e_get_header_size(struct mbuf *mb)
        return (eth_hdr_len);
 }
 
-/*
- * The return value is not going back to the stack because of
- * the drbr
- */
 static int
 mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
 {
@@ -269,13 +265,9 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
        u16 pi;
        u8 opcode;
 
-       /*
-        * Return ENOBUFS if the queue is full, this may trigger reinsertion
-        * of the mbuf into the drbr (see mlx5e_xmit_locked)
-        */
-       if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS))) {
+       /* Return ENOBUFS if the queue is full */
+       if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS)))
                return (ENOBUFS);
-       }
 
        /* Align SQ edge with NOPs to avoid WQE wrap around */
        pi = ((~sq->pc) & sq->wq.sz_m1);
@@ -497,71 +489,14 @@ mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
        atomic_thread_fence_rel();
 
        sq->cc = sqcc;
-
-       if (sq->sq_tq != NULL &&
-           atomic_cmpset_int(&sq->queue_state, MLX5E_SQ_FULL, MLX5E_SQ_READY))
-               taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
 }
 
 static int
 mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
 {
-       struct mbuf *next;
        int err = 0;
 
-       if (likely(mb != NULL)) {
-               /*
-                * If we can't insert mbuf into drbr, try to xmit anyway.
-                * We keep the error we got so we could return that after xmit.
-                */
-               err = drbr_enqueue(ifp, sq->br, mb);
-       }
-
-       /*
-        * Check if the network interface is closed or if the SQ is
-        * being stopped:
-        */
        if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
-           sq->stopped != 0))
-               return (err);
-
-       /* Process the queue */
-       while ((next = drbr_peek(ifp, sq->br)) != NULL) {
-               if (mlx5e_sq_xmit(sq, &next) != 0) {
-                       if (next != NULL) {
-                               drbr_putback(ifp, sq->br, next);
-                               atomic_store_rel_int(&sq->queue_state, MLX5E_SQ_FULL);
-                               break;
-                       }
-               }
-               drbr_advance(ifp, sq->br);
-       }
-       /* Check if we need to write the doorbell */
-       if (likely(sq->doorbell.d64 != 0)) {
-               mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
-               sq->doorbell.d64 = 0;
-       }
-       /*
-        * Check if we need to start the event timer which flushes the
-        * transmit ring on timeout:
-        */
-       if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
-           sq->cev_factor != 1)) {
-               /* start the timer */
-               mlx5e_sq_cev_timeout(sq);
-       } else {
-               /* don't send NOPs yet */
-               sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
-       }
-       return (err);
-}
-
-static int
-mlx5e_xmit_locked_no_br(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
-{
-       int err = 0;
-
-       if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
            sq->stopped != 0)) {
                m_freem(mb);
                return (ENETDOWN);
@@ -624,18 +559,9 @@ mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)
                return (ENXIO);
        }
 
-       if (unlikely(sq->br == NULL)) {
-               /* rate limited traffic */
-               mtx_lock(&sq->lock);
-               ret = mlx5e_xmit_locked_no_br(ifp, sq, mb);
-               mtx_unlock(&sq->lock);
-       } else if (mtx_trylock(&sq->lock)) {
-               ret = mlx5e_xmit_locked(ifp, sq, mb);
-               mtx_unlock(&sq->lock);
-       } else {
-               ret = drbr_enqueue(ifp, sq->br, mb);
-               taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
-       }
+       mtx_lock(&sq->lock);
+       ret = mlx5e_xmit_locked(ifp, sq, mb);
+       mtx_unlock(&sq->lock);
 
        return (ret);
 }
@@ -649,18 +575,4 @@ mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq)
        mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
        mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
        mtx_unlock(&sq->comp_lock);
-}
-
-void
-mlx5e_tx_que(void *context, int pending)
-{
-       struct mlx5e_sq *sq = context;
-       struct ifnet *ifp = sq->ifp;
-
-       if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
-               mtx_lock(&sq->lock);
-               if (!drbr_empty(ifp, sq->br))
-                       mlx5e_xmit_locked(ifp, sq, NULL);
-               mtx_unlock(&sq->lock);
-       }
 }
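
For readers skimming the patch: after this change every caller of
mlx5e_xmit() simply takes the SQ lock and transmits directly while holding
it, and a full ring results in an ENOBUFS error from mlx5e_sq_xmit() rather
than the mbuf being parked in a drbr for a deferred taskqueue run.  A rough,
self-contained sketch of that shape follows; the struct and function names
are hypothetical stand-ins, not the driver's.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* hypothetical stand-in for the send queue, not the driver's mlx5e_sq */
struct fake_sq {
	pthread_mutex_t	lock;
	unsigned	pc;		/* producer counter (posted WQEs) */
	unsigned	cc;		/* consumer counter (completed WQEs) */
	unsigned	wqe_count;	/* ring size in WQEs */
};

static int
sq_has_room_for(struct fake_sq *sq, unsigned n)
{
	return (sq->wqe_count - (sq->pc - sq->cc) >= n);
}

/*
 * Shape of the transmit path after this change: serialize on the SQ lock,
 * post directly, and hand ENOBUFS straight back when the ring is full.
 */
static int
fake_xmit(struct fake_sq *sq)
{
	int err = 0;

	pthread_mutex_lock(&sq->lock);
	if (!sq_has_room_for(sq, 2))
		err = ENOBUFS;
	else
		sq->pc++;		/* post one WQE */
	pthread_mutex_unlock(&sq->lock);
	return (err);
}

int
main(void)
{
	struct fake_sq sq = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wqe_count = 4,
	};

	for (int i = 0; i < 6; i++)
		printf("xmit %d -> %d\n", i, fake_xmit(&sq));
	return (0);
}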