This implements basic XDP redirect support in mlx5 driver.

Notice that the ndo_xdp_xmit() is NOT implemented, because that API
need some changes that this patchset is working towards.

The main purpose of this patch is to have different drivers doing
XDP_REDIRECT to show how different memory models behave in a cross
driver world.

Update(pre-RFCv2 Tariq): Need to DMA unmap page before xdp_do_redirect,
as the return API does not exist yet to keep this mapped.

Update(pre-RFCv3 Saeed): Don't mix XDP_TX and XDP_REDIRECT flushing,
introduce xdpsq.db.redirect_flush boolean.

Signed-off-by: Jesper Dangaard Brouer <bro...@redhat.com>
Reviewed-by: Tariq Toukan <tar...@mellanox.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/en.h    |    1 +
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c |   27 ++++++++++++++++++++---
 2 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h 
b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 4c9360b25532..28cc26debeda 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -398,6 +398,7 @@ struct mlx5e_xdpsq {
        struct {
                struct mlx5e_dma_info     *di;
                bool                       doorbell;
+               bool                       redirect_flush;
        } db;
 
        /* read only */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 8cce90dc461d..6dcc3e8fbd3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -236,14 +236,20 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq 
*rq,
        return 0;
 }
 
+static inline void mlx5e_page_dma_unmap(struct mlx5e_rq *rq,
+                                       struct mlx5e_dma_info *dma_info)
+{
+       dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
+                      rq->buff.map_dir);
+}
+
 void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
                        bool recycle)
 {
        if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info))
                return;
 
-       dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
-                      rq->buff.map_dir);
+       mlx5e_page_dma_unmap(rq, dma_info);
        put_page(dma_info->page);
 }
 
@@ -822,9 +828,10 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
                                   struct mlx5e_dma_info *di,
                                   void *va, u16 *rx_headroom, u32 *len)
 {
-       const struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
+       struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
        struct xdp_buff xdp;
        u32 act;
+       int err;
 
        if (!prog)
                return false;
@@ -845,6 +852,15 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
                if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
                        trace_xdp_exception(rq->netdev, prog, act);
                return true;
+       case XDP_REDIRECT:
+               /* When XDP enabled then page-refcnt==1 here */
+               err = xdp_do_redirect(rq->netdev, &xdp, prog);
+               if (!err) {
+                       rq->wqe.xdp_xmit = true; /* XDP xmit owns page */
+                       rq->xdpsq.db.redirect_flush = true;
+                       mlx5e_page_dma_unmap(rq, di);
+               }
+               return true;
        default:
                bpf_warn_invalid_xdp_action(act);
        case XDP_ABORTED:
@@ -1107,6 +1123,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
                xdpsq->db.doorbell = false;
        }
 
+       if (xdpsq->db.redirect_flush) {
+               xdp_do_flush_map();
+               xdpsq->db.redirect_flush = false;
+       }
+
        mlx5_cqwq_update_db_record(&cq->wq);
 
        /* ensure cq space is freed before enabling more cqes */

Reply via email to