In message: [PATCH linux-yocto v5.15/standard/preempt-rt/cn-sdkv5.15/octeon] Revert "octeontx2-af: Remove unrelated changes introduced when merging v5.15/standard/base"
on 20/08/2024 Kevin Hao wrote:

> From: Kevin Hao <kexin....@windriver.com>
> 
> This reverts commit e6a43185a1bd93c7763fffb285619ee3ba4ec407.
> The changes in commit e6a43185a1bd are irrelevant only for the
> non-octeon branches; they are necessary for the octeon branches.
> 
> Signed-off-by: Kevin Hao <kexin....@windriver.com>
> ---
> Hi Bruce,
> 
> Please merge this on v5.15/standard/preempt-rt/cn-sdkv5.15/octeon branch.

done.

Bruce

> ---
>  .../net/ethernet/marvell/octeontx2/af/rvu_nix.c    | 1962 ++++++++++++++++++--
>  1 file changed, 1850 insertions(+), 112 deletions(-)
> 
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
> index 641f1d969bb7..19a59f8dbf4d 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
> @@ -12,8 +12,10 @@
>  #include "rvu_reg.h"
>  #include "rvu.h"
>  #include "npc.h"
> +#include "mcs.h"
>  #include "cgx.h"
>  #include "lmac_common.h"
> +#include "rvu_npc_hash.h"
>  
>  static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
>  static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
> @@ -70,12 +72,19 @@ enum nix_makr_fmt_indexes {
>  /* For now considering MC resources needed for broadcast
>   * pkt replication only. i.e 256 HWVFs + 12 PFs.
>   */
> -#define MC_TBL_SIZE  MC_TBL_SZ_512
> -#define MC_BUF_CNT   MC_BUF_CNT_128
> +#define MC_TBL_SIZE  MC_TBL_SZ_2K
> +#define MC_BUF_CNT   MC_BUF_CNT_1024
> +
> +#define MC_TX_MAX    2048
>  
>  struct mce {
>       struct hlist_node       node;
> +     u32                     rq_rss_index;
>       u16                     pcifunc;
> +     u16                     channel;
> +     u8                      dest_type;
> +     u8                      is_active;
> +     u8                      reserved[2];
>  };
>  
>  int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
> @@ -163,18 +172,33 @@ static void nix_mce_list_init(struct nix_mce_list *list, int max)
>       list->max = max;
>  }
>  
> -static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
> +static int nix_alloc_mce_list(struct nix_mcast *mcast, int count, u8 dir)
>  {
> +     struct rsrc_bmap *mce_counter;
>       int idx;
>  
>       if (!mcast)
> -             return 0;
> +             return -EINVAL;
>  
> -     idx = mcast->next_free_mce;
> -     mcast->next_free_mce += count;
> +     mce_counter = &mcast->mce_counter[dir];
> +     if (!rvu_rsrc_check_contig(mce_counter, count))
> +             return -ENOSPC;
> +
> +     idx = rvu_alloc_rsrc_contig(mce_counter, count);
>       return idx;
>  }
>  
> +static void nix_free_mce_list(struct nix_mcast *mcast, int count, int start, u8 dir)
> +{
> +     struct rsrc_bmap *mce_counter;
> +
> +     if (!mcast)
> +             return;
> +
> +     mce_counter = &mcast->mce_counter[dir];
> +     rvu_free_rsrc_contig(mce_counter, count, start);
> +}
> +
>  struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
>  {
>       int nix_blkaddr = 0, i = 0;
> @@ -190,6 +214,18 @@ struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
>       return NULL;
>  }
>  
> +int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type)
> +{
> +     if (hw->cap.nix_multiple_dwrr_mtu)
> +             return NIX_AF_DWRR_MTUX(smq_link_type);
> +
> +     if (smq_link_type == SMQ_LINK_TYPE_SDP)
> +             return NIX_AF_DWRR_SDP_MTU;
> +
> +     /* Here it's the same reg for RPM and LBK */
> +     return NIX_AF_DWRR_RPM_MTU;
> +}
> +
>  u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
>  {
>       dwrr_mtu &= 0x1FULL;
> @@ -322,8 +358,11 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
>               pfvf->tx_chan_cnt = 1;
>               rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;
>  
> -             cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
> -             rvu_npc_set_pkind(rvu, pkind, pfvf);
> +             if (rvu_cgx_is_pkind_config_permitted(rvu, pcifunc)) {
> +                     cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
> +                                   pkind);
> +                     rvu_npc_set_pkind(rvu, pkind, pfvf);
> +             }
>  
>               break;
>       case NIX_INTF_TYPE_LBK:
> @@ -463,14 +502,190 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
>       rvu_cgx_disable_dmac_entries(rvu, pcifunc);
>  }
>  
> -int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
> -                                 struct nix_bp_cfg_req *req,
> +#define NIX_BPIDS_PER_LMAC   8
> +#define NIX_BPIDS_PER_CPT    1
> +static int nix_setup_bpids(struct rvu *rvu, struct nix_hw *hw, int blkaddr)
> +{
> +     struct nix_bp *bp = &hw->bp;
> +     int err, max_bpids;
> +     u64 cfg;
> +
> +     cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
> +     max_bpids = (cfg >> 12) & 0xFFF;
> +
> +     /* Reserve the BPIds for CGX and SDP */
> +     bp->cgx_bpid_cnt = rvu->hw->cgx_links * NIX_BPIDS_PER_LMAC;
> +     bp->sdp_bpid_cnt = rvu->hw->sdp_links * (cfg & 0xFFF);
> +     bp->free_pool_base = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt +
> +                          NIX_BPIDS_PER_CPT;
> +     bp->bpids.max = max_bpids - bp->free_pool_base;
> +
> +     err = rvu_alloc_bitmap(&bp->bpids);
> +     if (err)
> +             return err;
> +
> +     bp->fn_map = devm_kcalloc(rvu->dev, bp->bpids.max,
> +                               sizeof(u16), GFP_KERNEL);
> +     if (!bp->fn_map)
> +             return -ENOMEM;
> +
> +     bp->intf_map = devm_kcalloc(rvu->dev, bp->bpids.max,
> +                                 sizeof(u8), GFP_KERNEL);
> +     if (!bp->intf_map)
> +             return -ENOMEM;
> +
> +     bp->ref_cnt = devm_kcalloc(rvu->dev, bp->bpids.max,
> +                                sizeof(u8), GFP_KERNEL);
> +     if (!bp->ref_cnt)
> +             return -ENOMEM;
> +
> +     return 0;
> +}
> +
> +void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
> +{
> +     int blkaddr, bpid, err;
> +     struct nix_hw *nix_hw;
> +     struct nix_bp *bp;
> +
> +     if (!is_afvf(pcifunc))
> +             return;
> +
> +     err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
> +     if (err)
> +             return;
> +
> +     bp = &nix_hw->bp;
> +
> +     mutex_lock(&rvu->rsrc_lock);
> +     for (bpid = 0; bpid < bp->bpids.max; bpid++) {
> +             if (bp->fn_map[bpid] == pcifunc) {
> +                     bp->ref_cnt[bpid]--;
> +                     if (bp->ref_cnt[bpid])
> +                             continue;
> +                     rvu_free_rsrc(&bp->bpids, bpid);
> +                     bp->fn_map[bpid] = 0;
> +             }
> +     }
> +     mutex_unlock(&rvu->rsrc_lock);
> +}
> +
> +int rvu_mbox_handler_nix_rx_chan_cfg(struct rvu *rvu,
> +                                  struct nix_rx_chan_cfg *req,
> +                                  struct nix_rx_chan_cfg *rsp)
> +{
> +     struct rvu_pfvf *pfvf;
> +     int blkaddr;
> +     u16 chan;
> +
> +     pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
> +     blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
> +     chan = pfvf->rx_chan_base + req->chan;
> +
> +     if (req->type == NIX_INTF_TYPE_CPT)
> +             chan = chan | BIT(11);
> +
> +     if (req->read) {
> +             rsp->val = rvu_read64(rvu, blkaddr,
> +                                   NIX_AF_RX_CHANX_CFG(chan));
> +             rsp->chan = req->chan;
> +     } else {
> +             rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), req->val);
> +     }
> +     return 0;
> +}
> +
> +int rvu_mbox_handler_nix_alloc_bpids(struct rvu *rvu,
> +                                  struct nix_alloc_bpid_req *req,
> +                                  struct nix_bpids *rsp)
> +{
> +     u16 pcifunc = req->hdr.pcifunc;
> +     struct nix_hw *nix_hw;
> +     int blkaddr, cnt = 0;
> +     struct nix_bp *bp;
> +     int bpid, err;
> +
> +     err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
> +     if (err)
> +             return err;
> +
> +     bp = &nix_hw->bp;
> +
> +     /* Interfaces like SSO use the same BPID across multiple
> +      * applications. Find the BPID if it is already allocated,
> +      * or allocate a new one.
> +      */
> +     mutex_lock(&rvu->rsrc_lock);
> +     if (req->type > NIX_INTF_TYPE_CPT || req->type == NIX_INTF_TYPE_LBK) {
> +             for (bpid = 0; bpid < bp->bpids.max; bpid++) {
> +                     if (bp->intf_map[bpid] == req->type) {
> +                             rsp->bpids[cnt] = bpid + bp->free_pool_base;
> +                             rsp->bpid_cnt++;
> +                             bp->ref_cnt[bpid]++;
> +                             cnt++;
> +                     }
> +             }
> +             if (rsp->bpid_cnt)
> +                     goto exit;
> +     }
> +
> +     for (cnt = 0; cnt < req->bpid_cnt; cnt++) {
> +             bpid = rvu_alloc_rsrc(&bp->bpids);
> +             if (bpid < 0)
> +                     goto exit;
> +             rsp->bpids[cnt] = bpid + bp->free_pool_base;
> +             bp->intf_map[bpid] = req->type;
> +             bp->fn_map[bpid] = pcifunc;
> +             bp->ref_cnt[bpid]++;
> +             rsp->bpid_cnt++;
> +     }
> +exit:
> +     mutex_unlock(&rvu->rsrc_lock);
> +     return 0;
> +}
> +
> +int rvu_mbox_handler_nix_free_bpids(struct rvu *rvu,
> +                                 struct nix_bpids *req,
>                                   struct msg_rsp *rsp)
>  {
>       u16 pcifunc = req->hdr.pcifunc;
> +     int blkaddr, cnt, err, id;
> +     struct nix_hw *nix_hw;
> +     struct nix_bp *bp;
> +     u16 bpid;
> +
> +     err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
> +     if (err)
> +             return err;
> +
> +     bp = &nix_hw->bp;
> +     mutex_lock(&rvu->rsrc_lock);
> +     for (cnt = 0; cnt < req->bpid_cnt; cnt++) {
> +             bpid = req->bpids[cnt] - bp->free_pool_base;
> +             bp->ref_cnt[bpid]--;
> +             if (bp->ref_cnt[bpid])
> +                     continue;
> +             rvu_free_rsrc(&bp->bpids, bpid);
> +             for (id = 0; id < bp->bpids.max; id++) {
> +                     if (bp->fn_map[id] == pcifunc)
> +                             bp->fn_map[id] = 0;
> +             }
> +     }
> +     mutex_unlock(&rvu->rsrc_lock);
> +     return 0;
> +}
> +
> +static int nix_bp_disable(struct rvu *rvu,
> +                       struct nix_bp_cfg_req *req,
> +                       struct msg_rsp *rsp, bool cpt_link)
> +{
> +     u16 pcifunc = req->hdr.pcifunc;
> +     int blkaddr, pf, type, err;
>       struct rvu_pfvf *pfvf;
> -     int blkaddr, pf, type;
> +     struct nix_hw *nix_hw;
>       u16 chan_base, chan;
> +     struct nix_bp *bp;
> +     u16 chan_v, bpid;
>       u64 cfg;
>  
>       pf = rvu_get_pf(pcifunc);
> @@ -478,41 +693,89 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
>       if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
>               return 0;
>  
> -     pfvf = rvu_get_pfvf(rvu, pcifunc);
> -     blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
> +     if (is_sdp_pfvf(pcifunc))
> +             type = NIX_INTF_TYPE_SDP;
>  
> +     if (cpt_link && !rvu->hw->cpt_links)
> +             return 0;
> +
> +     pfvf = rvu_get_pfvf(rvu, pcifunc);
> +     err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
> +     if (err)
> +             return err;
> +
> +     bp = &nix_hw->bp;
>       chan_base = pfvf->rx_chan_base + req->chan_base;
> +
> +     if (cpt_link) {
> +             type = NIX_INTF_TYPE_CPT;
> +             cfg = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
> +             /* MODE=0 or MODE=1 => CPT looks only at channels starting from the CPT chan base */
> +             cfg = (cfg >> 20) & 0x3;
> +             if (cfg != 2)
> +                     chan_base = rvu->hw->cpt_chan_base;
> +     }
> +
>       for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
> -             cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
> -             rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
> +             /* CPT channel for a given link channel is always
> +              * assumed to be BIT(11) set in link channel.
> +              */
> +             if (cpt_link)
> +                     chan_v = chan | BIT(11);
> +             else
> +                     chan_v = chan;
> +
> +             cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
> +             rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
>                           cfg & ~BIT_ULL(16));
> +
> +             if (type == NIX_INTF_TYPE_LBK) {
> +                     bpid = cfg & GENMASK(8, 0);
> +                     mutex_lock(&rvu->rsrc_lock);
> +                     rvu_free_rsrc(&bp->bpids, bpid - bp->free_pool_base);
> +                     for (bpid = 0; bpid < bp->bpids.max; bpid++) {
> +                             if (bp->fn_map[bpid] == pcifunc) {
> +                                     bp->fn_map[bpid] = 0;
> +                                     bp->ref_cnt[bpid] = 0;
> +                             }
> +                     }
> +                     mutex_unlock(&rvu->rsrc_lock);
> +             }
>       }
>       return 0;
>  }
>  
> +int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
> +                                 struct nix_bp_cfg_req *req,
> +                                 struct msg_rsp *rsp)
> +{
> +     return nix_bp_disable(rvu, req, rsp, false);
> +}
> +
> +int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu,
> +                                     struct nix_bp_cfg_req *req,
> +                                     struct msg_rsp *rsp)
> +{
> +     return nix_bp_disable(rvu, req, rsp, true);
> +}
> +
>  static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
>                           int type, int chan_id)
>  {
> -     int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
> -     u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
> +     int bpid, blkaddr, sdp_chan_base, err;
>       struct rvu_hwinfo *hw = rvu->hw;
>       struct rvu_pfvf *pfvf;
> +     struct nix_hw *nix_hw;
>       u8 cgx_id, lmac_id;
> -     u64 cfg;
> -
> -     blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
> -     cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
> -     lmac_chan_cnt = cfg & 0xFF;
> -
> -     cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
> -     lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
> -
> -     cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
> -     sdp_chan_cnt = cfg & 0xFFF;
> -     sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
> +     struct nix_bp *bp;
>  
>       pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
>  
> +     err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
> +     if (err)
> +             return err;
> +
> +     bp = &nix_hw->bp;
>       /* Backpressure IDs range division
>        * CGX channles are mapped to (0 - 191) BPIDs
>        * LBK channles are mapped to (192 - 255) BPIDs
> @@ -525,38 +788,52 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
>        */
>       switch (type) {
>       case NIX_INTF_TYPE_CGX:
> -             if ((req->chan_base + req->chan_cnt) > 15)
> -                     return -EINVAL;
> +             if ((req->chan_base + req->chan_cnt) > NIX_BPIDS_PER_LMAC)
> +                     return NIX_AF_ERR_INVALID_BPID_REQ;
> +
>               rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
>               /* Assign bpid based on cgx, lmac and chan id */
> -             bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
> -                     (lmac_id * lmac_chan_cnt) + req->chan_base;
> +             bpid = (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) +
> +                     (lmac_id * NIX_BPIDS_PER_LMAC) + req->chan_base;
>  
>               if (req->bpid_per_chan)
>                       bpid += chan_id;
> -             if (bpid > cgx_bpid_cnt)
> -                     return -EINVAL;
> +             if (bpid > bp->cgx_bpid_cnt)
> +                     return NIX_AF_ERR_INVALID_BPID;
> +             break;
> +     case NIX_INTF_TYPE_CPT:
> +             bpid = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt;
>               break;
> -
>       case NIX_INTF_TYPE_LBK:
> -             if ((req->chan_base + req->chan_cnt) > 63)
> -                     return -EINVAL;
> -             bpid = cgx_bpid_cnt + req->chan_base;
> -             if (req->bpid_per_chan)
> -                     bpid += chan_id;
> -             if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
> -                     return -EINVAL;
> +             /* Alloc bpid from the free pool */
> +             mutex_lock(&rvu->rsrc_lock);
> +             bpid = rvu_alloc_rsrc(&bp->bpids);
> +             if (bpid < 0) {
> +                     mutex_unlock(&rvu->rsrc_lock);
> +                     return NIX_AF_ERR_INVALID_BPID;
> +             }
> +             bp->fn_map[bpid] = req->hdr.pcifunc;
> +             bp->ref_cnt[bpid]++;
> +             bpid += bp->free_pool_base;
> +             mutex_unlock(&rvu->rsrc_lock);
>               break;
>       case NIX_INTF_TYPE_SDP:
> -             if ((req->chan_base + req->chan_cnt) > 255)
> -                     return -EINVAL;
> +             if ((req->chan_base + req->chan_cnt) > bp->sdp_bpid_cnt)
> +                     return NIX_AF_ERR_INVALID_BPID_REQ;
> +
> +             /* Handle usecase of 2 SDP blocks */
> +             if (!hw->cap.programmable_chans)
> +                     sdp_chan_base = pfvf->rx_chan_base - NIX_CHAN_SDP_CH_START;
> +             else
> +                     sdp_chan_base = pfvf->rx_chan_base - hw->sdp_chan_base;
> +
> +             bpid = bp->cgx_bpid_cnt + req->chan_base + sdp_chan_base;
>  
> -             bpid = sdp_bpid_cnt + req->chan_base;
>               if (req->bpid_per_chan)
>                       bpid += chan_id;
>  
> -             if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
> -                     return -EINVAL;
> +             if (bpid > (bp->cgx_bpid_cnt + bp->sdp_bpid_cnt))
> +                     return NIX_AF_ERR_INVALID_BPID;
>               break;
>       default:
>               return -EINVAL;
> @@ -564,15 +841,17 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
>       return bpid;
>  }
>  
> -int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
> -                                struct nix_bp_cfg_req *req,
> -                                struct nix_bp_cfg_rsp *rsp)
> +static int nix_bp_enable(struct rvu *rvu,
> +                      struct nix_bp_cfg_req *req,
> +                      struct nix_bp_cfg_rsp *rsp,
> +                      bool cpt_link)
>  {
>       int blkaddr, pf, type, chan_id = 0;
>       u16 pcifunc = req->hdr.pcifunc;
> +     s16 bpid, bpid_base = -1;
>       struct rvu_pfvf *pfvf;
>       u16 chan_base, chan;
> -     s16 bpid, bpid_base;
> +     u16 chan_v;
>       u64 cfg;
>  
>       pf = rvu_get_pf(pcifunc);
> @@ -585,25 +864,46 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
>           type != NIX_INTF_TYPE_SDP)
>               return 0;
>  
> +     if (cpt_link && !rvu->hw->cpt_links)
> +             return 0;
> +
>       pfvf = rvu_get_pfvf(rvu, pcifunc);
>       blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
>  
> -     bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
>       chan_base = pfvf->rx_chan_base + req->chan_base;
> -     bpid = bpid_base;
> +
> +     if (cpt_link) {
> +             type = NIX_INTF_TYPE_CPT;
> +             cfg = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
> +             /* MODE=0 or MODE=1 => CPT looks only at channels starting from the CPT chan base */
> +             cfg = (cfg >> 20) & 0x3;
> +             if (cfg != 2)
> +                     chan_base = rvu->hw->cpt_chan_base;
> +     }
>  
>       for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
> +             bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
>               if (bpid < 0) {
>                       dev_warn(rvu->dev, "Fail to enable backpressure\n");
>                       return -EINVAL;
>               }
> +             if (bpid_base < 0)
> +                     bpid_base = bpid;
>  
> -             cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
> +             /* CPT channel for a given link channel is always
> +              * assumed to be BIT(11) set in link channel.
> +              */
> +
> +             if (cpt_link)
> +                     chan_v = chan | BIT(11);
> +             else
> +                     chan_v = chan;
> +
> +             cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
>               cfg &= ~GENMASK_ULL(8, 0);
> -             rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
> +             rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
>                           cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
>               chan_id++;
> -             bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
>       }
>  
>       for (chan = 0; chan < req->chan_cnt; chan++) {
> @@ -618,6 +918,20 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
>       return 0;
>  }
>  
> +int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
> +                                struct nix_bp_cfg_req *req,
> +                                struct nix_bp_cfg_rsp *rsp)
> +{
> +     return nix_bp_enable(rvu, req, rsp, false);
> +}
> +
> +int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu,
> +                                    struct nix_bp_cfg_req *req,
> +                                    struct nix_bp_cfg_rsp *rsp)
> +{
> +     return nix_bp_enable(rvu, req, rsp, true);
> +}
> +
>  static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
>                                u64 format, bool v4, u64 *fidx)
>  {
> @@ -782,17 +1096,51 @@ static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
>       return 0;
>  }
>  
> +static void nix_aq_reset(struct rvu *rvu, struct rvu_block *block)
> +{
> +     struct admin_queue *aq = block->aq;
> +     u64 reg, head, tail;
> +     int timeout = 2000;
> +
> +     /* check if any AQ err is set and reset the AQ */
> +     reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
> +     head = (reg >> 4) & AQ_PTR_MASK;
> +     tail = (reg >> 36) & AQ_PTR_MASK;
> +     dev_err(rvu->dev, "AQ error occurred head:0x%llx tail:%llx 
> status:%llx\n", head, tail, reg);
> +
> +     /* Check if busy bit is set */
> +     while (reg & BIT_ULL(62)) {
> +             udelay(1);
> +             timeout--;
> +             if (!timeout)
> +                     dev_err(rvu->dev, "timeout waiting for busy bit to 
> clear\n");
> +     }
> +     /* Reset the AQ base and result */
> +     memset(aq->inst->base, 0, sizeof(struct nix_aq_inst_s) * Q_COUNT(AQ_SIZE));
> +     memset(aq->res->base, 0, sizeof(struct nix_aq_res_s) * Q_COUNT(AQ_SIZE));
> +     /* Make sure the AQ memory is reset */
> +     wmb();
> +     reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
> +     reg |= BIT_ULL(63);
> +     rvu_write64(rvu, block->addr, NIX_AF_AQ_STATUS, reg);
> +     dev_info(rvu->dev, "AQ status after reset:0x%llx\n",
> +              rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS));
> +}
> +
>  static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
>                              struct nix_aq_inst_s *inst)
>  {
>       struct admin_queue *aq = block->aq;
>       struct nix_aq_res_s *result;
> -     int timeout = 1000;
> -     u64 reg, head;
> +     u64 reg, head, intr;
> +     int timeout = 2000;
>       int ret;
>  
> -     result = (struct nix_aq_res_s *)aq->res->base;
> +     reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
> +     if (reg & BIT_ULL(63))
> +             nix_aq_reset(rvu, block);
>  
> +     result = (struct nix_aq_res_s *)aq->res->base;
>       /* Get current head pointer where to append this instruction */
>       reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
>       head = (reg >> 4) & AQ_PTR_MASK;
> @@ -806,18 +1154,28 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
>       /* Ring the doorbell and wait for result */
>       rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
>       while (result->compcode == NIX_AQ_COMP_NOTDONE) {
> +             intr = rvu_read64(rvu, block->addr, NIX_AF_ERR_INT);
>               cpu_relax();
>               udelay(1);
>               timeout--;
> -             if (!timeout)
> +             if (!timeout) {
> +                     dev_err_ratelimited(rvu->dev,
> +                                         "%s wait timeout intr=0x%llx 
> status=0x%llx compcode:%d\n",
> +                                         __func__, intr,
> +                                        rvu_read64(rvu, block->addr, 
> NIX_AF_AQ_STATUS),
> +                                         result->compcode);
>                       return -EBUSY;
> +             }
>       }
>  
>       if (result->compcode != NIX_AQ_COMP_GOOD) {
>               /* TODO: Replace this with some error code */
> +             dev_err(rvu->dev, "AQ failed with error:%d\n", 
> result->compcode);
>               if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
>                   result->compcode == NIX_AQ_COMP_LOCKERR ||
>                   result->compcode == NIX_AQ_COMP_CTX_POISON) {
> +                     dev_err(rvu->dev, "AQ failed due to cache line 
> error:%d\n",
> +                             result->compcode);
>                       ret = rvu_ndc_fix_locked_cacheline(rvu, 
> BLKADDR_NDC_NIX0_RX);
>                       ret |= rvu_ndc_fix_locked_cacheline(rvu, 
> BLKADDR_NDC_NIX0_TX);
>                       ret |= rvu_ndc_fix_locked_cacheline(rvu, 
> BLKADDR_NDC_NIX1_RX);
> @@ -1210,7 +1568,9 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
>               aq_req.cq.ena = 0;
>               aq_req.cq_mask.ena = 1;
>               aq_req.cq.bp_ena = 0;
> +             aq_req.cq.lbp_ena = 0;
>               aq_req.cq_mask.bp_ena = 1;
> +             aq_req.cq_mask.lbp_ena = 1;
>               q_cnt = pfvf->cq_ctx->qsize;
>               bmap = pfvf->cq_bmap;
>       }
> @@ -1292,6 +1652,8 @@ int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
>       return rvu_nix_aq_enq_inst(rvu, req, rsp);
>  }
>  #endif
> +EXPORT_SYMBOL(rvu_mbox_handler_nix_aq_enq);
> +
>  /* CN10K mbox handler */
>  int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
>                                     struct nix_cn10k_aq_enq_req *req,
> @@ -1300,6 +1662,7 @@ int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
>       return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
>                                 (struct nix_aq_enq_rsp *)rsp);
>  }
> +EXPORT_SYMBOL(rvu_mbox_handler_nix_cn10k_aq_enq);
>  
>  int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
>                                      struct hwctx_disable_req *req,
> @@ -1313,10 +1676,10 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
>                                 struct nix_lf_alloc_rsp *rsp)
>  {
>       int nixlf, qints, hwctx_size, intf, err, rc = 0;
> +     struct rvu_pfvf *pfvf, *parent_pf;
>       struct rvu_hwinfo *hw = rvu->hw;
>       u16 pcifunc = req->hdr.pcifunc;
>       struct rvu_block *block;
> -     struct rvu_pfvf *pfvf;
>       u64 cfg, ctx_cfg;
>       int blkaddr;
>  
> @@ -1326,6 +1689,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
>       if (req->way_mask)
>               req->way_mask &= 0xFFFF;
>  
> +     parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
>       pfvf = rvu_get_pfvf(rvu, pcifunc);
>       blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
>       if (!pfvf->nixlf || blkaddr < 0)
> @@ -1484,8 +1848,10 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
>       rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
>  
>       /* Configure pkind for TX parse config */
> -     cfg = NPC_TX_DEF_PKIND;
> -     rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
> +     if (rvu_cgx_is_pkind_config_permitted(rvu, pcifunc)) {
> +             cfg = NPC_TX_DEF_PKIND;
> +             rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
> +     }
>  
>       intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
>       if (is_sdp_pfvf(pcifunc))
> @@ -1503,6 +1869,10 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
>       rvu_write64(rvu, blkaddr,
>                   NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
>                   VTAGSIZE_T4 | VTAG_STRIP);
> +     /* Configure RX VTAG Type 6 (strip) for fdsa */
> +     rvu_write64(rvu, blkaddr,
> +                 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE6),
> +                 VTAGSIZE_T4 | VTAG_STRIP | VTAG_CAPTURE);
>  
>       goto exit;
>  
> @@ -1531,6 +1901,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
>       cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
>       rsp->qints = ((cfg >> 12) & 0xFFF);
>       rsp->cints = ((cfg >> 24) & 0xFFF);
> +     rsp->hw_rx_tstamp_en = parent_pf->hw_rx_tstamp_en;
>       rsp->cgx_links = hw->cgx_links;
>       rsp->lbk_links = hw->lbk_links;
>       rsp->sdp_links = hw->sdp_links;
> @@ -1562,6 +1933,9 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
>       else
>               rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
>  
> +     /* Reset SPI to SA index table */
> +     rvu_nix_free_spi_to_sa_table(rvu, pcifunc);
> +
>       /* Free any tx vtag def entries used by this NIX LF */
>       if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
>               nix_free_tx_vtag_entries(rvu, pcifunc);
> @@ -1707,6 +2081,42 @@ handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
>       return true;
>  }
>  
> +static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
> +                               int lvl, int schq)
> +{
> +     u64 tlx_parent = 0, tlx_schedule = 0;
> +
> +     switch (lvl) {
> +     case NIX_TXSCH_LVL_TL2:
> +             tlx_parent   = NIX_AF_TL2X_PARENT(schq);
> +             tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
> +             break;
> +     case NIX_TXSCH_LVL_TL3:
> +             tlx_parent   = NIX_AF_TL3X_PARENT(schq);
> +             tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
> +             break;
> +     case NIX_TXSCH_LVL_TL4:
> +             tlx_parent   = NIX_AF_TL4X_PARENT(schq);
> +             tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
> +             break;
> +     case NIX_TXSCH_LVL_MDQ:
> +             /* no need to reset SMQ_CFG as HW clears this CSR
> +              * on SMQ flush
> +              */
> +             tlx_parent   = NIX_AF_MDQX_PARENT(schq);
> +             tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
> +             break;
> +     default:
> +             return;
> +     }
> +
> +     if (tlx_parent)
> +             rvu_write64(rvu, blkaddr, tlx_parent, 0x0);
> +
> +     if (tlx_schedule)
> +             rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
> +}
> +
>  /* Disable shaping of pkts by a scheduler queue
>   * at a given scheduler level.
>   */
> @@ -1996,6 +2406,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
>  {
>       struct rvu_hwinfo *hw = rvu->hw;
>       u16 pcifunc = req->hdr.pcifunc;
> +     struct rvu_pfvf *parent_pf;
>       int link, blkaddr, rc = 0;
>       int lvl, idx, start, end;
>       struct nix_txsch *txsch;
> @@ -2012,6 +2423,8 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
>       if (!nix_hw)
>               return NIX_AF_ERR_INVALID_NIXBLK;
>  
> +     parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
> +
>       mutex_lock(&rvu->rsrc_lock);
>  
>       /* Check if request is valid as per HW capabilities
> @@ -2056,6 +2469,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
>                               pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
>                       nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
>                       nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
> +                     nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
>               }
>  
>               for (idx = 0; idx < req->schq[lvl]; idx++) {
> @@ -2065,11 +2479,12 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
>                               pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
>                       nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
>                       nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
> +                     nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
>               }
>       }
>  
>       rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
> -     rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
> +     rsp->aggr_lvl_rr_prio = parent_pf->tl1_rr_prio;
>       rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
>                                      NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
>                                      NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
> @@ -2081,13 +2496,156 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
>       return rc;
>  }
>  
> +static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
> +                                struct nix_smq_flush_ctx *smq_flush_ctx)
> +{
> +     struct nix_smq_tree_ctx *smq_tree_ctx;
> +     u64 parent_off, regval;
> +     u16 schq;
> +     int lvl;
> +
> +     smq_flush_ctx->smq = smq;
> +
> +     schq = smq;
> +     for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
> +             smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
> +             if (lvl == NIX_TXSCH_LVL_TL1) {
> +                     smq_flush_ctx->tl1_schq = schq;
> +                     smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
> +                     smq_tree_ctx->pir_off = 0;
> +                     smq_tree_ctx->pir_val = 0;
> +                     parent_off = 0;
> +             } else if (lvl == NIX_TXSCH_LVL_TL2) {
> +                     smq_flush_ctx->tl2_schq = schq;
> +                     smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
> +                     smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
> +                     parent_off = NIX_AF_TL2X_PARENT(schq);
> +             } else if (lvl == NIX_TXSCH_LVL_TL3) {
> +                     smq_flush_ctx->tl3_schq = schq;
> +                     smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq);
> +                     smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq);
> +                     parent_off = NIX_AF_TL3X_PARENT(schq);
> +             } else if (lvl == NIX_TXSCH_LVL_TL4) {
> +                     smq_flush_ctx->tl4_schq = schq;
> +                     smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq);
> +                     smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq);
> +                     parent_off = NIX_AF_TL4X_PARENT(schq);
> +             } else if (lvl == NIX_TXSCH_LVL_MDQ) {
> +                     smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq);
> +                     smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq);
> +                     parent_off = NIX_AF_MDQX_PARENT(schq);
> +             }
> +             /* save cir/pir register values */
> +             smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off);
> +             if (smq_tree_ctx->pir_off)
> +                     smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off);
> +
> +             /* get parent txsch node */
> +             if (parent_off) {
> +                     regval = rvu_read64(rvu, blkaddr, parent_off);
> +                     schq = (regval >> 16) & 0x1FF;
> +             }
> +     }
> +}
> +
> +static void nix_dump_smq_status(struct rvu *rvu, int blkaddr, struct nix_smq_flush_ctx *ctx)
> +{
> +     dev_info(rvu->dev, "smq:%d tl1_schq:%d tl2:%d tl3:%d tl4:%d\n", 
> ctx->smq, ctx->tl1_schq,
> +              ctx->tl2_schq, ctx->tl3_schq, ctx->tl4_schq);
> +
> +     dev_info(rvu->dev, "NIX_AF_SMQX_CFG:0x%llx\n",
> +              rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(ctx->smq)));
> +     dev_info(rvu->dev, "NIX_AF_SMQX_STATUS:0x%llx\n",
> +              rvu_read64(rvu, blkaddr, NIX_AF_SMQX_STATUS(ctx->smq)));
> +     dev_info(rvu->dev, "NIX_AF_MDQX_MD_COUNT:0x%llx\n",
> +              rvu_read64(rvu, blkaddr, NIX_AF_MDQX_MD_COUNT));
> +     dev_info(rvu->dev, "NIX_AF_MDQX_IN_MD_COUNT:0x%llx\n",
> +              rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(ctx->smq)));
> +     dev_info(rvu->dev, "NIX_AF_MDQX_OUT_MD_COUNT:0x%llx\n",
> +              rvu_read64(rvu, blkaddr, NIX_AF_MDQX_OUT_MD_COUNT(ctx->smq)));
> +     dev_info(rvu->dev, "NIX_AF_TL1X_SW_XOFF:0x%llx\n",
> +              rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(ctx->tl1_schq)));
> +     dev_info(rvu->dev, "NIX_AF_TL2X_SW_XOFF=0x%llx\n",
> +              rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(ctx->tl2_schq)));
> +}
> +
> +static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
> +                                   struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
> +{
> +     struct nix_txsch *txsch;
> +     struct nix_hw *nix_hw;
> +     u64 regoff;
> +     int tl2;
> +
> +     nix_hw = get_nix_hw(rvu->hw, blkaddr);
> +     if (!nix_hw)
> +             return;
> +
> +     /* loop through all TL2s with matching PF_FUNC */
> +     txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
> +     for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
> +             /* skip the smq(flush) TL2 */
> +             if (tl2 == smq_flush_ctx->tl2_schq)
> +                     continue;
> +             /* skip unused TL2s */
> +             if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
> +                     continue;
> +             /* skip if PF_FUNC doesn't match */
> +             if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
> +                 (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
> +                                 ~RVU_PFVF_FUNC_MASK)))
> +                     continue;
> +             /* enable/disable XOFF */
> +             regoff = NIX_AF_TL2X_SW_XOFF(tl2);
> +             if (enable)
> +                     rvu_write64(rvu, blkaddr, regoff, 0x1);
> +             else
> +                     rvu_write64(rvu, blkaddr, regoff, 0x0);
> +     }
> +}
> +
> +static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr,
> +                                   struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
> +{
> +     u64 cir_off, pir_off, cir_val, pir_val;
> +     struct nix_smq_tree_ctx *smq_tree_ctx;
> +     int lvl;
> +
> +     for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
> +             smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
> +             cir_off = smq_tree_ctx->cir_off;
> +             cir_val = smq_tree_ctx->cir_val;
> +             pir_off = smq_tree_ctx->pir_off;
> +             pir_val = smq_tree_ctx->pir_val;
> +
> +             if (enable) {
> +                     rvu_write64(rvu, blkaddr, cir_off, cir_val);
> +                     if (lvl != NIX_TXSCH_LVL_TL1)
> +                             rvu_write64(rvu, blkaddr, pir_off, pir_val);
> +             } else {
> +                     rvu_write64(rvu, blkaddr, cir_off, 0x0);
> +                     if (lvl != NIX_TXSCH_LVL_TL1)
> +                             rvu_write64(rvu, blkaddr, pir_off, 0x0);
> +             }
> +     }
> +}
> +
>  static int nix_smq_flush(struct rvu *rvu, int blkaddr,
>                        int smq, u16 pcifunc, int nixlf)
>  {
> +     struct nix_smq_flush_ctx *smq_flush_ctx;
>       int pf = rvu_get_pf(pcifunc);
>       u8 cgx_id = 0, lmac_id = 0;
>       int err, restore_tx_en = 0;
>       u64 cfg;
> +     u8 link;
> +
> +     if (!is_rvu_otx2(rvu)) {
> +             /* Skip SMQ flush if pkt count is zero */
> +             cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq));
> +             if (!cfg)
> +                     return 0;
> +     }
>  
>       /* enable cgx tx if disabled */
>       if (is_pf_cgxmapped(rvu, pf)) {
> @@ -2096,6 +2654,14 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
>                                                  lmac_id, true);
>       }
>  
> +     /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */
> +     smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL);
> +     if (!smq_flush_ctx)
> +             return -ENOMEM;
> +     nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);
> +     nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
> +     nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
> +
>       cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
>       /* Do SMQ flush and set enqueue xoff */
>       cfg |= BIT_ULL(50) | BIT_ULL(49);
> @@ -2109,14 +2675,27 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
>       /* Wait for flush to complete */
>       err = rvu_poll_reg(rvu, blkaddr,
>                          NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
> -     if (err)
> -             dev_err(rvu->dev,
> -                     "NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
> +     if (err) {
> +             dev_info(rvu->dev,
> +                      "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
> +                      nixlf, smq);
> +
> +             nix_dump_smq_status(rvu, blkaddr, smq_flush_ctx);
> +             link = (cgx_id * rvu->hw->lmac_per_cgx) + lmac_id;
> +             dev_info(rvu->dev, "NIX_AF_TX_LINKX_NORM_CREDIT:0x%llx\n",
> +                      rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link)));
> +     }
> +
> +     /* clear XOFF on TL2s */
> +     nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
> +     nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
> +     kfree(smq_flush_ctx);
>  
>       rvu_cgx_enadis_rx_bp(rvu, pf, true);
>       /* restore cgx tx state */
>       if (restore_tx_en)
>               rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
> +
>       return err;
>  }
>  
> @@ -2153,6 +2732,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
>                               continue;
>                       nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
>                       nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
> +                     nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
>               }
>       }
>       nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
> @@ -2191,15 +2771,14 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
>               for (schq = 0; schq < txsch->schq.max; schq++) {
>                       if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
>                               continue;
> +                     nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
>                       rvu_free_rsrc(&txsch->schq, schq);
>                       txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
>               }
>       }
>       mutex_unlock(&rvu->rsrc_lock);
>  
> -     /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
> -     rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
> -     err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
> +     err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC);
>       if (err)
>               dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
>  
> @@ -2250,6 +2829,9 @@ static int nix_txschq_free_one(struct rvu *rvu,
>        */
>       nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
>  
> +     nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
> +     nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
> +
>       /* Flush if it is a SMQ. Onus of disabling
>        * TL2/3 queue links before SMQ flush is on user
>        */
> @@ -2259,6 +2841,8 @@ static int nix_txschq_free_one(struct rvu *rvu,
>               goto err;
>       }
>  
> +     nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
> +
>       /* Free the resource */
>       rvu_free_rsrc(&txsch->schq, schq);
>       txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
> @@ -2361,7 +2945,9 @@ static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
>  static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
>                               u16 pcifunc, int blkaddr)
>  {
> +     struct rvu_pfvf *parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
>       u32 *pfvf_map;
> +
>       int schq;
>  
>       schq = nix_get_tx_link(rvu, pcifunc);
> @@ -2370,7 +2956,7 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
>       if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
>               return;
>       rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
> -                 (TXSCH_TL1_DFLT_RR_PRIO << 1));
> +                 (parent_pf->tl1_rr_prio << 1));
>  
>       /* On OcteonTx2 the config was in bytes and newer silcons
>        * it's changed to weight.
> @@ -2413,17 +2999,19 @@ static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
>       return 0;
>  }
>  
> -static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
> -                            u16 pcifunc, struct nix_txsch *txsch)
> +void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
> +                     struct nix_txsch *txsch, bool enable)
>  {
>       struct rvu_hwinfo *hw = rvu->hw;
>       int lbk_link_start, lbk_links;
>       u8 pf = rvu_get_pf(pcifunc);
>       int schq;
> +     u64 cfg;
>  
>       if (!is_pf_cgxmapped(rvu, pf))
>               return;
>  
> +     cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
>       lbk_link_start = hw->cgx_links;
>  
>       for (schq = 0; schq < txsch->schq.max; schq++) {
> @@ -2437,8 +3025,7 @@ static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
>                       rvu_write64(rvu, blkaddr,
>                                   NIX_AF_TL3_TL2X_LINKX_CFG(schq,
>                                                             lbk_link_start +
> -                                                           lbk_links),
> -                                 BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
> +                                                           lbk_links), cfg);
>       }
>  }
>  
> @@ -2544,8 +3131,6 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
>               rvu_write64(rvu, blkaddr, reg, regval);
>       }
>  
> -     rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
> -                        &nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
>       return 0;
>  }
>  
> @@ -2558,8 +3143,8 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
>           req->vtag_size > VTAGSIZE_T8)
>               return -EINVAL;
>  
> -     /* RX VTAG Type 7 reserved for vf vlan */
> -     if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
> +     /* RX VTAG Types 7 and 6 are reserved for VF VLAN & FDSA tag strip */
> +     if (req->rx.vtag_type >= NIX_AF_LFX_RX_VTAG_TYPE6)
>               return NIX_AF_ERR_RX_VTAG_INUSE;
>  
>       if (req->rx.capture_vtag)
> @@ -2765,7 +3350,8 @@ int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
>  }
>  
>  static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
> -                          int mce, u8 op, u16 pcifunc, int next, bool eol)
> +                          int mce, u8 op, u16 pcifunc, int next,
> +                          int index, u8 mce_op, bool eol)
>  {
>       struct nix_aq_enq_req aq_req;
>       int err;
> @@ -2776,8 +3362,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
>       aq_req.qidx = mce;
>  
>       /* Use RSS with RSS index 0 */
> -     aq_req.mce.op = 1;
> -     aq_req.mce.index = 0;
> +     aq_req.mce.op = mce_op;
> +     aq_req.mce.index = index;
>       aq_req.mce.eol = eol;
>       aq_req.mce.pf_func = pcifunc;
>       aq_req.mce.next = next;
> @@ -2794,6 +3380,206 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
>       return 0;
>  }
>  
> +static void nix_delete_mcast_mce_list(struct nix_mce_list *mce_list)
> +{
> +     struct hlist_node *tmp;
> +     struct mce *mce;
> +
> +     /* Scan through the current list */
> +     hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
> +             hlist_del(&mce->node);
> +             kfree(mce);
> +     }
> +
> +     mce_list->count = 0;
> +     mce_list->max = 0;
> +}
> +
> +static int nix_get_last_mce_list_index(struct nix_mcast_grp_elem *elem)
> +{
> +     return elem->mce_start_index + elem->mcast_mce_list.count - 1;
> +}
> +
> +static int nix_update_ingress_mce_list_hw(struct rvu *rvu,
> +                                       struct nix_hw *nix_hw,
> +                                       struct nix_mcast_grp_elem *elem)
> +{
> +     int idx, last_idx, next_idx, err;
> +     struct nix_mce_list *mce_list;
> +     struct mce *mce, *prev_mce;
> +
> +     mce_list = &elem->mcast_mce_list;
> +     idx = elem->mce_start_index;
> +     last_idx = nix_get_last_mce_list_index(elem);
> +     hlist_for_each_entry(mce, &mce_list->head, node) {
> +             if (idx > last_idx)
> +                     break;
> +
> +             if (!mce->is_active) {
> +                     if (idx == elem->mce_start_index) {
> +                             idx++;
> +                             prev_mce = mce;
> +                             elem->mce_start_index = idx;
> +                             continue;
> +                     } else if (idx == last_idx) {
> +                             err = nix_blk_setup_mce(rvu, nix_hw, idx - 1, NIX_AQ_INSTOP_WRITE,
> +                                                     prev_mce->pcifunc, next_idx,
> +                                                     prev_mce->rq_rss_index,
> +                                                     prev_mce->dest_type,
> +                                                     false);
> +                             if (err)
> +                                     return err;
> +
> +                             break;
> +                     }
> +             }
> +
> +             next_idx = idx + 1;
> +             /* EOL should be set in last MCE */
> +             err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
> +                                     mce->pcifunc, next_idx,
> +                                     mce->rq_rss_index, mce->dest_type,
> +                                     (next_idx > last_idx) ? true : false);
> +             if (err)
> +                     return err;
> +
> +             idx++;
> +             prev_mce = mce;
> +     }
> +
> +     return 0;
> +}
> +
> +static void nix_update_egress_mce_list_hw(struct rvu *rvu,
> +                                       struct nix_hw *nix_hw,
> +                                       struct nix_mcast_grp_elem *elem)
> +{
> +     struct nix_mce_list *mce_list;
> +     int idx, last_idx, next_idx;
> +     struct mce *mce, *prev_mce;
> +     u64 regval;
> +     u8 eol;
> +
> +     mce_list = &elem->mcast_mce_list;
> +     idx = elem->mce_start_index;
> +     last_idx = nix_get_last_mce_list_index(elem);
> +     hlist_for_each_entry(mce, &mce_list->head, node) {
> +             if (idx > last_idx)
> +                     break;
> +
> +             if (!mce->is_active) {
> +                     if (idx == elem->mce_start_index) {
> +                             idx++;
> +                             prev_mce = mce;
> +                             elem->mce_start_index = idx;
> +                             continue;
> +                     } else if (idx == last_idx) {
> +                             regval = (next_idx << 16) | (1 << 12) | prev_mce->channel;
> +                             rvu_write64(rvu, nix_hw->blkaddr,
> +                                         NIX_AF_TX_MCASTX(idx - 1),
> +                                         regval);
> +                             break;
> +                     }
> +             }
> +
> +             eol = 0;
> +             next_idx = idx + 1;
> +             /* EOL should be set in last MCE */
> +             if (next_idx > last_idx)
> +                     eol = 1;
> +
> +             regval = (next_idx << 16) | (eol << 12) | mce->channel;
> +             rvu_write64(rvu, nix_hw->blkaddr,
> +                         NIX_AF_TX_MCASTX(idx),
> +                         regval);
> +             idx++;
> +             prev_mce = mce;
> +     }
> +}
> +
> +static int nix_del_mce_list_entry(struct rvu *rvu,
> +                               struct nix_hw *nix_hw,
> +                               struct nix_mcast_grp_elem *elem,
> +                               struct nix_mcast_grp_update_req *req)
> +{
> +     u32 num_entry = req->num_mce_entry;
> +     struct nix_mce_list *mce_list;
> +     struct mce *mce;
> +     bool is_found;
> +     int i;
> +
> +     mce_list = &elem->mcast_mce_list;
> +     for (i = 0; i < num_entry; i++) {
> +             is_found = false;
> +             hlist_for_each_entry(mce, &mce_list->head, node) {
> +                     /* If already exists, then delete */
> +                     if (mce->pcifunc == req->pcifunc[i]) {
> +                             hlist_del(&mce->node);
> +                             kfree(mce);
> +                             mce_list->count--;
> +                             is_found = true;
> +                             break;
> +                     }
> +             }
> +
> +             if (!is_found)
> +                     return NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
> +     }
> +
> +     mce_list->max = mce_list->count;
> +     /* Dump the updated list to HW */
> +     if (elem->dir == NIX_MCAST_INGRESS)
> +             return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
> +
> +     nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
> +     return 0;
> +}
> +
> +static int nix_add_mce_list_entry(struct rvu *rvu,
> +                               struct nix_hw *nix_hw,
> +                               struct nix_mcast_grp_elem *elem,
> +                               struct nix_mcast_grp_update_req *req)
> +{
> +     u32 num_entry = req->num_mce_entry;
> +     struct nix_mce_list *mce_list;
> +     struct hlist_node *tmp;
> +     struct mce *mce;
> +     int i;
> +
> +     mce_list = &elem->mcast_mce_list;
> +     for (i = 0; i < num_entry; i++) {
> +             mce = kzalloc(sizeof(*mce), GFP_KERNEL);
> +             if (!mce)
> +                     goto free_mce;
> +
> +             mce->pcifunc = req->pcifunc[i];
> +             mce->channel = req->channel[i];
> +             mce->rq_rss_index = req->rq_rss_index[i];
> +             mce->dest_type = req->dest_type[i];
> +             mce->is_active = 1;
> +             hlist_add_head(&mce->node, &mce_list->head);
> +             mce_list->count++;
> +     }
> +
> +     mce_list->max += num_entry;
> +
> +     /* Dump the updated list to HW */
> +     if (elem->dir == NIX_MCAST_INGRESS)
> +             return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
> +
> +     nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
> +     return 0;
> +
> +free_mce:
> +     hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
> +             hlist_del(&mce->node);
> +             kfree(mce);
> +             mce_list->count--;
> +     }
> +
> +     return -ENOMEM;
> +}
> +
>  static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
>                                    u16 pcifunc, bool add)
>  {
> @@ -2889,6 +3675,7 @@ int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
>               /* EOL should be set in last MCE */
>               err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
>                                       mce->pcifunc, next_idx,
> +                                     0, 1,
>                                       (next_idx > last_idx) ? true : false);
>               if (err)
>                       goto end;
> @@ -2969,6 +3756,16 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
>       return err;
>  }
>  
> +static void nix_setup_mcast_grp(struct nix_hw *nix_hw)
> +{
> +     struct nix_mcast_grp *mcast_grp = &nix_hw->mcast_grp;
> +
> +     INIT_LIST_HEAD(&mcast_grp->mcast_grp_head);
> +     mutex_init(&mcast_grp->mcast_grp_lock);
> +     mcast_grp->next_grp_index = 1;
> +     mcast_grp->count = 0;
> +}
> +
>  static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
>  {
>       struct nix_mcast *mcast = &nix_hw->mcast;
> @@ -2993,15 +3790,15 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
>                       continue;
>  
>               /* save start idx of broadcast mce list */
> -             pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
> +             pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
>               nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
>  
>               /* save start idx of multicast mce list */
> -             pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
> +             pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
>               nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
>  
>               /* save the start idx of promisc mce list */
> -             pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
> +             pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
>               nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
>  
>               for (idx = 0; idx < (numvfs + 1); idx++) {
> @@ -3016,7 +3813,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
>                       err = nix_blk_setup_mce(rvu, nix_hw,
>                                               pfvf->bcast_mce_idx + idx,
>                                               NIX_AQ_INSTOP_INIT,
> -                                             pcifunc, 0, true);
> +                                             pcifunc, 0, 0, 1, true);
>                       if (err)
>                               return err;
>  
> @@ -3024,7 +3821,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
>                       err = nix_blk_setup_mce(rvu, nix_hw,
>                                               pfvf->mcast_mce_idx + idx,
>                                               NIX_AQ_INSTOP_INIT,
> -                                             pcifunc, 0, true);
> +                                             pcifunc, 0, 0, 1, true);
>                       if (err)
>                               return err;
>  
> @@ -3032,7 +3829,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
>                       err = nix_blk_setup_mce(rvu, nix_hw,
>                                               pfvf->promisc_mce_idx + idx,
>                                               NIX_AQ_INSTOP_INIT,
> -                                             pcifunc, 0, true);
> +                                             pcifunc, 0, 0, 1, true);
>                       if (err)
>                               return err;
>               }
> @@ -3047,11 +3844,25 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
>       int err, size;
>  
>       size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
> -     size = (1ULL << size);
> +     size = BIT_ULL(size);
> +
> +     /* Allocate bitmap for rx mce entries */
> +     mcast->mce_counter[NIX_MCAST_INGRESS].max = 256UL << MC_TBL_SIZE;
> +     err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
> +     if (err)
> +             return -ENOMEM;
> +
> +     /* Allocate bitmap for tx mce entries */
> +     mcast->mce_counter[NIX_MCAST_EGRESS].max = MC_TX_MAX;
> +     err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
> +     if (err) {
> +             rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
> +             return -ENOMEM;
> +     }
>  
>       /* Alloc memory for multicast/mirror replication entries */
>       err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
> -                      (256UL << MC_TBL_SIZE), size);
> +                      mcast->mce_counter[NIX_MCAST_INGRESS].max, size);
>       if (err)
>               return -ENOMEM;
>  
> @@ -3081,6 +3892,8 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
>  
>       mutex_init(&mcast->mce_lock);
>  
> +     nix_setup_mcast_grp(nix_hw);
> +
>       return nix_setup_mce_tables(rvu, nix_hw);
>  }
>  
> @@ -3156,10 +3969,16 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
>       }
>  
>       /* Setup a default value of 8192 as DWRR MTU */
> -     if (rvu->hw->cap.nix_common_dwrr_mtu) {
> -             rvu_write64(rvu, blkaddr, NIX_AF_DWRR_RPM_MTU,
> +     if (rvu->hw->cap.nix_common_dwrr_mtu ||
> +         rvu->hw->cap.nix_multiple_dwrr_mtu) {
> +             rvu_write64(rvu, blkaddr,
> +                         nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM),
>                           convert_bytes_to_dwrr_mtu(8192));
> -             rvu_write64(rvu, blkaddr, NIX_AF_DWRR_SDP_MTU,
> +             rvu_write64(rvu, blkaddr,
> +                         nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK),
> +                         convert_bytes_to_dwrr_mtu(8192));
> +             rvu_write64(rvu, blkaddr,
> +                         nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP),
>                           convert_bytes_to_dwrr_mtu(8192));
>       }
>  
> @@ -3228,8 +4047,12 @@ static void rvu_get_lbk_link_max_frs(struct rvu *rvu,  u16 *max_mtu)
>  
>  static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
>  {
> -     /* RPM supports FIFO len 128 KB */
> -     if (rvu_cgx_get_fifolen(rvu) == 0x20000)
> +     int fifo_size = rvu_cgx_get_fifolen(rvu);
> +
> +     /* RPM supports FIFO len 128 KB and RPM2 supports double the
> +      * FIFO len to accommodate 8 LMACS
> +      */
> +     if (fifo_size == 0x20000 || fifo_size == 0x40000)
>               *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
>       else
>               *max_mtu = NIC_HW_MAX_FRS;
> @@ -3246,6 +4069,11 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
>       if (blkaddr < 0)
>               return NIX_AF_ERR_AF_LF_INVALID;
>  
> +     rsp->vwqe_delay = 0;
> +     if (!is_rvu_otx2(rvu))
> +             rsp->vwqe_delay = rvu_read64(rvu, blkaddr, NIX_AF_VWQE_TIMER) &
> +                               GENMASK_ULL(9, 0);
> +
>       if (is_afvf(pcifunc))
>               rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
>       else
> @@ -3253,19 +4081,28 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
>  
>       rsp->min_mtu = NIC_HW_MIN_FRS;
>  
> -     if (!rvu->hw->cap.nix_common_dwrr_mtu) {
> +     if (!rvu->hw->cap.nix_common_dwrr_mtu &&
> +         !rvu->hw->cap.nix_multiple_dwrr_mtu) {
>               /* Return '1' on OTx2 */
>               rsp->rpm_dwrr_mtu = 1;
>               rsp->sdp_dwrr_mtu = 1;
> +             rsp->lbk_dwrr_mtu = 1;
>               return 0;
>       }
>  
> -     dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
> +     /* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */
> +     dwrr_mtu = rvu_read64(rvu, blkaddr,
> +                           nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
>       rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
>  
> -     dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_SDP_MTU);
> +     dwrr_mtu = rvu_read64(rvu, blkaddr,
> +                           nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP));
>       rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
>  
> +     dwrr_mtu = rvu_read64(rvu, blkaddr,
> +                           nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK));
> +     rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
> +
>       return 0;
>  }
>  
> @@ -3577,6 +4414,20 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
>                       field->ltype_match = NPC_LT_LE_GTPU;
>                       field->ltype_mask = 0xF;
>                       break;
> +             case NIX_FLOW_KEY_TYPE_CH_LEN_90B:
> +                     field->lid = NPC_LID_LA;
> +                     field->hdr_offset = 24;
> +                     field->bytesm1 = 1; /* 2 Bytes */
> +                     field->ltype_match = NPC_LT_LA_CUSTOM_L2_90B_ETHER;
> +                     field->ltype_mask = 0xF;
> +                     break;
> +             case NIX_FLOW_KEY_TYPE_CUSTOM0:
> +                     field->lid = NPC_LID_LC;
> +                     field->hdr_offset = 6;
> +                     field->bytesm1 = 1; /* 2 Bytes */
> +                     field->ltype_match = NPC_LT_LC_CUSTOM0;
> +                     field->ltype_mask = 0xF;
> +                     break;
>               case NIX_FLOW_KEY_TYPE_VLAN:
>                       field->lid = NPC_LID_LB;
>                       field->hdr_offset = 2; /* Skip TPID (2-bytes) */
> @@ -3769,7 +4620,7 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
>                                     struct nix_set_mac_addr *req,
>                                     struct msg_rsp *rsp)
>  {
> -     bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
> +     bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
>       u16 pcifunc = req->hdr.pcifunc;
>       int blkaddr, nixlf, err;
>       struct rvu_pfvf *pfvf;
> @@ -3881,14 +4732,13 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
>       }
>  
>       /* install/uninstall promisc entry */
> -     if (promisc) {
> +     if (promisc)
>               rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
>                                             pfvf->rx_chan_base,
>                                             pfvf->rx_chan_cnt);
> -     } else {
> +     else
>               if (!nix_rx_multicast)
>                       rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
> -     }
>  
>       return 0;
>  }
> @@ -3967,7 +4817,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
>       if (!req->sdp_link && req->maxlen > max_mtu)
>               return NIX_AF_ERR_FRS_INVALID;
>  
> -     if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
> +     if (req->update_minlen && req->minlen < (req->sdp_link ? SDP_HW_MIN_FRS : NIC_HW_MIN_FRS))
>               return NIX_AF_ERR_FRS_INVALID;
>  
>       /* Check if config is for SDP link */
> @@ -4026,6 +4876,11 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
>       else
>               cfg &= ~BIT_ULL(40);
>  
> +     if (req->len_verify & NIX_RX_DROP_RE)
> +             cfg |= BIT_ULL(32);
> +     else
> +             cfg &= ~BIT_ULL(32);
> +
>       if (req->csum_verify & BIT(0))
>               cfg |= BIT_ULL(37);
>       else
> @@ -4055,6 +4910,9 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
>       rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
>       rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
>  
> +     /* Set SDP link credit */
> +     rvu_write64(rvu, blkaddr, NIX_AF_SDP_LINK_CREDIT, SDP_LINK_CREDIT);
> +
>       /* Set default min/max packet lengths allowed on NIX Rx links.
>        *
>        * With HW reset minlen value of 60byte, HW will treat ARP pkts
> @@ -4066,14 +4924,30 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
>                               ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
>       }
>  
> -     for (link = hw->cgx_links; link < hw->lbk_links; link++) {
> +     for (link = hw->cgx_links; link < hw->cgx_links + hw->lbk_links; link++) {
>               rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
>                           ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
>       }
>       if (hw->sdp_links) {
>               link = hw->cgx_links + hw->lbk_links;
>               rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
> -                         SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
> +                         SDP_HW_MAX_FRS << 16 | SDP_HW_MIN_FRS);
> +     }
> +
> +     /* Set CPT link i.e second pass config */
> +     if (hw->cpt_links) {
> +             link = hw->cgx_links + hw->lbk_links + hw->sdp_links;
> +             /* Use the LBK link's min/max packet lengths for the CPT
> +              * link, as the LBK link's range is the widest.
> +              */
> +             rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
> +                         ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
> +     }
> +
> +     /* Get MCS external bypass status for CN10K-B */
> +     if (mcs_get_blkcnt() == 1) {
> +             /* Adjust for 2 credits when external bypass is disabled */
> +             nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2;
>       }
>  
>       /* Set credits for Tx links assuming max packet length allowed.
> @@ -4088,7 +4962,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
>  
>               /* Get LMAC id's from bitmap */
>               lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
> -             for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
> +             for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
>                       lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, 
>                       lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
>                               dev_err(rvu->dev,
> @@ -4099,6 +4973,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
>                       tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
>                       /* Enable credits and set credit pkt count to max allowed */
>                       cfg =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
> +                     cfg |= (nix_hw->cc_mcs_cnt << 32);
>  
>                       link = iter + slink;
>                       nix_hw->tx_credits[link] = tx_credits;
> @@ -4223,8 +5098,11 @@ static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
>        * Check if HW uses a common MTU for all DWRR quantum configs.
>        * On OcteonTx2 this register field is '0'.
>        */
> -     if (((hw_const >> 56) & 0x10) == 0x10)
> +     if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61)))
>               hw->cap.nix_common_dwrr_mtu = true;
> +
> +     if (hw_const & BIT_ULL(61))
> +             hw->cap.nix_multiple_dwrr_mtu = true;
>  }
>  
>  static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
> @@ -4250,12 +5128,18 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
>       /* Set chan/link to backpressure TL3 instead of TL2 */
>       rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
>  
> -     /* Disable SQ manager's sticky mode operation (set TM6 = 0)
> +     /* Disable SQ manager's sticky mode operation (set TM6 = 0, TM11 = 0)
>        * This sticky mode is known to cause SQ stalls when multiple
> -      * SQs are mapped to same SMQ and transmitting pkts at a time.
> +      * SQs are mapped to same SMQ and transmitting pkts simultaneously.
> +      * NIX PSE may deadlock when there is any sticky to
> +      * non-sticky transmission. Hence disable it (TM5 = 0).
>        */
>       cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
> -     cfg &= ~BIT_ULL(15);
> +     cfg &= ~(BIT_ULL(15) | BIT_ULL(14) | BIT_ULL(23));
> +     /* NIX may drop credits when condition clocks are turned off.
> +      * Hence enable control flow clk (set TM9 = 1).
> +      */
> +     cfg |= BIT_ULL(21);
>       rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
>  
>       ltdefs = rvu->kpu.lt_def;
> @@ -4275,8 +5159,17 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
>       /* Restore CINT timer delay to HW reset values */
>       rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
>  
> +     cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
> +
>       /* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */
> -     rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, 0x1ULL);
> +     cfg |= 1ULL;
> +     if (!is_rvu_otx2(rvu))
> +             cfg |= NIX_PTP_1STEP_EN;
> +
> +     rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
> +
> +     if (!is_rvu_otx2(rvu))
> +             rvu_nix_block_cn10k_init(rvu, nix_hw);
>  
>       if (is_block_implemented(hw, blkaddr)) {
>               err = nix_setup_txschq(rvu, nix_hw, blkaddr);
> @@ -4299,6 +5192,10 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
>               if (err)
>                       return err;
>  
> +             err = nix_setup_bpids(rvu, nix_hw, blkaddr);
> +             if (err)
> +                     return err;
> +
>               /* Configure segmentation offload formats */
>               nix_setup_lso(rvu, nix_hw, blkaddr);
>  
> @@ -4388,6 +5285,19 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
>  
>               /* Enable Channel backpressure */
>               rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
> +             if (is_block_implemented(rvu->hw, BLKADDR_CPT0)) {
> +                     /* Config IPSec headers identification */
> +                     rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IPSECX(0),
> +                                 (ltdefs->rx_ipsec[0].lid << 8) |
> +                                 (ltdefs->rx_ipsec[0].ltype_match << 4) |
> +                                 ltdefs->rx_ipsec[0].ltype_mask);
> +
> +                     rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IPSECX(1),
> +                                 (ltdefs->rx_ipsec[1].spi_offset << 12) |
> +                                 (ltdefs->rx_ipsec[1].lid << 8) |
> +                                 (ltdefs->rx_ipsec[1].ltype_match << 4) |
> +                                 ltdefs->rx_ipsec[1].ltype_mask);
> +             }
>       }
>       return 0;
>  }
> @@ -4469,6 +5379,74 @@ void rvu_nix_freemem(struct rvu *rvu)
>       }
>  }
>  
> +static void nix_mcast_update_action(struct rvu *rvu,
> +                                 struct nix_mcast_grp_elem *elem)
> +{
> +     struct npc_mcam *mcam = &rvu->hw->mcam;
> +     struct nix_rx_action rx_action = { 0 };
> +     struct nix_tx_action tx_action = { 0 };
> +     int npc_blkaddr;
> +
> +     npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
> +     if (elem->dir == NIX_MCAST_INGRESS) {
> +             *(u64 *)&rx_action = npc_get_mcam_action(rvu, mcam,
> +                                                      npc_blkaddr,
> +                                                      elem->mcam_index);
> +             rx_action.index = elem->mce_start_index;
> +             npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
> +                                 *(u64 *)&rx_action);
> +     } else {
> +             *(u64 *)&tx_action = npc_get_mcam_action(rvu, mcam,
> +                                                      npc_blkaddr,
> +                                                      elem->mcam_index);
> +             tx_action.index = elem->mce_start_index;
> +             npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
> +                                 *(u64 *)&tx_action);
> +     }
> +}
> +
> +static void nix_mcast_update_mce_entry(struct rvu *rvu, u16 pcifunc, u8 is_active)
> +{
> +     struct nix_mcast_grp_elem *elem;
> +     struct nix_mcast_grp *mcast_grp;
> +     struct nix_hw *nix_hw;
> +     int blkaddr;
> +
> +     blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
> +     nix_hw = get_nix_hw(rvu->hw, blkaddr);
> +     if (!nix_hw)
> +             return;
> +
> +     mcast_grp = &nix_hw->mcast_grp;
> +
> +     mutex_lock(&mcast_grp->mcast_grp_lock);
> +     list_for_each_entry(elem, &mcast_grp->mcast_grp_head, list) {
> +             struct nix_mce_list *mce_list;
> +             struct mce *mce;
> +
> +             /* Iterate the group elements and disable the element which
> +              * received the disable request.
> +              */
> +             mce_list = &elem->mcast_mce_list;
> +             hlist_for_each_entry(mce, &mce_list->head, node) {
> +                     if (mce->pcifunc == pcifunc) {
> +                             mce->is_active = is_active;
> +                             break;
> +                     }
> +             }
> +
> +             /* Dump the updated list to HW */
> +             if (elem->dir == NIX_MCAST_INGRESS)
> +                     nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
> +             else
> +                     nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
> +
> +             /* Update the multicast index in NPC rule */
> +             nix_mcast_update_action(rvu, elem);
> +     }
> +     mutex_unlock(&mcast_grp->mcast_grp_lock);
> +}
> +
>  int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
>                                    struct msg_rsp *rsp)
>  {
> @@ -4480,6 +5458,9 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
>       if (err)
>               return err;
>  
> +     /* Enable the interface if it is in any multicast list */
> +     nix_mcast_update_mce_entry(rvu, pcifunc, 1);
> +
>       rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
>  
>       npc_mcam_enable_flows(rvu, pcifunc);
> @@ -4504,6 +5485,9 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
>               return err;
>  
>       rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
> +     /* Disable the interface if it is in any multicast list */
> +     nix_mcast_update_mce_entry(rvu, pcifunc, 0);
> +
>  
>       pfvf = rvu_get_pfvf(rvu, pcifunc);
>       clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
> @@ -4517,6 +5501,8 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
>       return 0;
>  }
>  
> +#define RX_SA_BASE  GENMASK_ULL(52, 7)
> +
>  void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
>  {
>       struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
> @@ -4524,6 +5510,7 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
>       int pf = rvu_get_pf(pcifunc);
>       struct mac_ops *mac_ops;
>       u8 cgx_id, lmac_id;
> +     u64 sa_base;
>       void *cgxd;
>       int err;
>  
> @@ -4536,6 +5523,9 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
>       nix_rx_sync(rvu, blkaddr);
>       nix_txschq_free(rvu, pcifunc);
>  
> +     /* Reset SPI to SA index table */
> +     rvu_nix_free_spi_to_sa_table(rvu, pcifunc);
> +
>       clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
>  
>       rvu_cgx_start_stop_io(rvu, pcifunc, false);
> @@ -4577,9 +5567,32 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
>               pfvf->hw_rx_tstamp_en = false;
>       }
>  
> +     /* reset priority flow control config */
> +     rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
> +
> +     /* reset 802.3x flow control config */
> +     rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
> +
>       nix_ctx_free(rvu, pfvf);
>  
>       nix_free_all_bandprof(rvu, pcifunc);
> +
> +     sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
> +     if (FIELD_GET(RX_SA_BASE, sa_base)) {
> +             err = rvu_cpt_ctx_flush(rvu, pcifunc);
> +             if (err)
> +                     dev_err(rvu->dev,
> +                             "CPT ctx flush failed with error: %d\n", err);
> +     }
> +     if (is_block_implemented(rvu->hw, BLKADDR_CPT0)) {
> +             /* reset the configuration related to inline ipsec */
> +             rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(nixlf),
> +                         0x0);
> +             rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf),
> +                         0x0);
> +             rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf),
> +                         0x0);
> +     }
>  }
>  
>  #define NIX_AF_LFX_TX_CFG_PTP_EN     BIT_ULL(32)
> @@ -4620,6 +5633,10 @@ static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
>  int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
>                                         struct msg_rsp *rsp)
>  {
> +     /* Silicon does not support enabling time stamp in higig mode */
> +     if (rvu_cgx_is_higig2_enabled(rvu, rvu_get_pf(req->hdr.pcifunc)))
> +             return NIX_AF_ERR_PTP_CONFIG_FAIL;
> +
>       return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
>  }
>  
> @@ -4680,6 +5697,157 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
>       return 0;
>  }
>  
> +#define IPSEC_GEN_CFG_EGRP    GENMASK_ULL(50, 48)
> +#define IPSEC_GEN_CFG_OPCODE  GENMASK_ULL(47, 32)
> +#define IPSEC_GEN_CFG_PARAM1  GENMASK_ULL(31, 16)
> +#define IPSEC_GEN_CFG_PARAM2  GENMASK_ULL(15, 0)
> +
> +#define CPT_INST_QSEL_BLOCK   GENMASK_ULL(28, 24)
> +#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
> +#define CPT_INST_QSEL_SLOT    GENMASK_ULL(7, 0)
> +
> +#define CPT_INST_CREDIT_TH    GENMASK_ULL(53, 32)
> +#define CPT_INST_CREDIT_BPID  GENMASK_ULL(30, 22)
> +#define CPT_INST_CREDIT_CNT   GENMASK_ULL(21, 0)
> +
> +static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
> +                              int blkaddr)
> +{
> +     u8 cpt_idx, cpt_blkaddr;
> +     u64 val = 0;
> +
> +     cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
> +     if (req->enable) {
> +             val = 0;
> +             /* Enable context prefetching */
> +             if (!is_rvu_otx2(rvu))
> +                     val |= BIT_ULL(51);
> +
> +             /* Set OPCODE and EGRP */
> +             val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
> +             val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
> +             val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
> +             val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
> +
> +             rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
> +
> +             /* Set CPT queue for inline IPSec */
> +             val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
> +             val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
> +                               req->inst_qsel.cpt_pf_func);
> +
> +             if (!is_rvu_otx2(rvu)) {
> +                     cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
> +                                                    BLKADDR_CPT1;
> +                     val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
> +             }
> +
> +             rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
> +                         val);
> +
> +             /* Set CPT credit */
> +             val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
> +             if ((val & 0x3FFFFF) != 0x3FFFFF)
> +                     rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
> +                                 0x3FFFFF - val);
> +
> +             val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit);
> +             val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid);
> +             val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th);
> +             rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val);
> +     } else {
> +             rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
> +             rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
> +                         0x0);
> +             val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
> +             if ((val & 0x3FFFFF) != 0x3FFFFF)
> +                     rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
> +                                 0x3FFFFF - val);
> +     }
> +}
> +
> +int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
> +                                       struct nix_inline_ipsec_cfg *req,
> +                                       struct msg_rsp *rsp)
> +{
> +     if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
> +             return 0;
> +
> +     nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
> +     if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
> +             nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
> +
> +     return 0;
> +}
> +
> +int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu,
> +                                            struct msg_req *req,
> +                                            struct nix_inline_ipsec_cfg *rsp)
> +
> +{
> +     u64 val;
> +
> +     if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
> +             return 0;
> +
> +     val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG);
> +     rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val);
> +     rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val);
> +     rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val);
> +     rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val);
> +
> +     val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0));
> +     rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val);
> +     rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val);
> +     rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val);
> +
> +     return 0;
> +}
> +
> +int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
> +                                          struct nix_inline_ipsec_lf_cfg *req,
> +                                          struct msg_rsp *rsp)
> +{
> +     int lf, blkaddr, err;
> +     u64 val;
> +
> +     if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
> +             return 0;
> +
> +     err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
> +     if (err)
> +             return err;
> +
> +     if (req->enable) {
> +             /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
> +             val = (u64)req->ipsec_cfg0.tt << 44 |
> +                   (u64)req->ipsec_cfg0.tag_const << 20 |
> +                   (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
> +                   req->ipsec_cfg0.lenm1_max;
> +
> +             if (blkaddr == BLKADDR_NIX1)
> +                     val |= BIT_ULL(46);
> +
> +             rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
> +
> +             /* Set SA_IDX_W and SA_IDX_MAX */
> +             val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
> +                   req->ipsec_cfg1.sa_idx_max;
> +             rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
> +
> +             /* Set SA base address */
> +             rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
> +                         req->sa_base_addr);
> +     } else {
> +             rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
> +             rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
> +             rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
> +                         0x0);
> +     }
> +
> +     return 0;
> +}
> +
>  void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
>  {
>       bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
> @@ -5194,6 +6362,7 @@ int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
>       aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
>       aq_req.op = NIX_AQ_INSTOP_WRITE;
>       memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
> +     memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
>       /* Clear higher layer enable bit in the mid profile, just in case */
>       aq_req.prof.hl_en = 0;
>       aq_req.prof_mask.hl_en = 1;
> @@ -5309,3 +6478,572 @@ int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *re
>  
>       return 0;
>  }
> +
> +int rvu_mbox_handler_nix_rx_sw_sync(struct rvu *rvu, struct msg_req *req,
> +                                 struct msg_rsp *rsp)
> +{
> +     int blkaddr;
> +
> +     blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
> +     if (blkaddr < 0)
> +             return NIX_AF_ERR_AF_LF_INVALID;
> +
> +     nix_rx_sync(rvu, blkaddr);
> +     return 0;
> +}
> +
> +bool rvu_nix_is_ptp_tx_enabled(struct rvu *rvu, u16 pcifunc)
> +{
> +     struct rvu_hwinfo *hw = rvu->hw;
> +     struct rvu_block *block;
> +     int blkaddr;
> +     int nixlf;
> +     u64 cfg;
> +
> +     blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
> +     if (blkaddr < 0)
> +             return NIX_AF_ERR_AF_LF_INVALID;
> +
> +     block = &hw->block[blkaddr];
> +     nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
> +     if (nixlf < 0)
> +             return NIX_AF_ERR_AF_LF_INVALID;
> +
> +     cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
> +     return (cfg & BIT_ULL(32));
> +}
> +
> +static inline void
> +configure_rq_mask(struct rvu *rvu, int blkaddr, int nixlf,
> +               u8 rq_mask, bool enable)
> +{
> +     u64 cfg;
> +     u64 reg;
> +
> +     cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf));
> +     reg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf));
> +     if (enable) {
> +             cfg |= BIT_ULL(43);
> +             reg = (reg & ~GENMASK_ULL(36, 35)) | ((u64)rq_mask << 35);
> +     } else {
> +             cfg &= ~BIT_ULL(43);
> +             reg = (reg & ~GENMASK_ULL(36, 35));
> +     }
> +     rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf), cfg);
> +     rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), reg);
> +}
> +
> +static inline void
> +configure_spb_cpt(struct rvu *rvu, int blkaddr, int nixlf,
> +               struct nix_rq_cpt_field_mask_cfg_req *req, bool enable)
> +{
> +     u64 cfg;
> +
> +     cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf));
> +     if (enable) {
> +             cfg |= BIT_ULL(37);
> +             cfg &= ~GENMASK_ULL(42, 38);
> +             cfg |= ((u64)req->ipsec_cfg1.spb_cpt_sizem1 << 38);
> +             cfg &= ~GENMASK_ULL(63, 44);
> +             cfg |= ((u64)req->ipsec_cfg1.spb_cpt_aura << 44);
> +     } else {
> +             cfg &= ~BIT_ULL(37);
> +             cfg &= ~GENMASK_ULL(42, 38);
> +             cfg &= ~GENMASK_ULL(63, 44);
> +     }
> +     rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf), cfg);
> +}
> +
> +static
> +int nix_inline_rq_mask_alloc(struct rvu *rvu,
> +                          struct nix_rq_cpt_field_mask_cfg_req *req,
> +                          struct nix_hw *nix_hw, int blkaddr)
> +{
> +     u8 rq_cpt_mask_select;
> +     u64 reg_mask;
> +     u64 reg_set;
> +     int idx, rq_idx;
> +
> +     for (idx = 0; idx < nix_hw->rq_msk.in_use; idx++) {
> +             for (rq_idx = 0; rq_idx < RQ_CTX_MASK_MAX; rq_idx++) {
> +                     reg_mask = rvu_read64(rvu, blkaddr,
> +                                           NIX_AF_RX_RQX_MASKX(idx, rq_idx));
> +                     reg_set  = rvu_read64(rvu, blkaddr,
> +                                           NIX_AF_RX_RQX_SETX(idx, rq_idx));
> +                     if (reg_mask != req->rq_ctx_word_mask[rq_idx] ||
> +                         reg_set != req->rq_ctx_word_set[rq_idx])
> +                             break;
> +             }
> +             if (rq_idx == RQ_CTX_MASK_MAX)
> +                     break;
> +     }
> +
> +     if (idx < nix_hw->rq_msk.in_use) {
> +             /* Match found */
> +             rq_cpt_mask_select = idx;
> +             return idx;
> +     }
> +
> +     if (nix_hw->rq_msk.in_use == nix_hw->rq_msk.total)
> +             return NIX_AF_ERR_RQ_CPT_MASK;
> +
> +     rq_cpt_mask_select = nix_hw->rq_msk.in_use++;
> +
> +     for (rq_idx = 0; rq_idx < RQ_CTX_MASK_MAX; rq_idx++) {
> +             rvu_write64(rvu, blkaddr,
> +                         NIX_AF_RX_RQX_MASKX(rq_cpt_mask_select, rq_idx),
> +                         req->rq_ctx_word_mask[rq_idx]);
> +             rvu_write64(rvu, blkaddr,
> +                         NIX_AF_RX_RQX_SETX(rq_cpt_mask_select, rq_idx),
> +                         req->rq_ctx_word_set[rq_idx]);
> +     }
> +
> +     return rq_cpt_mask_select;
> +}
> +
> +int rvu_mbox_handler_nix_lf_inline_rq_cfg(struct rvu *rvu,
> +                                       struct nix_rq_cpt_field_mask_cfg_req *req,
> +                                       struct msg_rsp *rsp)
> +{
> +     struct rvu_hwinfo *hw = rvu->hw;
> +     struct nix_hw *nix_hw;
> +     int blkaddr, nixlf;
> +     int rq_mask = 0, err;
> +
> +     err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
> +     if (err)
> +             return err;
> +
> +     nix_hw = get_nix_hw(rvu->hw, blkaddr);
> +     if (!nix_hw)
> +             return NIX_AF_ERR_INVALID_NIXBLK;
> +
> +     if (!hw->cap.second_cpt_pass)
> +             return NIX_AF_ERR_INVALID_NIXBLK;
> +
> +     if (req->ipsec_cfg1.rq_mask_enable) {
> +             rq_mask = nix_inline_rq_mask_alloc(rvu, req, nix_hw, blkaddr);
> +             if (rq_mask < 0)
> +                     return NIX_AF_ERR_RQ_CPT_MASK;
> +     }
> +
> +     configure_rq_mask(rvu, blkaddr, nixlf, rq_mask,
> +                       req->ipsec_cfg1.rq_mask_enable);
> +     configure_spb_cpt(rvu, blkaddr, nixlf, req,
> +                       req->ipsec_cfg1.spb_cpt_enable);
> +     return 0;
> +}
> +
> +int rvu_mbox_handler_nix_set_vlan_tpid(struct rvu *rvu,
> +                                    struct nix_set_vlan_tpid *req,
> +                                    struct msg_rsp *rsp)
> +{
> +     u16 pcifunc = req->hdr.pcifunc;
> +     int nixlf, err, blkaddr;
> +     u64 cfg;
> +
> +     err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
> +     if (err)
> +             return err;
> +
> +     if (req->vlan_type != NIX_VLAN_TYPE_OUTER &&
> +         req->vlan_type != NIX_VLAN_TYPE_INNER)
> +             return NIX_AF_ERR_PARAM;
> +
> +     cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
> +
> +     if (req->vlan_type == NIX_VLAN_TYPE_OUTER)
> +             cfg = (cfg & ~GENMASK_ULL(15, 0)) | req->tpid;
> +     else
> +             cfg = (cfg & ~GENMASK_ULL(31, 16)) | ((u64)req->tpid << 16);
> +
> +     rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
> +     return 0;
> +}
> +
> +int rvu_mbox_handler_nix_tl1_rr_prio(struct rvu *rvu,
> +                                  struct nix_tl1_rr_prio_req *req,
> +                                  struct msg_rsp *rsp)
> +{
> +     u16 pcifunc = req->hdr.pcifunc;
> +     int blkaddr, nixlf, schq, err;
> +     struct rvu_pfvf *pfvf;
> +     u64 regval;
> +
> +     err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
> +     if (err)
> +             return err;
> +
> +     pfvf = rvu_get_pfvf(rvu, pcifunc);
> +     /* Only PF is allowed */
> +     if (is_vf(pcifunc))
> +             return NIX_AF_ERR_TL1_RR_PRIO_PERM_DENIED;
> +
> +     pfvf->tl1_rr_prio = req->tl1_rr_prio;
> +
> +     /* update TL1 topology */
> +     schq = nix_get_tx_link(rvu, pcifunc);
> +     regval = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq));
> +     regval &= ~GENMASK_ULL(4, 1);
> +     regval |= pfvf->tl1_rr_prio << 1;
> +     rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), regval);
> +
> +     return 0;
> +}
> +
> +static struct nix_mcast_grp_elem *rvu_nix_mcast_find_grp_elem(struct nix_mcast_grp *mcast_grp,
> +                                                           u32 mcast_grp_idx)
> +{
> +     struct nix_mcast_grp_elem *iter;
> +     bool is_found = false;
> +
> +     list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list) {
> +             if (iter->mcast_grp_idx == mcast_grp_idx) {
> +                     is_found = true;
> +                     break;
> +             }
> +     }
> +
> +     if (is_found)
> +             return iter;
> +
> +     return NULL;
> +}
> +
> +int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx)
> +{
> +     struct nix_mcast_grp_elem *elem;
> +     struct nix_mcast_grp *mcast_grp;
> +     struct nix_hw *nix_hw;
> +     int blkaddr, ret;
> +
> +     blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
> +     nix_hw = get_nix_hw(rvu->hw, blkaddr);
> +     if (!nix_hw)
> +             return NIX_AF_ERR_INVALID_NIXBLK;
> +
> +     mcast_grp = &nix_hw->mcast_grp;
> +     mutex_lock(&mcast_grp->mcast_grp_lock);
> +     elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
> +     if (!elem)
> +             ret = NIX_AF_ERR_INVALID_MCAST_GRP;
> +     else
> +             ret = elem->mce_start_index;
> +
> +     mutex_unlock(&mcast_grp->mcast_grp_lock);
> +     return ret;
> +}
> +
> +void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc)
> +{
> +     struct nix_mcast_grp_destroy_req dreq = { 0 };
> +     struct nix_mcast_grp_update_req ureq = { 0 };
> +     struct nix_mcast_grp_update_rsp ursp = { 0 };
> +     struct nix_mcast_grp_elem *elem, *tmp;
> +     struct nix_mcast_grp *mcast_grp;
> +     struct nix_hw *nix_hw;
> +     int blkaddr;
> +
> +     blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
> +     nix_hw = get_nix_hw(rvu->hw, blkaddr);
> +     if (!nix_hw)
> +             return;
> +
> +     mcast_grp = &nix_hw->mcast_grp;
> +
> +     mutex_lock(&mcast_grp->mcast_grp_lock);
> +     list_for_each_entry_safe(elem, tmp, &mcast_grp->mcast_grp_head, list) {
> +             struct nix_mce_list *mce_list;
> +             struct hlist_node *tmp;
> +             struct mce *mce;
> +
> +             /* If the pcifunc which created the multicast/mirror
> +              * group received an FLR, then delete the entire group.
> +              */
> +             if (elem->pcifunc == pcifunc) {
> +                     /* Delete group */
> +                     dreq.hdr.pcifunc = elem->pcifunc;
> +                     dreq.mcast_grp_idx = elem->mcast_grp_idx;
> +                     dreq.is_af = 1;
> +                     rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
> +                     continue;
> +             }
> +
> +             /* Iterate the group elements and delete the element which
> +              * received the FLR.
> +              */
> +             mce_list = &elem->mcast_mce_list;
> +             hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
> +                     if (mce->pcifunc == pcifunc) {
> +                             ureq.hdr.pcifunc = pcifunc;
> +                             ureq.num_mce_entry = 1;
> +                             ureq.mcast_grp_idx = elem->mcast_grp_idx;
> +                             ureq.op = NIX_MCAST_OP_DEL_ENTRY;
> +                             ureq.pcifunc[0] = pcifunc;
> +                             ureq.is_af = 1;
> +                             rvu_mbox_handler_nix_mcast_grp_update(rvu, &ureq, &ursp);
> +                             break;
> +                     }
> +             }
> +     }
> +     mutex_unlock(&mcast_grp->mcast_grp_lock);
> +}
> +
> +int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
> +                                 u32 mcast_grp_idx, u16 mcam_index)
> +{
> +     struct nix_mcast_grp_elem *elem;
> +     struct nix_mcast_grp *mcast_grp;
> +     struct nix_hw *nix_hw;
> +     int blkaddr, ret = 0;
> +
> +     blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
> +     nix_hw = get_nix_hw(rvu->hw, blkaddr);
> +     if (!nix_hw)
> +             return NIX_AF_ERR_INVALID_NIXBLK;
> +
> +     mcast_grp = &nix_hw->mcast_grp;
> +     mutex_lock(&mcast_grp->mcast_grp_lock);
> +     elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
> +     if (!elem)
> +             ret = NIX_AF_ERR_INVALID_MCAST_GRP;
> +     else
> +             elem->mcam_index = mcam_index;
> +
> +     mutex_unlock(&mcast_grp->mcast_grp_lock);
> +     return ret;
> +}
> +
> +int rvu_mbox_handler_nix_mcast_grp_create(struct rvu *rvu,
> +                                       struct nix_mcast_grp_create_req *req,
> +                                       struct nix_mcast_grp_create_rsp *rsp)
> +{
> +     struct nix_mcast_grp_elem *elem;
> +     struct nix_mcast_grp *mcast_grp;
> +     struct nix_hw *nix_hw;
> +     int blkaddr, err;
> +
> +     err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
> +     if (err)
> +             return err;
> +
> +     mcast_grp = &nix_hw->mcast_grp;
> +     elem = kzalloc(sizeof(*elem), GFP_KERNEL);
> +     if (!elem)
> +             return -ENOMEM;
> +
> +     INIT_HLIST_HEAD(&elem->mcast_mce_list.head);
> +     elem->mcam_index = -1;
> +     elem->mce_start_index = -1;
> +     elem->pcifunc = req->hdr.pcifunc;
> +     elem->dir = req->dir;
> +     elem->mcast_grp_idx = mcast_grp->next_grp_index++;
> +
> +     mutex_lock(&mcast_grp->mcast_grp_lock);
> +     list_add_tail(&elem->list, &mcast_grp->mcast_grp_head);
> +     mcast_grp->count++;
> +     mutex_unlock(&mcast_grp->mcast_grp_lock);
> +
> +     rsp->mcast_grp_idx = elem->mcast_grp_idx;
> +     return 0;
> +}
> +
> +int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu,
> +                                        struct nix_mcast_grp_destroy_req *req,
> +                                        struct msg_rsp *rsp)
> +{
> +     struct npc_delete_flow_req uninstall_req = { 0 };
> +     struct npc_delete_flow_rsp uninstall_rsp = { 0 };
> +     struct nix_mcast_grp_elem *elem;
> +     struct nix_mcast_grp *mcast_grp;
> +     int blkaddr, err, ret = 0;
> +     struct nix_mcast *mcast;
> +     struct nix_hw *nix_hw;
> +
> +     err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
> +     if (err)
> +             return err;
> +
> +     mcast_grp = &nix_hw->mcast_grp;
> +
> +     /* If AF is requesting the deletion,
> +      * it already holds the lock.
> +      */
> +     if (!req->is_af)
> +             mutex_lock(&mcast_grp->mcast_grp_lock);
> +
> +     elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
> +     if (!elem) {
> +             ret = NIX_AF_ERR_INVALID_MCAST_GRP;
> +             goto unlock_grp;
> +     }
> +
> +     /* If no mce entries are associated with the group
> +      * then just remove it from the global list.
> +      */
> +     if (!elem->mcast_mce_list.count)
> +             goto delete_grp;
> +
> +     /* Delete the associated mcam entry and
> +      * remove all mce entries from the group
> +      */
> +     mcast = &nix_hw->mcast;
> +     mutex_lock(&mcast->mce_lock);
> +     if (elem->mcam_index != -1) {
> +             uninstall_req.hdr.pcifunc = req->hdr.pcifunc;
> +             uninstall_req.entry = elem->mcam_index;
> +             rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
> +     }
> +
> +     nix_free_mce_list(mcast, elem->mcast_mce_list.count,
> +                       elem->mce_start_index, elem->dir);
> +     nix_delete_mcast_mce_list(&elem->mcast_mce_list);
> +     mutex_unlock(&mcast->mce_lock);
> +
> +delete_grp:
> +     list_del(&elem->list);
> +     kfree(elem);
> +     mcast_grp->count--;
> +
> +unlock_grp:
> +     if (!req->is_af)
> +             mutex_unlock(&mcast_grp->mcast_grp_lock);
> +
> +     return ret;
> +}
> +
> +int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
> +                                       struct nix_mcast_grp_update_req *req,
> +                                       struct nix_mcast_grp_update_rsp *rsp)
> +{
> +     struct nix_mcast_grp_destroy_req dreq = { 0 };
> +     struct npc_mcam *mcam = &rvu->hw->mcam;
> +     struct nix_mcast_grp_elem *elem;
> +     struct nix_mcast_grp *mcast_grp;
> +     int blkaddr, err, npc_blkaddr;
> +     u16 prev_count, new_count;
> +     struct nix_mcast *mcast;
> +     struct nix_hw *nix_hw;
> +     int i, ret;
> +
> +     if (!req->num_mce_entry)
> +             return 0;
> +
> +     err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
> +     if (err)
> +             return err;
> +
> +     mcast_grp = &nix_hw->mcast_grp;
> +
> +     /* If AF is requesting the update,
> +      * it already holds the lock.
> +      */
> +     if (!req->is_af)
> +             mutex_lock(&mcast_grp->mcast_grp_lock);
> +
> +     elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
> +     if (!elem) {
> +             ret = NIX_AF_ERR_INVALID_MCAST_GRP;
> +             goto unlock_grp;
> +     }
> +
> +     /* If any pcifunc matches the group's pcifunc, then we can
> +      * delete the entire group.
> +      */
> +     if (req->op == NIX_MCAST_OP_DEL_ENTRY) {
> +             for (i = 0; i < req->num_mce_entry; i++) {
> +                     if (elem->pcifunc == req->pcifunc[i]) {
> +                             /* Delete group */
> +                             dreq.hdr.pcifunc = elem->pcifunc;
> +                             dreq.mcast_grp_idx = elem->mcast_grp_idx;
> +                             dreq.is_af = 1;
> +                             rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
> +                             ret = 0;
> +                             goto unlock_grp;
> +                     }
> +             }
> +     }
> +
> +     mcast = &nix_hw->mcast;
> +     mutex_lock(&mcast->mce_lock);
> +     npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
> +     if (elem->mcam_index != -1)
> +             npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, false);
> +
> +     prev_count = elem->mcast_mce_list.count;
> +     if (req->op == NIX_MCAST_OP_ADD_ENTRY) {
> +             new_count = prev_count + req->num_mce_entry;
> +             if (prev_count)
> +                     nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
> +
> +             elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
> +
> +             /* It is possible not to get contiguous memory */
> +             if (elem->mce_start_index < 0) {
> +                     if (elem->mcam_index != -1) {
> +                             npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
> +                                                   elem->mcam_index, true);
> +                             ret = NIX_AF_ERR_NON_CONTIG_MCE_LIST;
> +                             goto unlock_mce;
> +                     }
> +             }
> +
> +             ret = nix_add_mce_list_entry(rvu, nix_hw, elem, req);
> +             if (ret) {
> +                     nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
> +                     if (prev_count)
> +                             elem->mce_start_index = nix_alloc_mce_list(mcast,
> +                                                                        prev_count,
> +                                                                        elem->dir);
> +
> +                     if (elem->mcam_index != -1)
> +                             npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
> +                                                   elem->mcam_index, true);
> +
> +                     goto unlock_mce;
> +             }
> +     } else {
> +             if (!prev_count || prev_count < req->num_mce_entry) {
> +                     if (elem->mcam_index != -1)
> +                             npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
> +                                                   elem->mcam_index, true);
> +                     ret = NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
> +                     goto unlock_mce;
> +             }
> +
> +             nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
> +             new_count = prev_count - req->num_mce_entry;
> +             elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
> +             ret = nix_del_mce_list_entry(rvu, nix_hw, elem, req);
> +             if (ret) {
> +                     nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
> +                     elem->mce_start_index = nix_alloc_mce_list(mcast, prev_count, elem->dir);
> +                     if (elem->mcam_index != -1)
> +                             npc_enable_mcam_entry(rvu, mcam,
> +                                                   npc_blkaddr,
> +                                                   elem->mcam_index,
> +                                                   true);
> +                     goto unlock_mce;
> +             }
> +     }
> +
> +     if (elem->mcam_index == -1) {
> +             rsp->mce_start_index = elem->mce_start_index;
> +             ret = 0;
> +             goto unlock_mce;
> +     }
> +
> +     nix_mcast_update_action(rvu, elem);
> +     npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, true);
> +     rsp->mce_start_index = elem->mce_start_index;
> +     ret = 0;
> +
> +unlock_mce:
> +     mutex_unlock(&mcast->mce_lock);
> +
> +unlock_grp:
> +     if (!req->is_af)
> +             mutex_unlock(&mcast_grp->mcast_grp_lock);
> +
> +     return ret;
> +}
> 
> ---
> base-commit: c1c8bb2ac5cfab52cc2a87d38409ff442fc8b5b5
> change-id: 20240820-octeon-sdkv5-15-da90b143e8f3
> 
> Best regards,
> -- 
> Kevin Hao <haoke...@gmail.com>
> 