From: Kevin Hao <kexin....@windriver.com>

This reverts commit e6a43185a1bd93c7763fffb285619ee3ba4ec407.
The changes in commit e6a43185a1bd are only relevant for the
non-octeon branches. The code they removed is still necessary for the
octeon branches.

Signed-off-by: Kevin Hao <kexin....@windriver.com>
---
Hi Bruce,

Please merge this on v5.15/standard/preempt-rt/cn-sdkv5.4/octeon branch.
---
 .../net/ethernet/marvell/octeontx2/af/rvu_nix.c    | 1138 ++++++++++++++++++--
 1 file changed, 1032 insertions(+), 106 deletions(-)

diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index a47021bb9cf9..0f92025042df 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -12,6 +12,7 @@
 #include "rvu_reg.h"
 #include "rvu.h"
 #include "npc.h"
+#include "mcs.h"
 #include "cgx.h"
 #include "lmac_common.h"
 #include "rvu_fixes.h"
@@ -72,12 +73,19 @@ enum nix_makr_fmt_indexes {
 /* For now considering MC resources needed for broadcast
  * pkt replication only. i.e 256 HWVFs + 12 PFs.
  */
-#define MC_TBL_SIZE    MC_TBL_SZ_512
-#define MC_BUF_CNT     MC_BUF_CNT_128
+#define MC_TBL_SIZE    MC_TBL_SZ_2K
+#define MC_BUF_CNT     MC_BUF_CNT_1024
+
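+/* Max number of MCE entries in the egress (TX) multicast table */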
+#define MC_TX_MAX      2048
 
 struct mce {
        struct hlist_node       node;
+       u32                     rq_rss_index;
        u16                     pcifunc;
+       u16                     channel;
+       u8                      dest_type;
+       u8                      is_active;
+       u8                      reserved[2];
 };
 
 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
@@ -165,18 +173,33 @@ static void nix_mce_list_init(struct nix_mce_list *list, int max)
        list->max = max;
 }
 
-static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
+static int nix_alloc_mce_list(struct nix_mcast *mcast, int count, u8 dir)
 {
+       struct rsrc_bmap *mce_counter;
        int idx;
 
        if (!mcast)
-               return 0;
+               return -EINVAL;
 
-       idx = mcast->next_free_mce;
-       mcast->next_free_mce += count;
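+       /* 'dir' selects the ingress or egress MCE entry bitmap */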
+       mce_counter = &mcast->mce_counter[dir];
+       if (!rvu_rsrc_check_contig(mce_counter, count))
+               return -ENOSPC;
+
+       idx = rvu_alloc_rsrc_contig(mce_counter, count);
        return idx;
 }
 
+static void nix_free_mce_list(struct nix_mcast *mcast, int count, int start, u8 dir)
+{
+       struct rsrc_bmap *mce_counter;
+
+       if (!mcast)
+               return;
+
+       mce_counter = &mcast->mce_counter[dir];
+       rvu_free_rsrc_contig(mce_counter, count, start);
+}
+
 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
 {
        int nix_blkaddr = 0, i = 0;
@@ -192,6 +215,18 @@ struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
        return NULL;
 }
 
+int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type)
+{
+       if (hw->cap.nix_multiple_dwrr_mtu)
+               return NIX_AF_DWRR_MTUX(smq_link_type);
+
+       if (smq_link_type == SMQ_LINK_TYPE_SDP)
+               return NIX_AF_DWRR_SDP_MTU;
+
+       /* Here it's the same reg for RPM and LBK */
+       return NIX_AF_DWRR_RPM_MTU;
+}
+
 u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
 {
        dwrr_mtu &= 0x1FULL;
@@ -330,8 +365,11 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
                pfvf->tx_chan_cnt = 1;
                rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;
 
-               cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
-               rvu_npc_set_pkind(rvu, pkind, pfvf);
+               if (rvu_cgx_is_pkind_config_permitted(rvu, pcifunc)) {
+                       cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+                                     pkind);
+                       rvu_npc_set_pkind(rvu, pkind, pfvf);
+               }
 
                break;
        case NIX_INTF_TYPE_LBK:
@@ -468,14 +506,190 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
        rvu_cgx_disable_dmac_entries(rvu, pcifunc);
 }
 
-int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
-                                   struct nix_bp_cfg_req *req,
+#define NIX_BPIDS_PER_LMAC     8
+#define NIX_BPIDS_PER_CPT      1
+static int nix_setup_bpids(struct rvu *rvu, struct nix_hw *hw, int blkaddr)
+{
+       struct nix_bp *bp = &hw->bp;
+       int err, max_bpids;
+       u64 cfg;
+
+       cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+       max_bpids = (cfg >> 12) & 0xFFF;
+
+       /* Reserve the BPIDs for CGX and SDP */
+       bp->cgx_bpid_cnt = rvu->hw->cgx_links * NIX_BPIDS_PER_LMAC;
+       bp->sdp_bpid_cnt = rvu->hw->sdp_links * (cfg & 0xFFF);
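+       /* The free pool starts after the BPIDs reserved for CGX, SDP and CPT */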
+       bp->free_pool_base = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt +
+                            NIX_BPIDS_PER_CPT;
+       bp->bpids.max = max_bpids - bp->free_pool_base;
+
+       err = rvu_alloc_bitmap(&bp->bpids);
+       if (err)
+               return err;
+
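+       /* Per-BPID bookkeeping: owning PF_FUNC, interface type and ref count */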
+       bp->fn_map = devm_kcalloc(rvu->dev, bp->bpids.max,
+                                 sizeof(u16), GFP_KERNEL);
+       if (!bp->fn_map)
+               return -ENOMEM;
+
+       bp->intf_map = devm_kcalloc(rvu->dev, bp->bpids.max,
+                                   sizeof(u8), GFP_KERNEL);
+       if (!bp->intf_map)
+               return -ENOMEM;
+
+       bp->ref_cnt = devm_kcalloc(rvu->dev, bp->bpids.max,
+                                  sizeof(u8), GFP_KERNEL);
+       if (!bp->ref_cnt)
+               return -ENOMEM;
+
+       return 0;
+}
+
+void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
+{
+       int blkaddr, bpid, err;
+       struct nix_hw *nix_hw;
+       struct nix_bp *bp;
+
+       if (!is_afvf(pcifunc))
+               return;
+
+       err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+       if (err)
+               return;
+
+       bp = &nix_hw->bp;
+
+       mutex_lock(&rvu->rsrc_lock);
+       for (bpid = 0; bpid < bp->bpids.max; bpid++) {
+               if (bp->fn_map[bpid] == pcifunc) {
+                       bp->ref_cnt[bpid]--;
+                       if (bp->ref_cnt[bpid])
+                               continue;
+                       rvu_free_rsrc(&bp->bpids, bpid);
+                       bp->fn_map[bpid] = 0;
+               }
+       }
+       mutex_unlock(&rvu->rsrc_lock);
+}
+
+int rvu_mbox_handler_nix_rx_chan_cfg(struct rvu *rvu,
+                                    struct nix_rx_chan_cfg *req,
+                                    struct nix_rx_chan_cfg *rsp)
+{
+       struct rvu_pfvf *pfvf;
+       int blkaddr;
+       u16 chan;
+
+       pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+       blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+       chan = pfvf->rx_chan_base + req->chan;
+
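+       /* A CPT channel is the corresponding link channel with BIT(11) set */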
+       if (req->type == NIX_INTF_TYPE_CPT)
+               chan = chan | BIT(11);
+
+       if (req->read) {
+               rsp->val = rvu_read64(rvu, blkaddr,
+                                     NIX_AF_RX_CHANX_CFG(chan));
+               rsp->chan = req->chan;
+       } else {
+               rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), req->val);
+       }
+       return 0;
+}
+
+int rvu_mbox_handler_nix_alloc_bpids(struct rvu *rvu,
+                                    struct nix_alloc_bpid_req *req,
+                                    struct nix_bpids *rsp)
+{
+       u16 pcifunc = req->hdr.pcifunc;
+       struct nix_hw *nix_hw;
+       int blkaddr, cnt = 0;
+       struct nix_bp *bp;
+       int bpid, err;
+
+       err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+       if (err)
+               return err;
+
+       bp = &nix_hw->bp;
+
+       /* Interfaces like SSO use the same bpid across multiple
+        * applications. Check if a bpid is already allocated;
+        * otherwise allocate a new one.
+        */
+       mutex_lock(&rvu->rsrc_lock);
+       if (req->type > NIX_INTF_TYPE_CPT || req->type == NIX_INTF_TYPE_LBK) {
+               for (bpid = 0; bpid < bp->bpids.max; bpid++) {
+                       if (bp->intf_map[bpid] == req->type) {
+                               rsp->bpids[cnt] = bpid + bp->free_pool_base;
+                               rsp->bpid_cnt++;
+                               bp->ref_cnt[bpid]++;
+                               cnt++;
+                       }
+               }
+               if (rsp->bpid_cnt)
+                       goto exit;
+       }
+
+       for (cnt = 0; cnt < req->bpid_cnt; cnt++) {
+               bpid = rvu_alloc_rsrc(&bp->bpids);
+               if (bpid < 0)
+                       goto exit;
+               rsp->bpids[cnt] = bpid + bp->free_pool_base;
+               bp->intf_map[bpid] = req->type;
+               bp->fn_map[bpid] = pcifunc;
+               bp->ref_cnt[bpid]++;
+               rsp->bpid_cnt++;
+       }
+exit:
+       mutex_unlock(&rvu->rsrc_lock);
+       return 0;
+}
+
+int rvu_mbox_handler_nix_free_bpids(struct rvu *rvu,
+                                   struct nix_bpids *req,
                                    struct msg_rsp *rsp)
 {
        u16 pcifunc = req->hdr.pcifunc;
+       int blkaddr, cnt, err, id;
+       struct nix_hw *nix_hw;
+       struct nix_bp *bp;
+       u16 bpid;
+
+       err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+       if (err)
+               return err;
+
+       bp = &nix_hw->bp;
+       mutex_lock(&rvu->rsrc_lock);
+       for (cnt = 0; cnt < req->bpid_cnt; cnt++) {
+               bpid = req->bpids[cnt] - bp->free_pool_base;
+               bp->ref_cnt[bpid]--;
+               if (bp->ref_cnt[bpid])
+                       continue;
+               rvu_free_rsrc(&bp->bpids, bpid);
+               for (id = 0; id < bp->bpids.max; id++) {
+                       if (bp->fn_map[id] == pcifunc)
+                               bp->fn_map[id] = 0;
+               }
+       }
+       mutex_unlock(&rvu->rsrc_lock);
+       return 0;
+}
+
+static int nix_bp_disable(struct rvu *rvu,
+                         struct nix_bp_cfg_req *req,
+                         struct msg_rsp *rsp, bool cpt_link)
+{
+       u16 pcifunc = req->hdr.pcifunc;
+       int blkaddr, pf, type, err;
        struct rvu_pfvf *pfvf;
-       int blkaddr, pf, type;
+       struct nix_hw *nix_hw;
        u16 chan_base, chan;
+       struct nix_bp *bp;
+       u16 chan_v, bpid;
        u64 cfg;
 
        pf = rvu_get_pf(pcifunc);
@@ -483,41 +697,89 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
        if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
                return 0;
 
-       pfvf = rvu_get_pfvf(rvu, pcifunc);
-       blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+       if (is_sdp_pfvf(pcifunc))
+               type = NIX_INTF_TYPE_SDP;
 
+       if (cpt_link && !rvu->hw->cpt_links)
+               return 0;
+
+       pfvf = rvu_get_pfvf(rvu, pcifunc);
+       err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+       if (err)
+               return err;
+
+       bp = &nix_hw->bp;
        chan_base = pfvf->rx_chan_base + req->chan_base;
+
+       if (cpt_link) {
+               type = NIX_INTF_TYPE_CPT;
+               cfg = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
+               /* MODE=0 or MODE=1 => CPT looks only at channels starting from the CPT chan base */
+               cfg = (cfg >> 20) & 0x3;
+               if (cfg != 2)
+                       chan_base = rvu->hw->cpt_chan_base;
+       }
+
        for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
-               cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
-               rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+               /* CPT channel for a given link channel is always
+                * assumed to be BIT(11) set in link channel.
+                */
+               if (cpt_link)
+                       chan_v = chan | BIT(11);
+               else
+                       chan_v = chan;
+
+               cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
+               rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
                            cfg & ~BIT_ULL(16));
+
+               if (type == NIX_INTF_TYPE_LBK) {
+                       bpid = cfg & GENMASK(8, 0);
+                       mutex_lock(&rvu->rsrc_lock);
+                       rvu_free_rsrc(&bp->bpids, bpid - bp->free_pool_base);
+                       for (bpid = 0; bpid < bp->bpids.max; bpid++) {
+                               if (bp->fn_map[bpid] == pcifunc) {
+                                       bp->fn_map[bpid] = 0;
+                                       bp->ref_cnt[bpid] = 0;
+                               }
+                       }
+                       mutex_unlock(&rvu->rsrc_lock);
+               }
        }
        return 0;
 }
 
+int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
+                                   struct nix_bp_cfg_req *req,
+                                   struct msg_rsp *rsp)
+{
+       return nix_bp_disable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu,
+                                       struct nix_bp_cfg_req *req,
+                                       struct msg_rsp *rsp)
+{
+       return nix_bp_disable(rvu, req, rsp, true);
+}
+
 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
                            int type, int chan_id)
 {
-       int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
-       u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
+       int bpid, blkaddr, sdp_chan_base, err;
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_pfvf *pfvf;
+       struct nix_hw *nix_hw;
        u8 cgx_id, lmac_id;
-       u64 cfg;
-
-       blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
-       cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
-       lmac_chan_cnt = cfg & 0xFF;
-
-       cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
-       lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
-
-       cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
-       sdp_chan_cnt = cfg & 0xFFF;
-       sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
+       struct nix_bp *bp;
 
        pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
 
+       err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+       if (err)
+               return err;
+
+       bp = &nix_hw->bp;
        /* Backpressure IDs range division
         * CGX channels are mapped to (0 - 191) BPIDs
         * LBK channels are mapped to (192 - 255) BPIDs
@@ -530,38 +792,52 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
         */
        switch (type) {
        case NIX_INTF_TYPE_CGX:
-               if ((req->chan_base + req->chan_cnt) > 15)
-                       return -EINVAL;
+               if ((req->chan_base + req->chan_cnt) > NIX_BPIDS_PER_LMAC)
+                       return NIX_AF_ERR_INVALID_BPID_REQ;
+
                rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
                /* Assign bpid based on cgx, lmac and chan id */
-               bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
-                       (lmac_id * lmac_chan_cnt) + req->chan_base;
+               bpid = (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) +
+                       (lmac_id * NIX_BPIDS_PER_LMAC) + req->chan_base;
 
                if (req->bpid_per_chan)
                        bpid += chan_id;
-               if (bpid > cgx_bpid_cnt)
-                       return -EINVAL;
+               if (bpid > bp->cgx_bpid_cnt)
+                       return NIX_AF_ERR_INVALID_BPID;
+               break;
+       case NIX_INTF_TYPE_CPT:
+               bpid = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt;
                break;
-
        case NIX_INTF_TYPE_LBK:
-               if ((req->chan_base + req->chan_cnt) > 63)
-                       return -EINVAL;
-               bpid = cgx_bpid_cnt + req->chan_base;
-               if (req->bpid_per_chan)
-                       bpid += chan_id;
-               if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
-                       return -EINVAL;
+               /* Alloc bpid from the free pool */
+               mutex_lock(&rvu->rsrc_lock);
+               bpid = rvu_alloc_rsrc(&bp->bpids);
+               if (bpid < 0) {
+                       mutex_unlock(&rvu->rsrc_lock);
+                       return NIX_AF_ERR_INVALID_BPID;
+               }
+               bp->fn_map[bpid] = req->hdr.pcifunc;
+               bp->ref_cnt[bpid]++;
+               bpid += bp->free_pool_base;
+               mutex_unlock(&rvu->rsrc_lock);
                break;
        case NIX_INTF_TYPE_SDP:
-               if ((req->chan_base + req->chan_cnt) > 255)
-                       return -EINVAL;
+               if ((req->chan_base + req->chan_cnt) > bp->sdp_bpid_cnt)
+                       return NIX_AF_ERR_INVALID_BPID_REQ;
+
+               /* Handle the use case of 2 SDP blocks */
+               if (!hw->cap.programmable_chans)
+                       sdp_chan_base = pfvf->rx_chan_base - NIX_CHAN_SDP_CH_START;
+               else
+                       sdp_chan_base = pfvf->rx_chan_base - hw->sdp_chan_base;
+
+               bpid = bp->cgx_bpid_cnt + req->chan_base + sdp_chan_base;
 
-               bpid = sdp_bpid_cnt + req->chan_base;
                if (req->bpid_per_chan)
                        bpid += chan_id;
 
-               if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
-                       return -EINVAL;
+               if (bpid > (bp->cgx_bpid_cnt + bp->sdp_bpid_cnt))
+                       return NIX_AF_ERR_INVALID_BPID;
                break;
        default:
                return -EINVAL;
@@ -569,15 +845,17 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
        return bpid;
 }
 
-int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
-                                  struct nix_bp_cfg_req *req,
-                                  struct nix_bp_cfg_rsp *rsp)
+static int nix_bp_enable(struct rvu *rvu,
+                        struct nix_bp_cfg_req *req,
+                        struct nix_bp_cfg_rsp *rsp,
+                        bool cpt_link)
 {
        int blkaddr, pf, type, chan_id = 0;
        u16 pcifunc = req->hdr.pcifunc;
+       s16 bpid, bpid_base = -1;
        struct rvu_pfvf *pfvf;
        u16 chan_base, chan;
-       s16 bpid, bpid_base;
+       u16 chan_v;
        u64 cfg;
 
        pf = rvu_get_pf(pcifunc);
@@ -590,25 +868,46 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
            type != NIX_INTF_TYPE_SDP)
                return 0;
 
+       if (cpt_link && !rvu->hw->cpt_links)
+               return 0;
+
        pfvf = rvu_get_pfvf(rvu, pcifunc);
        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
 
-       bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
        chan_base = pfvf->rx_chan_base + req->chan_base;
-       bpid = bpid_base;
+
+       if (cpt_link) {
+               type = NIX_INTF_TYPE_CPT;
+               cfg = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
+               /* MODE=0 or MODE=1 => CPT looks only at channels starting from the CPT chan base */
+               cfg = (cfg >> 20) & 0x3;
+               if (cfg != 2)
+                       chan_base = rvu->hw->cpt_chan_base;
+       }
 
        for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
+               bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
                if (bpid < 0) {
                        dev_warn(rvu->dev, "Fail to enable backpressure\n");
                        return -EINVAL;
                }
+               if (bpid_base < 0)
+                       bpid_base = bpid;
 
-               cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+               /* CPT channel for a given link channel is always
+                * assumed to be BIT(11) set in link channel.
+                */
+
+               if (cpt_link)
+                       chan_v = chan | BIT(11);
+               else
+                       chan_v = chan;
+
+               cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
                cfg &= ~GENMASK_ULL(8, 0);
-               rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+               rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
                            cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
                chan_id++;
-               bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
        }
 
        for (chan = 0; chan < req->chan_cnt; chan++) {
@@ -623,6 +922,20 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
        return 0;
 }
 
+int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
+                                  struct nix_bp_cfg_req *req,
+                                  struct nix_bp_cfg_rsp *rsp)
+{
+       return nix_bp_enable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu,
+                                      struct nix_bp_cfg_req *req,
+                                      struct nix_bp_cfg_rsp *rsp)
+{
+       return nix_bp_enable(rvu, req, rsp, true);
+}
+
 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
                                 u64 format, bool v4, u64 *fidx)
 {
@@ -823,12 +1136,15 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
 {
        struct admin_queue *aq = block->aq;
        struct nix_aq_res_s *result;
-       int timeout = 1000;
-       u64 reg, head;
+       u64 reg, head, intr;
+       int timeout = 2000;
        int ret;
 
-       result = (struct nix_aq_res_s *)aq->res->base;
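+       /* If AQ status bit 63 is set, reset the AQ before enqueuing */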
+       reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
+       if (reg & BIT_ULL(63))
+               nix_aq_reset(rvu, block);
 
+       result = (struct nix_aq_res_s *)aq->res->base;
        /* Get current head pointer where to append this instruction */
        reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
        head = (reg >> 4) & AQ_PTR_MASK;
@@ -842,18 +1158,28 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
        /* Ring the doorbell and wait for result */
        rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
        while (result->compcode == NIX_AQ_COMP_NOTDONE) {
+               intr = rvu_read64(rvu, block->addr, NIX_AF_ERR_INT);
                cpu_relax();
                udelay(1);
                timeout--;
-               if (!timeout)
+               if (!timeout) {
+                       dev_err_ratelimited(rvu->dev,
+                                           "%s wait timeout intr=0x%llx 
status=0x%llx compcode:%d\n",
+                                           __func__, intr,
+                                          rvu_read64(rvu, block->addr, 
NIX_AF_AQ_STATUS),
+                                           result->compcode);
                        return -EBUSY;
+               }
        }
 
        if (result->compcode != NIX_AQ_COMP_GOOD) {
                /* TODO: Replace this with some error code */
+               dev_err(rvu->dev, "AQ failed with error:%d\n", 
result->compcode);
                if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
                    result->compcode == NIX_AQ_COMP_LOCKERR ||
                    result->compcode == NIX_AQ_COMP_CTX_POISON) {
+                       dev_err(rvu->dev, "AQ failed due to cache line 
error:%d\n",
+                               result->compcode);
+                       ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
+                       ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
+                       ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
@@ -1248,7 +1574,9 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
                aq_req.cq.ena = 0;
                aq_req.cq_mask.ena = 1;
                aq_req.cq.bp_ena = 0;
+               aq_req.cq.lbp_ena = 0;
                aq_req.cq_mask.bp_ena = 1;
+               aq_req.cq_mask.lbp_ena = 1;
                q_cnt = pfvf->cq_ctx->qsize;
                bmap = pfvf->cq_bmap;
        }
@@ -1330,6 +1658,8 @@ int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
        return rvu_nix_aq_enq_inst(rvu, req, rsp);
 }
 #endif
+EXPORT_SYMBOL(rvu_mbox_handler_nix_aq_enq);
+
 /* CN10K mbox handler */
 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
                                      struct nix_cn10k_aq_enq_req *req,
@@ -1338,6 +1668,7 @@ int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
        return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
                                  (struct nix_aq_enq_rsp *)rsp);
 }
+EXPORT_SYMBOL(rvu_mbox_handler_nix_cn10k_aq_enq);
 
 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
                                       struct hwctx_disable_req *req,
@@ -1351,10 +1682,10 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
                                  struct nix_lf_alloc_rsp *rsp)
 {
        int nixlf, qints, hwctx_size, intf, err, rc = 0;
+       struct rvu_pfvf *pfvf, *parent_pf;
        struct rvu_hwinfo *hw = rvu->hw;
        u16 pcifunc = req->hdr.pcifunc;
        struct rvu_block *block;
-       struct rvu_pfvf *pfvf;
        u64 cfg, ctx_cfg;
        int blkaddr;
 
@@ -1364,6 +1695,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
        if (req->way_mask)
                req->way_mask &= 0xFFFF;
 
+       parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
        pfvf = rvu_get_pfvf(rvu, pcifunc);
        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
        if (!pfvf->nixlf || blkaddr < 0)
@@ -1522,8 +1854,10 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
        rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
 
        /* Configure pkind for TX parse config */
-       cfg = NPC_TX_DEF_PKIND;
-       rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
+       if (rvu_cgx_is_pkind_config_permitted(rvu, pcifunc)) {
+               cfg = NPC_TX_DEF_PKIND;
+               rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
+       }
 
        intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
        if (is_sdp_pfvf(pcifunc))
@@ -1541,6 +1875,10 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
        rvu_write64(rvu, blkaddr,
                    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
                    VTAGSIZE_T4 | VTAG_STRIP);
+       /* Configure RX VTAG Type 6 (strip) for fdsa */
+       rvu_write64(rvu, blkaddr,
+                   NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE6),
+                   VTAGSIZE_T4 | VTAG_STRIP | VTAG_CAPTURE);
 
        goto exit;
 
@@ -1601,6 +1939,9 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
        else
                rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
 
+       /* Reset SPI to SA index table */
+       rvu_nix_free_spi_to_sa_table(rvu, pcifunc);
+
        /* Free any tx vtag def entries used by this NIX LF */
        if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
                nix_free_tx_vtag_entries(rvu, pcifunc);
@@ -1746,6 +2087,42 @@ handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
        return true;
 }
 
+static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
+                                 int lvl, int schq)
+{
+       u64 tlx_parent = 0, tlx_schedule = 0;
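+       /* Clear this queue's PARENT and SCHEDULE CSRs so stale topology is not carried over */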
+
+       switch (lvl) {
+       case NIX_TXSCH_LVL_TL2:
+               tlx_parent   = NIX_AF_TL2X_PARENT(schq);
+               tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
+               break;
+       case NIX_TXSCH_LVL_TL3:
+               tlx_parent   = NIX_AF_TL3X_PARENT(schq);
+               tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
+               break;
+       case NIX_TXSCH_LVL_TL4:
+               tlx_parent   = NIX_AF_TL4X_PARENT(schq);
+               tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
+               break;
+       case NIX_TXSCH_LVL_MDQ:
+               /* no need to reset SMQ_CFG as HW clears this CSR
+                * on SMQ flush
+                */
+               tlx_parent   = NIX_AF_MDQX_PARENT(schq);
+               tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
+               break;
+       default:
+               return;
+       }
+
+       if (tlx_parent)
+               rvu_write64(rvu, blkaddr, tlx_parent, 0x0);
+
+       if (tlx_schedule)
+               rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
+}
+
 /* Disable shaping of pkts by a scheduler queue
  * at a given scheduler level.
  */
@@ -2035,6 +2412,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
 {
        struct rvu_hwinfo *hw = rvu->hw;
        u16 pcifunc = req->hdr.pcifunc;
+       struct rvu_pfvf *parent_pf;
        int link, blkaddr, rc = 0;
        int lvl, idx, start, end;
        struct nix_txsch *txsch;
@@ -2051,6 +2429,8 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
        if (!nix_hw)
                return NIX_AF_ERR_INVALID_NIXBLK;
 
+       parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
+
        mutex_lock(&rvu->rsrc_lock);
 
        /* Check if request is valid as per HW capabilities
@@ -2095,6 +2475,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
                                pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
                        nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
                        nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
+                       nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
                }
 
                for (idx = 0; idx < req->schq[lvl]; idx++) {
@@ -2104,11 +2485,12 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
                                pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
                        nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
                        nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
+                       nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
                }
        }
 
        rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
-       rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
+       rsp->aggr_lvl_rr_prio = parent_pf->tl1_rr_prio;
        rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
                                       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
                                       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
@@ -2120,13 +2502,156 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
        return rc;
 }
 
+static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
+                                  struct nix_smq_flush_ctx *smq_flush_ctx)
+{
+       struct nix_smq_tree_ctx *smq_tree_ctx;
+       u64 parent_off, regval;
+       u16 schq;
+       int lvl;
+
+       smq_flush_ctx->smq = smq;
+
+       schq = smq;
+       for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
+               smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
+               if (lvl == NIX_TXSCH_LVL_TL1) {
+                       smq_flush_ctx->tl1_schq = schq;
+                       smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
+                       smq_tree_ctx->pir_off = 0;
+                       smq_tree_ctx->pir_val = 0;
+                       parent_off = 0;
+               } else if (lvl == NIX_TXSCH_LVL_TL2) {
+                       smq_flush_ctx->tl2_schq = schq;
+                       smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
+                       smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
+                       parent_off = NIX_AF_TL2X_PARENT(schq);
+               } else if (lvl == NIX_TXSCH_LVL_TL3) {
+                       smq_flush_ctx->tl3_schq = schq;
+                       smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq);
+                       smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq);
+                       parent_off = NIX_AF_TL3X_PARENT(schq);
+               } else if (lvl == NIX_TXSCH_LVL_TL4) {
+                       smq_flush_ctx->tl4_schq = schq;
+                       smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq);
+                       smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq);
+                       parent_off = NIX_AF_TL4X_PARENT(schq);
+               } else if (lvl == NIX_TXSCH_LVL_MDQ) {
+                       smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq);
+                       smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq);
+                       parent_off = NIX_AF_MDQX_PARENT(schq);
+               }
+               /* save cir/pir register values */
+               smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off);
+               if (smq_tree_ctx->pir_off)
+                       smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off);
+
+               /* get parent txsch node */
+               if (parent_off) {
+                       regval = rvu_read64(rvu, blkaddr, parent_off);
+                       schq = (regval >> 16) & 0x1FF;
+               }
+       }
+}
+
+static void nix_dump_smq_status(struct rvu *rvu, int blkaddr, struct nix_smq_flush_ctx *ctx)
+{
+       dev_info(rvu->dev, "smq:%d tl1_schq:%d tl2:%d tl3:%d tl4:%d\n", 
ctx->smq, ctx->tl1_schq,
+                ctx->tl2_schq, ctx->tl3_schq, ctx->tl4_schq);
+
+       dev_info(rvu->dev, "NIX_AF_SMQX_CFG:0x%llx\n",
+                rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(ctx->smq)));
+       dev_info(rvu->dev, "NIX_AF_SMQX_STATUS:0x%llx\n",
+                rvu_read64(rvu, blkaddr, NIX_AF_SMQX_STATUS(ctx->smq)));
+       dev_info(rvu->dev, "NIX_AF_MDQX_MD_COUNT:0x%llx\n",
+                rvu_read64(rvu, blkaddr, NIX_AF_MDQX_MD_COUNT));
+       dev_info(rvu->dev, "NIX_AF_MDQX_IN_MD_COUNT:0x%llx\n",
+                rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(ctx->smq)));
+       dev_info(rvu->dev, "NIX_AF_MDQX_OUT_MD_COUNT:0x%llx\n",
+                rvu_read64(rvu, blkaddr, NIX_AF_MDQX_OUT_MD_COUNT(ctx->smq)));
+       dev_info(rvu->dev, "NIX_AF_TL1X_SW_XOFF:0x%llx\n",
+                rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(ctx->tl1_schq)));
+       dev_info(rvu->dev, "NIX_AF_TL2X_SW_XOFF=0x%llx\n",
+                rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(ctx->tl2_schq)));
+}
+
+static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
+                                     struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
+{
+       struct nix_txsch *txsch;
+       struct nix_hw *nix_hw;
+       u64 regoff;
+       int tl2;
+
+       nix_hw = get_nix_hw(rvu->hw, blkaddr);
+       if (!nix_hw)
+               return;
+
+       /* loop through all TL2s with matching PF_FUNC */
+       txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+       for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
+               /* skip the smq(flush) TL2 */
+               if (tl2 == smq_flush_ctx->tl2_schq)
+                       continue;
+               /* skip unused TL2s */
+               if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
+                       continue;
+               /* skip if PF_FUNC doesn't match */
+               if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
+                   (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
+                                   ~RVU_PFVF_FUNC_MASK)))
+                       continue;
+               /* enable/disable XOFF */
+               regoff = NIX_AF_TL2X_SW_XOFF(tl2);
+               if (enable)
+                       rvu_write64(rvu, blkaddr, regoff, 0x1);
+               else
+                       rvu_write64(rvu, blkaddr, regoff, 0x0);
+       }
+}
+
+static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr,
+                                     struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
+{
+       u64 cir_off, pir_off, cir_val, pir_val;
+       struct nix_smq_tree_ctx *smq_tree_ctx;
+       int lvl;
+
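+       /* enable: restore the saved CIR/PIR shaper values; disable: clear them */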
+       for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
+               smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
+               cir_off = smq_tree_ctx->cir_off;
+               cir_val = smq_tree_ctx->cir_val;
+               pir_off = smq_tree_ctx->pir_off;
+               pir_val = smq_tree_ctx->pir_val;
+
+               if (enable) {
+                       rvu_write64(rvu, blkaddr, cir_off, cir_val);
+                       if (lvl != NIX_TXSCH_LVL_TL1)
+                               rvu_write64(rvu, blkaddr, pir_off, pir_val);
+               } else {
+                       rvu_write64(rvu, blkaddr, cir_off, 0x0);
+                       if (lvl != NIX_TXSCH_LVL_TL1)
+                               rvu_write64(rvu, blkaddr, pir_off, 0x0);
+               }
+       }
+}
+
 static int nix_smq_flush(struct rvu *rvu, int blkaddr,
                         int smq, u16 pcifunc, int nixlf)
 {
+       struct nix_smq_flush_ctx *smq_flush_ctx;
        int pf = rvu_get_pf(pcifunc);
        u8 cgx_id = 0, lmac_id = 0;
        int err, restore_tx_en = 0;
        u64 cfg;
+       u8 link;
+
+       if (!is_rvu_otx2(rvu)) {
+               /* Skip SMQ flush if pkt count is zero */
+               cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq));
+               if (!cfg)
+                       return 0;
+       }
 
        /* enable cgx tx if disabled */
        if (is_pf_cgxmapped(rvu, pf)) {
@@ -2135,6 +2660,14 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
                                                   lmac_id, true);
        }
 
+       /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */
+       smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL);
+       if (!smq_flush_ctx)
+               return -ENOMEM;
+       nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);
+       nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
+       nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
+
        cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
        /* Do SMQ flush and set enqueue xoff */
        cfg |= BIT_ULL(50) | BIT_ULL(49);
@@ -2150,14 +2683,27 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
        /* Wait for flush to complete */
        err = rvu_poll_reg(rvu, blkaddr,
                           NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
-       if (err)
-               dev_err(rvu->dev,
-                       "NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
+       if (err) {
+               dev_info(rvu->dev,
+                        "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
+                        nixlf, smq);
+
+               nix_dump_smq_status(rvu, blkaddr, smq_flush_ctx);
+               link = (cgx_id * rvu->hw->lmac_per_cgx) + lmac_id;
+               dev_info(rvu->dev, "NIX_AF_TX_LINKX_NORM_CREDIT:0x%llx\n",
+                        rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link)));
+       }
+
+       /* clear XOFF on TL2s */
+       nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
+       nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
+       kfree(smq_flush_ctx);
 
        rvu_cgx_enadis_rx_bp(rvu, pf, true);
        /* restore cgx tx state */
        if (restore_tx_en)
                rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+
        return err;
 }
 
@@ -2194,6 +2740,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
                                continue;
                        nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
                        nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
+                       nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
                }
        }
        nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
@@ -2232,15 +2779,14 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
                for (schq = 0; schq < txsch->schq.max; schq++) {
                        if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
                                continue;
+                       nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
                        rvu_free_rsrc(&txsch->schq, schq);
                        txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
                }
        }
        mutex_unlock(&rvu->rsrc_lock);
 
-       /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
-       rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
-       err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
+       err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC);
        if (err)
                dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
 
@@ -2291,6 +2837,9 @@ static int nix_txschq_free_one(struct rvu *rvu,
         */
        nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
 
+       nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+       nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
+
        /* Flush if it is a SMQ. Onus of disabling
         * TL2/3 queue links before SMQ flush is on user
         */
@@ -2403,7 +2952,9 @@ static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
                                u16 pcifunc, int blkaddr)
 {
+       struct rvu_pfvf *parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
        u32 *pfvf_map;
+
        int schq;
 
        schq = nix_get_tx_link(rvu, pcifunc);
@@ -2412,7 +2963,7 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
        if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
                return;
        rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
-                   (TXSCH_TL1_DFLT_RR_PRIO << 1));
+                   (parent_pf->tl1_rr_prio << 1));
 
        /* On OcteonTx2 the config was in bytes and on newer silicons
         * it's changed to weight.
@@ -2455,17 +3006,19 @@ static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
        return 0;
 }
 
-static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
-                              u16 pcifunc, struct nix_txsch *txsch)
+void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
+                       struct nix_txsch *txsch, bool enable)
 {
        struct rvu_hwinfo *hw = rvu->hw;
        int lbk_link_start, lbk_links;
        u8 pf = rvu_get_pf(pcifunc);
        int schq;
+       u64 cfg;
 
        if (!is_pf_cgxmapped(rvu, pf))
                return;
 
+       cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
        lbk_link_start = hw->cgx_links;
 
        for (schq = 0; schq < txsch->schq.max; schq++) {
@@ -2479,8 +3032,7 @@ static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
                        rvu_write64(rvu, blkaddr,
                                    NIX_AF_TL3_TL2X_LINKX_CFG(schq,
                                                              lbk_link_start +
-                                                             lbk_links),
-                                   BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
+                                                             lbk_links), cfg);
        }
 }
 
@@ -2603,8 +3155,8 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
            req->vtag_size > VTAGSIZE_T8)
                return -EINVAL;
 
-       /* RX VTAG Type 7 reserved for vf vlan */
-       if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
+       /* RX VTAG Types 7 and 6 are reserved for VF VLAN and FDSA tag strip */
+       if (req->rx.vtag_type >= NIX_AF_LFX_RX_VTAG_TYPE6)
                return NIX_AF_ERR_RX_VTAG_INUSE;
 
        if (req->rx.capture_vtag)
@@ -2810,7 +3362,8 @@ int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
 }
 
 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
-                            int mce, u8 op, u16 pcifunc, int next, bool eol)
+                            int mce, u8 op, u16 pcifunc, int next,
+                            int index, u8 mce_op, bool eol)
 {
        struct nix_aq_enq_req aq_req;
        int err;
@@ -2821,8 +3374,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
        aq_req.qidx = mce;
 
        /* Use RSS with RSS index 0 */
-       aq_req.mce.op = 1;
-       aq_req.mce.index = 0;
+       aq_req.mce.op = mce_op;
+       aq_req.mce.index = index;
        aq_req.mce.eol = eol;
        aq_req.mce.pf_func = pcifunc;
        aq_req.mce.next = next;
@@ -2839,6 +3392,206 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
        return 0;
 }
 
+static void nix_delete_mcast_mce_list(struct nix_mce_list *mce_list)
+{
+       struct hlist_node *tmp;
+       struct mce *mce;
+
+       /* Scan through the current list */
+       hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
+               hlist_del(&mce->node);
+               kfree(mce);
+       }
+
+       mce_list->count = 0;
+       mce_list->max = 0;
+}
+
+static int nix_get_last_mce_list_index(struct nix_mcast_grp_elem *elem)
+{
+       return elem->mce_start_index + elem->mcast_mce_list.count - 1;
+}
+
+static int nix_update_ingress_mce_list_hw(struct rvu *rvu,
+                                         struct nix_hw *nix_hw,
+                                         struct nix_mcast_grp_elem *elem)
+{
+       int idx, last_idx, next_idx, err;
+       struct nix_mce_list *mce_list;
+       struct mce *mce, *prev_mce;
+
+       mce_list = &elem->mcast_mce_list;
+       idx = elem->mce_start_index;
+       last_idx = nix_get_last_mce_list_index(elem);
+       hlist_for_each_entry(mce, &mce_list->head, node) {
+               if (idx > last_idx)
+                       break;
+
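+               /* Skip inactive entries: at the head, advance mce_start_index;
+                * at the tail, terminate the list at the previous MCE.
+                */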
+               if (!mce->is_active) {
+                       if (idx == elem->mce_start_index) {
+                               idx++;
+                               prev_mce = mce;
+                               elem->mce_start_index = idx;
+                               continue;
+                       } else if (idx == last_idx) {
+                               err = nix_blk_setup_mce(rvu, nix_hw, idx - 1, NIX_AQ_INSTOP_WRITE,
+                                                       prev_mce->pcifunc, next_idx,
+                                                       prev_mce->rq_rss_index,
+                                                       prev_mce->dest_type,
+                                                       false);
+                               if (err)
+                                       return err;
+
+                               break;
+                       }
+               }
+
+               next_idx = idx + 1;
+               /* EOL should be set in last MCE */
+               err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
+                                       mce->pcifunc, next_idx,
+                                       mce->rq_rss_index, mce->dest_type,
+                                       (next_idx > last_idx) ? true : false);
+               if (err)
+                       return err;
+
+               idx++;
+               prev_mce = mce;
+       }
+
+       return 0;
+}
+
+static void nix_update_egress_mce_list_hw(struct rvu *rvu,
+                                         struct nix_hw *nix_hw,
+                                         struct nix_mcast_grp_elem *elem)
+{
+       struct nix_mce_list *mce_list;
+       int idx, last_idx, next_idx;
+       struct mce *mce, *prev_mce;
+       u64 regval;
+       u8 eol;
+
+       mce_list = &elem->mcast_mce_list;
+       idx = elem->mce_start_index;
+       last_idx = nix_get_last_mce_list_index(elem);
+       hlist_for_each_entry(mce, &mce_list->head, node) {
+               if (idx > last_idx)
+                       break;
+
+               if (!mce->is_active) {
+                       if (idx == elem->mce_start_index) {
+                               idx++;
+                               prev_mce = mce;
+                               elem->mce_start_index = idx;
+                               continue;
+                       } else if (idx == last_idx) {
+                               regval = (next_idx << 16) | (1 << 12) | prev_mce->channel;
+                               rvu_write64(rvu, nix_hw->blkaddr,
+                                           NIX_AF_TX_MCASTX(idx - 1),
+                                           regval);
+                               break;
+                       }
+               }
+
+               eol = 0;
+               next_idx = idx + 1;
+               /* EOL should be set in last MCE */
+               if (next_idx > last_idx)
+                       eol = 1;
+
+               regval = (next_idx << 16) | (eol << 12) | mce->channel;
+               rvu_write64(rvu, nix_hw->blkaddr,
+                           NIX_AF_TX_MCASTX(idx),
+                           regval);
+               idx++;
+               prev_mce = mce;
+       }
+}
+
+static int nix_del_mce_list_entry(struct rvu *rvu,
+                                 struct nix_hw *nix_hw,
+                                 struct nix_mcast_grp_elem *elem,
+                                 struct nix_mcast_grp_update_req *req)
+{
+       u32 num_entry = req->num_mce_entry;
+       struct nix_mce_list *mce_list;
+       struct mce *mce;
+       bool is_found;
+       int i;
+
+       mce_list = &elem->mcast_mce_list;
+       for (i = 0; i < num_entry; i++) {
+               is_found = false;
+               hlist_for_each_entry(mce, &mce_list->head, node) {
+                       /* If the entry already exists, delete it */
+                       if (mce->pcifunc == req->pcifunc[i]) {
+                               hlist_del(&mce->node);
+                               kfree(mce);
+                               mce_list->count--;
+                               is_found = true;
+                               break;
+                       }
+               }
+
+               if (!is_found)
+                       return NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
+       }
+
+       mce_list->max = mce_list->count;
+       /* Dump the updated list to HW */
+       if (elem->dir == NIX_MCAST_INGRESS)
+               return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
+
+       nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
+       return 0;
+}
+
+static int nix_add_mce_list_entry(struct rvu *rvu,
+                                 struct nix_hw *nix_hw,
+                                 struct nix_mcast_grp_elem *elem,
+                                 struct nix_mcast_grp_update_req *req)
+{
+       u32 num_entry = req->num_mce_entry;
+       struct nix_mce_list *mce_list;
+       struct hlist_node *tmp;
+       struct mce *mce;
+       int i;
+
+       mce_list = &elem->mcast_mce_list;
+       for (i = 0; i < num_entry; i++) {
+               mce = kzalloc(sizeof(*mce), GFP_KERNEL);
+               if (!mce)
+                       goto free_mce;
+
+               mce->pcifunc = req->pcifunc[i];
+               mce->channel = req->channel[i];
+               mce->rq_rss_index = req->rq_rss_index[i];
+               mce->dest_type = req->dest_type[i];
+               mce->is_active = 1;
+               hlist_add_head(&mce->node, &mce_list->head);
+               mce_list->count++;
+       }
+
+       mce_list->max += num_entry;
+
+       /* Dump the updated list to HW */
+       if (elem->dir == NIX_MCAST_INGRESS)
+               return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
+
+       nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
+       return 0;
+
+free_mce:
+       hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
+               hlist_del(&mce->node);
+               kfree(mce);
+               mce_list->count--;
+       }
+
+       return -ENOMEM;
+}
+
 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
                                     u16 pcifunc, bool add)
 {
@@ -2934,6 +3687,7 @@ int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
                /* EOL should be set in last MCE */
                err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
                                        mce->pcifunc, next_idx,
+                                       0, 1,
                                        (next_idx > last_idx) ? true : false);
                if (err)
                        goto end;
@@ -3014,6 +3768,16 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
        return err;
 }
 
+static void nix_setup_mcast_grp(struct nix_hw *nix_hw)
+{
+       struct nix_mcast_grp *mcast_grp = &nix_hw->mcast_grp;
+
+       INIT_LIST_HEAD(&mcast_grp->mcast_grp_head);
+       mutex_init(&mcast_grp->mcast_grp_lock);
+       mcast_grp->next_grp_index = 1;
+       mcast_grp->count = 0;
+}
+
 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
 {
        struct nix_mcast *mcast = &nix_hw->mcast;
@@ -3038,15 +3802,15 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
                        continue;
 
                /* save start idx of broadcast mce list */
-               pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+               pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
                nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
 
                /* save start idx of multicast mce list */
-               pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+               pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
                nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
 
                /* save the start idx of promisc mce list */
-               pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+               pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
                nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
 
                for (idx = 0; idx < (numvfs + 1); idx++) {
@@ -3061,7 +3825,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
                        err = nix_blk_setup_mce(rvu, nix_hw,
                                                pfvf->bcast_mce_idx + idx,
                                                NIX_AQ_INSTOP_INIT,
-                                               pcifunc, 0, true);
+                                               pcifunc, 0, 0, 1, true);
                        if (err)
                                return err;
 
@@ -3069,7 +3833,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
                        err = nix_blk_setup_mce(rvu, nix_hw,
                                                pfvf->mcast_mce_idx + idx,
                                                NIX_AQ_INSTOP_INIT,
-                                               pcifunc, 0, true);
+                                               pcifunc, 0, 0, 1, true);
                        if (err)
                                return err;
 
@@ -3077,7 +3841,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
                        err = nix_blk_setup_mce(rvu, nix_hw,
                                                pfvf->promisc_mce_idx + idx,
                                                NIX_AQ_INSTOP_INIT,
-                                               pcifunc, 0, true);
+                                               pcifunc, 0, 0, 1, true);
                        if (err)
                                return err;
                }
@@ -3092,11 +3856,25 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
        int err, size;
 
        size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
-       size = (1ULL << size);
+       size = BIT_ULL(size);
+
+       /* Allocate bitmap for rx mce entries */
+       mcast->mce_counter[NIX_MCAST_INGRESS].max = 256UL << MC_TBL_SIZE;
+       err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
+       if (err)
+               return -ENOMEM;
+
+       /* Allocate bitmap for tx mce entries */
+       mcast->mce_counter[NIX_MCAST_EGRESS].max = MC_TX_MAX;
+       err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
+       if (err) {
+               rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
+               return -ENOMEM;
+       }
 
        /* Alloc memory for multicast/mirror replication entries */
        err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
-                        (256UL << MC_TBL_SIZE), size);
+                        mcast->mce_counter[NIX_MCAST_INGRESS].max, size);
        if (err)
                return -ENOMEM;
 
@@ -3126,6 +3904,8 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
 
        mutex_init(&mcast->mce_lock);
 
+       nix_setup_mcast_grp(nix_hw);
+
        return nix_setup_mce_tables(rvu, nix_hw);
 }
 
@@ -3201,10 +3981,16 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
        }
 
        /* Setup a default value of 8192 as DWRR MTU */
-       if (rvu->hw->cap.nix_common_dwrr_mtu) {
-               rvu_write64(rvu, blkaddr, NIX_AF_DWRR_RPM_MTU,
+       if (rvu->hw->cap.nix_common_dwrr_mtu ||
+           rvu->hw->cap.nix_multiple_dwrr_mtu) {
+               rvu_write64(rvu, blkaddr,
+                           nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM),
                            convert_bytes_to_dwrr_mtu(8192));
-               rvu_write64(rvu, blkaddr, NIX_AF_DWRR_SDP_MTU,
+               rvu_write64(rvu, blkaddr,
+                           nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK),
+                           convert_bytes_to_dwrr_mtu(8192));
+               rvu_write64(rvu, blkaddr,
+                           nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP),
                            convert_bytes_to_dwrr_mtu(8192));
        }
 
@@ -3273,8 +4059,12 @@ static void rvu_get_lbk_link_max_frs(struct rvu *rvu,  u16 *max_mtu)
 
 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
 {
-       /* RPM supports FIFO len 128 KB */
-       if (rvu_cgx_get_fifolen(rvu) == 0x20000)
+       int fifo_size = rvu_cgx_get_fifolen(rvu);
+
+       /* RPM supports FIFO len 128 KB and RPM2 supports double the
+        * FIFO len to accommodate 8 LMACS
+        */
+       if (fifo_size == 0x20000 || fifo_size == 0x40000)
                *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
        else
                *max_mtu = NIC_HW_MAX_FRS;
@@ -3298,19 +4088,28 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
 
        rsp->min_mtu = NIC_HW_MIN_FRS;
 
-       if (!rvu->hw->cap.nix_common_dwrr_mtu) {
+       if (!rvu->hw->cap.nix_common_dwrr_mtu &&
+           !rvu->hw->cap.nix_multiple_dwrr_mtu) {
                /* Return '1' on OTx2 */
                rsp->rpm_dwrr_mtu = 1;
                rsp->sdp_dwrr_mtu = 1;
+               rsp->lbk_dwrr_mtu = 1;
                return 0;
        }
 
-       dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
+       /* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */
+       dwrr_mtu = rvu_read64(rvu, blkaddr,
+                             nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
        rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
 
-       dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_SDP_MTU);
+       dwrr_mtu = rvu_read64(rvu, blkaddr,
+                             nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP));
        rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
 
+       dwrr_mtu = rvu_read64(rvu, blkaddr,
+                             nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK));
+       rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
+
        return 0;
 }
 
@@ -3940,14 +4739,13 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
        }
 
        /* install/uninstall promisc entry */
-       if (promisc) {
+       if (promisc)
                rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
                                              pfvf->rx_chan_base,
                                              pfvf->rx_chan_cnt);
-       } else {
+       else
                if (!nix_rx_multicast)
                        rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
-       }
 
        return 0;
 }
@@ -4026,7 +4824,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
        if (!req->sdp_link && req->maxlen > max_mtu)
                return NIX_AF_ERR_FRS_INVALID;
 
-       if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
+       if (req->update_minlen && req->minlen < (req->sdp_link ? SDP_HW_MIN_FRS : NIC_HW_MIN_FRS))
                return NIX_AF_ERR_FRS_INVALID;
 
        /* Check if config is for SDP link */
@@ -4085,6 +4883,11 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
        else
                cfg &= ~BIT_ULL(40);
 
+       if (req->len_verify & NIX_RX_DROP_RE)
+               cfg |= BIT_ULL(32);
+       else
+               cfg &= ~BIT_ULL(32);
+
        if (req->csum_verify & BIT(0))
                cfg |= BIT_ULL(37);
        else
@@ -4114,6 +4917,9 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
        rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
        rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
 
+       /* Set SDP link credit */
+       rvu_write64(rvu, blkaddr, NIX_AF_SDP_LINK_CREDIT, SDP_LINK_CREDIT);
+
        /* Set default min/max packet lengths allowed on NIX Rx links.
         *
         * With HW reset minlen value of 60byte, HW will treat ARP pkts
@@ -4125,14 +4931,30 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
                                ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
        }
 
-       for (link = hw->cgx_links; link < hw->lbk_links; link++) {
+       for (link = hw->cgx_links; link < hw->cgx_links + hw->lbk_links; link++) {
                rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
                            ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
        }
        if (hw->sdp_links) {
                link = hw->cgx_links + hw->lbk_links;
                rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
-                           SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+                           SDP_HW_MAX_FRS << 16 | SDP_HW_MIN_FRS);
+       }
+
+       /* Set CPT link i.e second pass config */
+       if (hw->cpt_links) {
+               link = hw->cgx_links + hw->lbk_links + hw->sdp_links;
+               /* Set default min/max packet lengths allowed to LBK as that
+                * LBK link's range is max.
+                */
+               rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+                           ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
+       }
+
+       /* Get MCS external bypass status for CN10K-B */
+       if (mcs_get_blkcnt() == 1) {
+               /* Adjust for 2 credits when external bypass is disabled */
+               nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2;
        }
 
        /* Set credits for Tx links assuming max packet length allowed.
@@ -4147,7 +4969,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
 
                /* Get LMAC id's from bitmap */
                lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
-               for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
+               for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
                        lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
                        if (!lmac_fifo_len) {
                                dev_err(rvu->dev,
@@ -4158,6 +4980,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
                        tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
                        /* Enable credits and set credit pkt count to max allowed */
                        cfg =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+                       cfg |= (nix_hw->cc_mcs_cnt << 32);
 
                        link = iter + slink;
                        nix_hw->tx_credits[link] = tx_credits;
@@ -4282,8 +5105,11 @@ static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
         * Check if HW uses a common MTU for all DWRR quantum configs.
         * On OcteonTx2 this register field is '0'.
         */
-       if (((hw_const >> 56) & 0x10) == 0x10)
+       if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61)))
                hw->cap.nix_common_dwrr_mtu = true;
+
+       if (hw_const & BIT_ULL(61))
+               hw->cap.nix_multiple_dwrr_mtu = true;
 }
 
 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
@@ -4334,8 +5160,17 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
        /* Restore CINT timer delay to HW reset values */
        rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
 
+       cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
+
        /* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */
-       rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, 0x1ULL);
+       cfg |= 1ULL;
+       if (!is_rvu_otx2(rvu))
+               cfg |= NIX_PTP_1STEP_EN;
+
+       rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
+
+       if (!is_rvu_otx2(rvu))
+               rvu_nix_block_cn10k_init(rvu, nix_hw);
 
        if (is_block_implemented(hw, blkaddr)) {
                err = nix_setup_txschq(rvu, nix_hw, blkaddr);
@@ -4358,6 +5193,10 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
                if (err)
                        return err;
 
+               err = nix_setup_bpids(rvu, nix_hw, blkaddr);
+               if (err)
+                       return err;
+
                /* Configure segmentation offload formats */
                nix_setup_lso(rvu, nix_hw, blkaddr);
 
@@ -4547,6 +5386,74 @@ void rvu_nix_freemem(struct rvu *rvu)
        }
 }
 
+static void nix_mcast_update_action(struct rvu *rvu,
+                                   struct nix_mcast_grp_elem *elem)
+{
+       struct npc_mcam *mcam = &rvu->hw->mcam;
+       struct nix_rx_action rx_action = { 0 };
+       struct nix_tx_action tx_action = { 0 };
+       int npc_blkaddr;
+
+       npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+       if (elem->dir == NIX_MCAST_INGRESS) {
+               *(u64 *)&rx_action = npc_get_mcam_action(rvu, mcam,
+                                                        npc_blkaddr,
+                                                        elem->mcam_index);
+               rx_action.index = elem->mce_start_index;
+               npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
+                                   *(u64 *)&rx_action);
+       } else {
+               *(u64 *)&tx_action = npc_get_mcam_action(rvu, mcam,
+                                                        npc_blkaddr,
+                                                        elem->mcam_index);
+               tx_action.index = elem->mce_start_index;
+               npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
+                                   *(u64 *)&tx_action);
+       }
+}
+
+static void nix_mcast_update_mce_entry(struct rvu *rvu, u16 pcifunc, u8 is_active)
+{
+       struct nix_mcast_grp_elem *elem;
+       struct nix_mcast_grp *mcast_grp;
+       struct nix_hw *nix_hw;
+       int blkaddr;
+
+       blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+       nix_hw = get_nix_hw(rvu->hw, blkaddr);
+       if (!nix_hw)
+               return;
+
+       mcast_grp = &nix_hw->mcast_grp;
+
+       mutex_lock(&mcast_grp->mcast_grp_lock);
+       list_for_each_entry(elem, &mcast_grp->mcast_grp_head, list) {
+               struct nix_mce_list *mce_list;
+               struct mce *mce;
+
+               /* Iterate the group elements and disable the element which
+                * received the disable request.
+                */
+               mce_list = &elem->mcast_mce_list;
+               hlist_for_each_entry(mce, &mce_list->head, node) {
+                       if (mce->pcifunc == pcifunc) {
+                               mce->is_active = is_active;
+                               break;
+                       }
+               }
+
+               /* Dump the updated list to HW */
+               if (elem->dir == NIX_MCAST_INGRESS)
+                       nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
+               else
+                       nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
+
+               /* Update the multicast index in NPC rule */
+               nix_mcast_update_action(rvu, elem);
+       }
+       mutex_unlock(&mcast_grp->mcast_grp_lock);
+}
+
 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
                                     struct msg_rsp *rsp)
 {
@@ -4558,6 +5465,9 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
        if (err)
                return err;
 
+       /* Enable the interface if it is in any multicast list */
+       nix_mcast_update_mce_entry(rvu, pcifunc, 1);
+
        rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
 
        npc_mcam_enable_flows(rvu, pcifunc);
@@ -4582,6 +5492,9 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
                return err;
 
        rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+       /* Disable the interface if it is in any multicast list */
+       nix_mcast_update_mce_entry(rvu, pcifunc, 0);
+
 
        pfvf = rvu_get_pfvf(rvu, pcifunc);
        clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
@@ -4595,6 +5508,8 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
        return 0;
 }
 
+#define RX_SA_BASE  GENMASK_ULL(52, 7)
+
 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
 {
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -4602,6 +5517,7 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
        int pf = rvu_get_pf(pcifunc);
        struct mac_ops *mac_ops;
        u8 cgx_id, lmac_id;
+       u64 sa_base;
        void *cgxd;
        int err;
 
@@ -4614,6 +5530,9 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
        nix_rx_sync(rvu, blkaddr);
        nix_txschq_free(rvu, pcifunc);
 
+       /* Reset SPI to SA index table */
+       rvu_nix_free_spi_to_sa_table(rvu, pcifunc);
+
        clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
 
        rvu_cgx_start_stop_io(rvu, pcifunc, false);
@@ -4655,6 +5574,12 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
                pfvf->hw_rx_tstamp_en = false;
        }
 
+       /* reset priority flow control config */
+       rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
+
+       /* reset 802.3x flow control config */
+       rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
+
        nix_ctx_free(rvu, pfvf);
 
        sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
@@ -5294,6 +6219,7 @@ int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
        aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
        aq_req.op = NIX_AQ_INSTOP_WRITE;
        memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
+       memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
        /* Clear higher layer enable bit in the mid profile, just in case */
        aq_req.prof.hl_en = 0;
        aq_req.prof_mask.hl_en = 1;

---
base-commit: 2e4869fca0749ba823c237e550ea2c38d01493eb
change-id: 20240820-octeon-sdkv5-4-1813417cc2ab

Best regards,
-- 
Kevin Hao <haoke...@gmail.com>
