Newer chips require the PTU_PTE_VALID bit to be set for every page
table entry for context memory and rings.  Additional bits are also
required for page table entries for all rings.  Add a flags field to
the bnxt_ring_mem_info struct to specify these additional bits to be
used when setting up the page tables as needed.

Signed-off-by: Michael Chan <michael.c...@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 17 +++++++++++++++--
 drivers/net/ethernet/broadcom/bnxt/bnxt.h |  8 ++++++++
 2 files changed, 23 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c 
b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 602dc09..f0da558 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2230,8 +2230,11 @@ static void bnxt_free_ring(struct bnxt *bp, struct 
bnxt_ring_mem_info *rmem)
 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 {
        struct pci_dev *pdev = bp->pdev;
+       u64 valid_bit = 0;
        int i;
 
+       if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
+               valid_bit = PTU_PTE_VALID;
        if (rmem->nr_pages > 1) {
                rmem->pg_tbl = dma_alloc_coherent(&pdev->dev,
                                                  rmem->nr_pages * 8,
@@ -2242,6 +2245,8 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct 
bnxt_ring_mem_info *rmem)
        }
 
        for (i = 0; i < rmem->nr_pages; i++) {
+               u64 extra_bits = valid_bit;
+
                rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
                                                     rmem->page_size,
                                                     &rmem->dma_arr[i],
@@ -2249,8 +2254,16 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct 
bnxt_ring_mem_info *rmem)
                if (!rmem->pg_arr[i])
                        return -ENOMEM;
 
-               if (rmem->nr_pages > 1)
-                       rmem->pg_tbl[i] = cpu_to_le64(rmem->dma_arr[i]);
+               if (rmem->nr_pages > 1) {
+                       if (i == rmem->nr_pages - 2 &&
+                           (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+                               extra_bits |= PTU_PTE_NEXT_TO_LAST;
+                       else if (i == rmem->nr_pages - 1 &&
+                                (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+                               extra_bits |= PTU_PTE_LAST;
+                       rmem->pg_tbl[i] =
+                               cpu_to_le64(rmem->dma_arr[i] | extra_bits);
+               }
        }
 
        if (rmem->vmem_size) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h 
b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 2e4b621..5792e5c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -580,6 +580,10 @@ struct bnxt_sw_rx_agg_bd {
 struct bnxt_ring_mem_info {
        int                     nr_pages;
        int                     page_size;
+       u32                     flags;
+#define BNXT_RMEM_VALID_PTE_FLAG       1
+#define BNXT_RMEM_RING_PTE_FLAG                2
+
        void                    **pg_arr;
        dma_addr_t              *dma_arr;
 
@@ -1109,6 +1113,10 @@ struct bnxt_vf_rep {
        struct bnxt_vf_rep_stats        tx_stats;
 };
 
+#define PTU_PTE_VALID             0x1UL
+#define PTU_PTE_LAST              0x2UL
+#define PTU_PTE_NEXT_TO_LAST      0x4UL
+
 struct bnxt {
        void __iomem            *bar0;
        void __iomem            *bar1;
-- 
2.5.1

Reply via email to