This patch enables the IPoIB-UD RX path to allocate scatter/gather (S/G)
receive buffers for payloads of up to 4096 bytes, so the IPoIB link MTU can
grow to 4K - 4 bytes (the 4-byte IPoIB encapsulation header is carved out of
the 4K IB MTU).

Signed-off-by: Shirley Ma <[EMAIL PROTECTED]>
---

 drivers/infiniband/ulp/ipoib/ipoib.h    |   14 +----
 drivers/infiniband/ulp/ipoib/ipoib_cm.c |   25 ++++----
 drivers/infiniband/ulp/ipoib/ipoib_ib.c |   95 +++++++++++-------------------
 3 files changed, 50 insertions(+), 84 deletions(-)
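
Note: IPOIB_UD_HEAD_SIZE() and IPOIB_UD_RX_SG() used below are introduced by
an earlier patch in this series and are not visible in this diff. As a rough,
assumed sketch of what they compute (the real definitions may differ): the
linear "head" of the receive skb covers the GRH plus whatever part of the
buffer does not fill a whole page, and the rest of an up-to-4K IB MTU is
carried in page-sized S/G fragments:

	/*
	 * Illustration only -- not the definitions from this series.
	 * IPOIB_UD_BUF_SIZE is an assumed helper name.
	 */
	#define IPOIB_UD_BUF_SIZE(ib_mtu)  ((ib_mtu) + IB_GRH_BYTES)
	#define IPOIB_UD_HEAD_SIZE(ib_mtu) (IPOIB_UD_BUF_SIZE(ib_mtu) % PAGE_SIZE)
	#define IPOIB_UD_RX_SG(ib_mtu)     (ALIGN(IPOIB_UD_BUF_SIZE(ib_mtu), PAGE_SIZE) / PAGE_SIZE)

Under these assumptions, a 4096-byte IB MTU with 4K pages gives a 40-byte head
(the GRH) plus one 4096-byte fragment, i.e. two S/G entries per receive buffer.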

diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 004a80b..57d33d5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -56,9 +56,6 @@
 /* constants */
 
 enum {
-       IPOIB_PACKET_SIZE         = 2048,
-       IPOIB_BUF_SIZE            = IPOIB_PACKET_SIZE + IB_GRH_BYTES,
-
        IPOIB_ENCAP_LEN           = 4,
 
        IPOIB_MAX_IB_MTU          = 4096,
@@ -142,11 +139,6 @@ struct ipoib_mcast {
 
 struct ipoib_rx_buf {
        struct sk_buff *skb;
-       u64             mapping;
-};
-
-struct ipoib_cm_rx_buf {
-       struct sk_buff *skb;
        u64             mapping[IPOIB_CM_RX_SG];
 };
 
@@ -198,7 +190,7 @@ enum ipoib_cm_state {
 struct ipoib_cm_rx {
        struct ib_cm_id        *id;
        struct ib_qp           *qp;
-       struct ipoib_cm_rx_buf *rx_ring;
+       struct ipoib_rx_buf    *rx_ring;
        struct list_head        list;
        struct net_device      *dev;
        unsigned long           jiffies;
@@ -223,7 +215,7 @@ struct ipoib_cm_tx {
 
 struct ipoib_cm_dev_priv {
        struct ib_srq          *srq;
-       struct ipoib_cm_rx_buf *srq_ring;
+       struct ipoib_rx_buf    *srq_ring;
        struct ib_cm_id        *id;
        struct list_head        passive_ids;   /* state: LIVE */
        struct list_head        rx_error_list; /* state: ERROR */
@@ -473,7 +465,7 @@ int ipoib_pkey_dev_delay_open(struct net_device *dev);
 void ipoib_drain_cq(struct net_device *dev);
 void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
                   unsigned int length, struct sk_buff *toskb);
-struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
+struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev,
                                      int id, int frags, int head_size,
                                      int pad, u64 *mapping);
 static void inline ipoib_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 2c2c6b2..b2fe0f8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -126,7 +126,7 @@ static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
 }
 
 static void ipoib_cm_free_rx_ring(struct net_device *dev,
-                                 struct ipoib_cm_rx_buf *rx_ring)
+                                 struct ipoib_rx_buf *rx_ring)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i;
@@ -283,11 +283,11 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
        spin_unlock_irq(&priv->lock);
 
        for (i = 0; i < ipoib_recvq_size; ++i) {
-               rx->rx_ring[i].skb = ipoib_cm_alloc_rx_skb(dev, i,
-                                                          IPOIB_CM_RX_SG - 1,
-                                                          IPOIB_CM_HEAD_SIZE, 
-                                                          12,
-                                                          rx->rx_ring[i].mapping);
+               rx->rx_ring[i].skb = ipoib_alloc_rx_skb(dev, i,
+                                                       IPOIB_CM_RX_SG - 1,
+                                                       IPOIB_CM_HEAD_SIZE, 
+                                                       12,
+                                                       rx->rx_ring[i].mapping);
                if (!rx->rx_ring[i].skb) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                                ret = -ENOMEM;
@@ -426,7 +426,7 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
 void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
-       struct ipoib_cm_rx_buf *rx_ring;
+       struct ipoib_rx_buf *rx_ring;
        unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
        struct sk_buff *skb, *newskb;
        struct ipoib_cm_rx *p;
@@ -491,8 +491,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
        frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
                                              (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
 
-       newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, IPOIB_CM_HEAD_SIZE, 
-                                      12, mapping);
+       newskb = ipoib_alloc_rx_skb(dev, wr_id, frags, IPOIB_CM_HEAD_SIZE, 12, mapping);
        if (unlikely(!newskb)) {
                /*
                 * If we can't allocate a new RX buffer, dump
@@ -1396,10 +1395,10 @@ int ipoib_cm_dev_init(struct net_device *dev)
        if (ipoib_cm_has_srq(dev)) {
                for (i = 0; i < ipoib_recvq_size; ++i) {
                        priv->cm.srq_ring[i].skb = 
-                             ipoib_cm_alloc_rx_skb(dev, i, 
-                                                   priv->cm.num_frags - 1, 
-                                                   IPOIB_CM_HEAD_SIZE, 12, 
-                                                   priv->cm.srq_ring[i].mapping);
+                             ipoib_alloc_rx_skb(dev, i, 
+                                                priv->cm.num_frags - 1, 
+                                                IPOIB_CM_HEAD_SIZE, 12, 
+                                                priv->cm.srq_ring[i].mapping);
                        if (!priv->cm.srq_ring[i].skb) {        
                                ipoib_warn(priv, "failed to allocate "
                                           "receive buffer %d\n", i);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index c40329f..e6540a4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -90,25 +90,16 @@ void ipoib_free_ah(struct kref *kref)
 static int ipoib_ib_post_receive(struct net_device *dev, int id)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
-       struct ib_sge list;
-       struct ib_recv_wr param;
        struct ib_recv_wr *bad_wr;
        int ret;
 
-       list.addr     = priv->rx_ring[id].mapping;
-       list.length   = IPOIB_BUF_SIZE;
-       list.lkey     = priv->mr->lkey;
-
-       param.next    = NULL;
-       param.wr_id   = id | IPOIB_OP_RECV;
-       param.sg_list = &list;
-       param.num_sge = 1;
-
-       ret = ib_post_recv(priv->qp, &param, &bad_wr);
+       priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
+       ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
-               ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
-                                   IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+               ipoib_dma_unmap_rx(priv, IPOIB_UD_RX_SG(priv->max_ib_mtu) - 1,
+                                  IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu),
+                                  priv->rx_ring[id].mapping);
                dev_kfree_skb_any(priv->rx_ring[id].skb);
                priv->rx_ring[id].skb = NULL;
        }
@@ -116,9 +107,9 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
        return ret;
 }
 
-struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
-                                     int id, int frags, int head_size,
-                                     int pad, u64 *mapping)
+struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev,
+                                  int id, int frags, int head_size,
+                                  int pad, u64 *mapping)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
@@ -201,43 +192,17 @@ void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
        }
 }
 
-static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
-{
-       struct ipoib_dev_priv *priv = netdev_priv(dev);
-       struct sk_buff *skb;
-       u64 addr;
-
-       skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
-       if (!skb)
-               return -ENOMEM;
-
-       /*
-        * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
-        * header.  So we need 4 more bytes to get to 48 and align the
-        * IP header to a multiple of 16.
-        */
-       skb_reserve(skb, 4);
-
-       addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
-                                DMA_FROM_DEVICE);
-       if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
-               dev_kfree_skb_any(skb);
-               return -EIO;
-       }
-
-       priv->rx_ring[id].skb     = skb;
-       priv->rx_ring[id].mapping = addr;
-
-       return 0;
-}
-
 static int ipoib_ib_post_receives(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i;
 
        for (i = 0; i < ipoib_recvq_size; ++i) {
-               if (ipoib_alloc_rx_skb(dev, i)) {
+               priv->rx_ring[i].skb = ipoib_alloc_rx_skb(dev, i,
+                                       IPOIB_UD_RX_SG(priv->max_ib_mtu) - 1,
+                                       IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu), 4,
+                                       priv->rx_ring[i].mapping);
+               if (!priv->rx_ring[i].skb) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                        return -ENOMEM;
                }
@@ -254,8 +219,9 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
-       struct sk_buff *skb;
-       u64 addr;
+       struct sk_buff *skb, *newskb;
+       u64 mapping[IPOIB_UD_RX_SG(priv->max_ib_mtu)];
+       int frags;
 
        ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
                       wr_id, wc->status);
@@ -267,15 +233,15 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
        }
 
        skb  = priv->rx_ring[wr_id].skb;
-       addr = priv->rx_ring[wr_id].mapping;
 
        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        ipoib_warn(priv, "failed recv event "
                                   "(status=%d, wrid=%d vend_err %x)\n",
                                   wc->status, wr_id, wc->vendor_err);
-               ib_dma_unmap_single(priv->ca, addr,
-                                   IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+               ipoib_dma_unmap_rx(priv, IPOIB_UD_RX_SG(priv->max_ib_mtu) - 1,
+                                  IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu),
+                                  priv->rx_ring[wr_id].mapping);
                dev_kfree_skb_any(skb);
                priv->rx_ring[wr_id].skb = NULL;
                return;
@@ -288,11 +254,17 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
        if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
                goto repost;
 
+       frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
+                                             (unsigned)(IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu)))) / PAGE_SIZE;
+       newskb = ipoib_alloc_rx_skb(dev, wr_id, frags,
+                                   IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu),
+                                   4, mapping);        
        /*
         * If we can't allocate a new RX buffer, dump
         * this packet and reuse the old buffer.
         */
-       if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
+       if (unlikely(!newskb)) {
+               ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
                ++dev->stats.rx_dropped;
                goto repost;
        }
@@ -300,9 +272,12 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                       wc->byte_len, wc->slid);
 
-       ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+       ipoib_dma_unmap_rx(priv, frags, IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu),
+                          priv->rx_ring[wr_id].mapping);
+       memcpy(priv->rx_ring[wr_id].mapping, mapping,
+              (frags + 1) * sizeof *mapping);
 
-       skb_put(skb, wc->byte_len);
+       skb_put_frags(skb, IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu), wc->byte_len, newskb);
        skb_pull(skb, IB_GRH_BYTES);
 
        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
@@ -715,10 +690,10 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
                                rx_req = &priv->rx_ring[i];
                                if (!rx_req->skb)
                                        continue;
-                               ib_dma_unmap_single(priv->ca,
-                                                   rx_req->mapping,
-                                                   IPOIB_BUF_SIZE,
-                                                   DMA_FROM_DEVICE);
+                               ipoib_dma_unmap_rx(priv,
+                                                  IPOIB_UD_RX_SG(priv->max_ib_mtu) - 1,
+                                                  IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu),
+                                                  priv->rx_ring[i].mapping);
                                dev_kfree_skb_any(rx_req->skb);
                                rx_req->skb = NULL;
                        }
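
Note also that the reworked ipoib_ib_post_receive() above posts a per-device
work request (priv->rx_wr) and only fills in the wr_id; the work request and
its S/G list are presumably built once at device init by another patch in the
series, and that setup is not part of this diff. A hypothetical sketch of such
setup, with priv->rx_sge and the helper name being assumptions:

	/*
	 * Hypothetical sketch -- not code from this series.  Builds the
	 * reusable UD receive work request: one S/G entry for the skb
	 * head, the rest for page-sized fragments.  The per-buffer DMA
	 * addresses (rx_sge[i].addr) would still need to be filled in
	 * from rx_ring[id].mapping before each ib_post_recv().
	 */
	static void ipoib_ud_init_rx_wr(struct ipoib_dev_priv *priv)
	{
		int i;

		priv->rx_sge[0].length = IPOIB_UD_HEAD_SIZE(priv->max_ib_mtu);
		priv->rx_sge[0].lkey   = priv->mr->lkey;
		for (i = 1; i < IPOIB_UD_RX_SG(priv->max_ib_mtu); ++i) {
			priv->rx_sge[i].length = PAGE_SIZE;
			priv->rx_sge[i].lkey   = priv->mr->lkey;
		}

		priv->rx_wr.next    = NULL;
		priv->rx_wr.sg_list = priv->rx_sge;
		priv->rx_wr.num_sge = IPOIB_UD_RX_SG(priv->max_ib_mtu);
	}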

