The following lightly tested patch addresses Roland's comments on IPoIB CM. Applies on top of PATCHv6:
- Randomise RQ PSN
- Fix for modular IPv6
- MTU endian-ness fix for ICMPs
- Cosmetics

Signed-off-by: Michael S. Tsirkin <[EMAIL PROTECTED]>

---

Roland, do you want me to repost the full fixed-up patch instead?
Please let me know when IPoIB CM is in for-2.6.21, I'll switch to that for my testing.

diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index 0ffca11..af78ccc 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -1,6 +1,6 @@
 config INFINIBAND_IPOIB
 	tristate "IP-over-InfiniBand"
-	depends on INFINIBAND && NETDEVICES && INET
+	depends on INFINIBAND && NETDEVICES && INET && (IPV6 || IPV6=n)
 	---help---
 	  Support for the IP-over-InfiniBand protocol (IPoIB). This
 	  transports IP packets over InfiniBand so you can use your IB
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 8082d50..eb885ee 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -127,7 +127,6 @@ struct ipoib_tx_buf {
 	u64 mapping;
 };
 
-#ifdef CONFIG_INFINIBAND_IPOIB_CM
 struct ib_cm_id;
 
 struct ipoib_cm_data {
@@ -181,7 +180,6 @@ struct ipoib_cm_dev_priv {
 	struct ib_recv_wr rx_wr;
 };
 
-#endif
 /*
  * Device private locking: tx_lock protects members used in TX fast
  * path (and we use LLTX so upper layers don't do extra locking).
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index e7e7cc0..8ee6f06 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -37,7 +37,7 @@
 #include <net/dst.h>
 #include <net/icmp.h>
 
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #include <linux/icmpv6.h>
 #endif
 
@@ -170,7 +170,8 @@ static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
 }
 
 static int ipoib_cm_modify_rx_qp(struct net_device *dev,
-				 struct ib_cm_id *cm_id, struct ib_qp *qp)
+				 struct ib_cm_id *cm_id, struct ib_qp *qp,
+				 unsigned psn)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ib_qp_attr qp_attr;
@@ -193,7 +194,7 @@ static int ipoib_cm_modify_rx_qp(struct net_device *dev,
 		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
 		return ret;
 	}
-	qp_attr.rq_psn = 0 /* FIXME */;
+	qp_attr.rq_psn = psn;
 	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 	if (ret) {
 		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
@@ -203,7 +204,8 @@ static int ipoib_cm_modify_rx_qp(struct net_device *dev,
 }
 
 static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
-			     struct ib_qp *qp, struct ib_cm_req_event_param *req)
+			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
+			     unsigned psn)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_cm_data data = {};
@@ -219,7 +221,7 @@ static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
 	rep.target_ack_delay = 20; /* FIXME */
 	rep.srq = 1;
 	rep.qp_num = qp->qp_num;
-	rep.starting_psn = 0 /* FIXME */;
+	rep.starting_psn = psn;
 	return ib_send_cm_rep(cm_id, &rep);
 }
 
@@ -229,6 +231,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_cm_rx *p;
 	unsigned long flags;
+	unsigned psn;
 	int ret;
 
 	ipoib_dbg(priv, "REQ arrived\n");
@@ -243,11 +246,12 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 		goto err_qp;
 	}
 
-	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp);
+	psn = random32() & 0xffffff;
+	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
 	if (ret)
 		goto err_modify;
 
-	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd);
+	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
 	if (ret) {
 		ipoib_warn(priv, "failed to send REP: %d\n", ret);
 		goto err_rep;
@@ -742,7 +746,7 @@ static int ipoib_cm_send_req(struct net_device *dev,
 	req.retry_count = 0; /* RFC draft warns against retries */
 	req.rnr_retry_count = 0; /* RFC draft warns against retries */
 	req.max_cm_retries = 15;
-	req.srq = 15;
+	req.srq = 1;
 	return ib_send_cm_req(id, &req);
 }
 
@@ -1041,7 +1045,7 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
 	struct sk_buff *skb;
 	unsigned long flags;
 
-	__be32 mtu = cpu_to_be32(priv->mcast_mtu);
+	unsigned mtu = priv->mcast_mtu;
 
 	spin_lock_irqsave(&priv->tx_lock, flags);
 	spin_lock(&priv->lock);
@@ -1050,7 +1054,7 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
 		spin_unlock_irqrestore(&priv->tx_lock, flags);
 		if (skb->protocol == htons(ETH_P_IP))
 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 		else if (skb->protocol == htons(ETH_P_IPV6))
 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
 #endif
-- 
MST
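
P.S.: In case the 0xffffff mask above looks arbitrary: the IB packet sequence
number is a 24-bit field, so the 32-bit random value is truncated to 24 bits,
and the same value is used both for the receiver's rq_psn and for the
starting_psn advertised in the REP. A rough userspace sketch of just that
masking (not kernel code; fake_random32() is a made-up stand-in for the
kernel's random32()):

/* Userspace sketch only: shows the 24-bit PSN masking done in
 * ipoib_cm_req_handler().  fake_random32() is a hypothetical stand-in
 * for the kernel's random32(). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static uint32_t fake_random32(void)
{
	return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

int main(void)
{
	uint32_t psn;

	srand((unsigned)time(NULL));

	/* The PSN field is 24 bits wide, so mask off the top byte. */
	psn = fake_random32() & 0xffffff;

	/* The same value is programmed into rq_psn and starting_psn. */
	printf("psn = 0x%06x\n", psn);
	return 0;
}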
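
P.P.S.: On the endian-ness fix: as far as I can tell, icmp_send() expects its
info argument in network byte order while icmpv6_send() takes the MTU in host
order, so keeping mtu in host order and applying htonl() only at the IPv4 call
site gives each helper what it wants. A quick userspace illustration of the two
conventions (65520 is just an example MTU, not a value from the patch):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned mtu = 65520;		/* example value only */

	uint32_t info_v4 = htonl(mtu);	/* IPv4 path: network byte order */
	uint32_t info_v6 = mtu;		/* IPv6 path: host byte order */

	printf("IPv4 info = 0x%08x, IPv6 info = %u\n",
	       (unsigned)info_v4, (unsigned)info_v6);
	return 0;
}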