The following patch adds MRA receive handling to the CM and fixes a
bug where the state check was inverted when sending an MRA.

Note: if the MRA is lost, it will not be resent.  That support will
come in a separate patch.

Signed-off-by: Sean Hefty <[EMAIL PROTECTED]>
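
For context, and not part of the patch itself: a minimal sketch of how a
CM consumer might exercise these paths, assuming the ib_cm.h API in this
tree.  The passive side MRAs a REQ it cannot answer immediately, and the
active side now sees an IB_CM_MRA_RECEIVED event carrying the peer's
service timeout.  The handler name, timeout value, and header path are
illustrative, not taken from the patch.

#include <linux/kernel.h>
#include <ib_cm.h>	/* header path is an assumption; later trees use <rdma/ib_cm.h> */

/* Illustrative consumer callback -- not part of this patch. */
static int example_cm_handler(struct ib_cm_id *cm_id,
			      struct ib_cm_event *event)
{
	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		/*
		 * Passive side: the REP cannot be built yet, so MRA the
		 * REQ.  With the state check fixed below, the MRA is
		 * actually posted in IB_CM_REQ_RCVD (and REP_RCVD /
		 * LAP_RCVD).  20 is an example 5-bit service timeout.
		 */
		return ib_send_cm_mra(cm_id, 20, NULL, 0);
	case IB_CM_MRA_RECEIVED:
		/*
		 * Active side: cm_mra_handler() below has already adjusted
		 * the timeout of the outstanding REQ/REP/LAP; the consumer
		 * only needs to note the peer's service timeout.
		 */
		pr_debug("MRA received, service_timeout %d\n",
			 event->param.mra_rcvd.service_timeout);
		return 0;
	default:
		return 0;
	}
}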


Index: cm.c
===================================================================
--- cm.c        (revision 2402)
+++ cm.c        (working copy)
@@ -1712,7 +1712,12 @@ static void cm_format_rej(struct cm_rej_
                rej_msg->local_comm_id = 0;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
                break;
+       case IB_CM_MRA_REQ_SENT:
+               rej_msg->local_comm_id = cm_id_priv->id.local_id;
+               cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
+               break;
        case IB_CM_REP_RCVD:
+       case IB_CM_MRA_REP_SENT:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
                break;
@@ -1826,7 +1831,9 @@ static struct cm_id_private * cm_acquire
                                cm_id_priv = NULL;
                }
                spin_unlock_irqrestore(&cm.lock, flags);
-       } else
+       } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
+               cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
+       else
                cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
 
        return cm_id_priv;
@@ -1849,14 +1856,14 @@ static int cm_rej_handler(struct cm_work
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_SENT:
+       case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REP_SENT:
+       case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent,
                              (unsigned long) cm_id_priv->msg);
                /* fall through */
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
-       case IB_CM_MRA_REQ_RCVD:
-       case IB_CM_MRA_REP_RCVD:
                if (rej_msg->reason == IB_CM_REJ_STALE_CONN)
                        cm_enter_timewait(cm_id_priv);
                else
@@ -1957,10 +1964,10 @@ int ib_send_cm_mra(struct ib_cm_id *cm_i
                      service_timeout, private_data, private_data_len);
 
        spin_lock_irqsave(&cm_id_priv->lock, flags);
-       if (cm_id_priv->id.state != IB_CM_REQ_RCVD &&
-           cm_id_priv->id.state != IB_CM_REP_RCVD &&
-           (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
-            cm_id_priv->id.lap_state != IB_CM_LAP_RCVD)) {
+       if (cm_id_priv->id.state == IB_CM_REQ_RCVD ||
+           cm_id_priv->id.state == IB_CM_REP_RCVD ||
+           (cm_id_priv->id.state == IB_CM_ESTABLISHED &&
+            cm_id_priv->id.lap_state == IB_CM_LAP_RCVD)) {
 
                ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
                                       &msg->send_wr, &bad_send_wr);
@@ -1993,11 +2000,80 @@ out:
 }
 EXPORT_SYMBOL(ib_send_cm_mra);
 
+static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
+{
+       switch (cm_mra_get_msg_mraed(mra_msg)) {
+       case CM_MSG_RESPONSE_REQ:
+               return cm_acquire_id(mra_msg->remote_comm_id, 0);
+       case CM_MSG_RESPONSE_REP:
+       case CM_MSG_RESPONSE_OTHER:
+               return cm_acquire_id(mra_msg->remote_comm_id,
+                                    mra_msg->local_comm_id);
+       default:
+               return NULL;
+       }
+}
+
 static int cm_mra_handler(struct cm_work *work)
 {
-       /* todo: write MRA handler */
-       /* todo: add timeout mechanism separate from retries for
-               receiver of MRA */
+       struct cm_id_private *cm_id_priv;
+       struct cm_mra_msg *mra_msg;
+       unsigned long flags;
+       int timeout, ret;
+
+       mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
+       cm_id_priv = cm_acquire_mraed_id(mra_msg);
+       if (!cm_id_priv)
+               return -EINVAL;
+
+       work->cm_event.private_data = &mra_msg->private_data;
+       work->cm_event.param.mra_rcvd.service_timeout =
+                                       cm_mra_get_service_timeout(mra_msg);
+       timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg));
+
+       spin_lock_irqsave(&cm_id_priv->lock, flags);
+       switch (cm_id_priv->id.state) {
+       case IB_CM_REQ_SENT:
+               if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
+                   ib_modify_mad(cm_id_priv->av.port->mad_agent,
+                                 (unsigned long) cm_id_priv->msg, timeout))
+                       goto out;
+               cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
+               break;
+       case IB_CM_REP_SENT:
+               if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
+                   ib_modify_mad(cm_id_priv->av.port->mad_agent,
+                                 (unsigned long) cm_id_priv->msg, timeout))
+                       goto out;
+               cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
+               break;
+       case IB_CM_ESTABLISHED:
+               if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
+                   cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
+                   ib_modify_mad(cm_id_priv->av.port->mad_agent,
+                                 (unsigned long) cm_id_priv->msg, timeout))
+                       goto out;
+               cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
+               break;
+       default:
+               goto out;
+       }
+
+       cm_id_priv->msg->context[1] = (void *) (unsigned long)
+                                     cm_id_priv->id.state;
+       ret = atomic_inc_and_test(&cm_id_priv->work_count);
+       if (!ret)
+               list_add_tail(&work->list, &cm_id_priv->work_list);
+       spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+       if (ret)
+               cm_process_work(cm_id_priv, work);
+       else
+               cm_deref_id(cm_id_priv);
+       return 0;
+out:
+       spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+       cm_deref_id(cm_id_priv);
        return -EINVAL;
 }
 
@@ -2557,19 +2633,21 @@ static void cm_process_send_error(struct
 
        memset(&cm_event, 0, sizeof cm_event);
        cm_id_priv = msg->context[0];
-       state = (enum ib_cm_state) (unsigned long) msg->context[1];
 
        /* Discard old sends or ones without a response. */
        spin_lock_irqsave(&cm_id_priv->lock, flags);
+       state = (enum ib_cm_state) (unsigned long) msg->context[1];
        if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
                goto discard;
 
        switch (state) {
        case IB_CM_REQ_SENT:
+       case IB_CM_MRA_REQ_RCVD:
                cm_reset_to_idle(cm_id_priv);
                cm_event.event = IB_CM_REQ_ERROR;
                break;
        case IB_CM_REP_SENT:
+       case IB_CM_MRA_REP_RCVD:
                cm_reset_to_idle(cm_id_priv);
                cm_event.event = IB_CM_REP_ERROR;
                break;


