This patch adds rdma_cma_init_qp(), which transitions a QP to the INIT
state and binds it to the cma_id.  It is called after a route has been
resolved and keeps this part of connection setup transport independent.
Since the QP is now associated with the cma_id, the qp field is removed
from rdma_cma_conn_param.
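For illustration, a ULP might use the call roughly as follows once route
resolution has completed.  This is only a sketch and not part of the
patch; the caller name, the access flags chosen, and the
rdma_cma_connect() signature are assumed here:

static int example_connect(struct rdma_cma_id *cma_id, struct ib_qp *qp,
                           struct rdma_cma_conn_param *conn_param)
{
        int ret;

        /* Bind the user-allocated QP to the cma_id and move it to INIT. */
        ret = rdma_cma_init_qp(cma_id, qp,
                               IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
        if (ret)
                return ret;

        /* The QP number and SRQ flag are now taken from cma_id->qp. */
        return rdma_cma_connect(cma_id, conn_param);
}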

Signed-off-by: Sean Hefty <[EMAIL PROTECTED]>


Index: ulp/cma/cma.c
===================================================================
--- ulp/cma/cma.c       (revision 3568)
+++ ulp/cma/cma.c       (working copy)
@@ -116,6 +116,55 @@ struct cma_id_private* cma_alloc_id(stru
        return cma_id_priv;
 }
 
+static int cma_modify_ib_qp_init(struct cma_id_private *cma_id_priv,
+                                struct ib_qp *qp, int qp_access_flags)
+{
+       struct ib_qp_attr qp_attr;
+       struct ib_sa_path_rec *path_rec;
+       int ret;
+
+       qp_attr.qp_state = IB_QPS_INIT;
+       qp_attr.qp_access_flags = qp_access_flags;
+
+       path_rec = cma_id_priv->cma_id.route.path_rec;
+       ret = ib_find_cached_gid(cma_id_priv->cma_id.device, &path_rec->sgid,
+                                &qp_attr.port_num, NULL);
+       if (ret)
+               return ret;
+
+       ret = ib_find_cached_pkey(cma_id_priv->cma_id.device, qp_attr.port_num,
+                                 be16_to_cpu(path_rec->pkey),
+                                 &qp_attr.pkey_index);
+       if (ret)
+               return ret;
+
+       return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
+                                         IB_QP_PKEY_INDEX | IB_QP_PORT);
+}
+
+int rdma_cma_init_qp(struct rdma_cma_id *cma_id, struct ib_qp *qp,
+                    int qp_access_flags)
+{
+       struct cma_id_private *cma_id_priv;
+       int ret;
+
+       cma_id_priv = container_of(cma_id, struct cma_id_private, cma_id);
+
+       switch (cma_id->device->node_type) {
+       case IB_NODE_CA:
+               ret = cma_modify_ib_qp_init(cma_id_priv, qp, qp_access_flags);
+               break;
+       default:
+               ret = -ENOSYS;
+               break;
+       }
+
+       if (!ret)
+               cma_id->qp = qp;
+       return ret;
+}
+EXPORT_SYMBOL(rdma_cma_init_qp);
+
 static int cma_modify_ib_qp_rtr(struct cma_id_private *cma_id_priv)
 {
        struct ib_qp_attr qp_attr;
@@ -552,7 +601,7 @@ static int cma_connect_ib(struct cma_id_
                req.alternate_path = &route->path_rec[1];
 
        req.service_id = cma_get_service_id(&route->dst_addr);
-       req.qp_num = conn_param->qp->qp_num;
+       req.qp_num = cma_id_priv->cma_id.qp->qp_num;
        req.qp_type = IB_QPT_RC;
        req.starting_psn = req.qp_num;
        req.responder_resources = conn_param->responder_resources;
@@ -563,7 +612,7 @@ static int cma_connect_ib(struct cma_id_
        req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
        req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
        req.max_cm_retries = CMA_MAX_CM_RETRIES;
-       req.srq = conn_param->qp->srq ? 1 : 0;
+       req.srq = cma_id_priv->cma_id.qp->srq ? 1 : 0;
 
        return ib_send_cm_req(cma_id_priv->cm_id, &req);
 }
@@ -576,8 +625,6 @@ int rdma_cma_connect(struct rdma_cma_id 
 
        cma_id_priv = container_of(cma_id, struct cma_id_private, cma_id);
 
-       cma_id->qp = conn_param->qp;
-
        switch (cma_id->device->node_type) {
        case IB_NODE_CA:
                ret = cma_connect_ib(cma_id_priv, conn_param);
@@ -602,7 +649,7 @@ static int cma_accept_ib(struct cma_id_p
                return ret;
 
        memset(&rep, 0, sizeof rep);
-       rep.qp_num = conn_param->qp->qp_num;
+       rep.qp_num = cma_id_priv->cma_id.qp->qp_num;
        rep.starting_psn = rep.qp_num;
        rep.private_data = conn_param->private_data;
        rep.private_data_len = conn_param->private_data_len;
@@ -612,7 +659,7 @@ static int cma_accept_ib(struct cma_id_p
        rep.failover_accepted = 0;
        rep.flow_control = conn_param->flow_control;
        rep.rnr_retry_count = conn_param->rnr_retry_count;
-       rep.srq = conn_param->qp->srq ? 1 : 0;
+       rep.srq = cma_id_priv->cma_id.qp->srq ? 1 : 0;
 
        return ib_send_cm_rep(cma_id_priv->cm_id, &rep);
 }
@@ -625,8 +672,6 @@ int rdma_cma_accept(struct rdma_cma_id *
 
        cma_id_priv = container_of(cma_id, struct cma_id_private, cma_id);
 
-       cma_id->qp = conn_param->qp;
-
        switch (cma_id->device->node_type) {
        case IB_NODE_CA:
                ret = cma_accept_ib(cma_id_priv, conn_param);
Index: include/rdma/rdma_cma.h
===================================================================
--- include/rdma/rdma_cma.h     (revision 3568)
+++ include/rdma/rdma_cma.h     (working copy)
@@ -93,8 +93,14 @@ int rdma_cma_resolve_route(struct rdma_c
                           struct sockaddr *src_addr, struct sockaddr *dst_addr,
                           int timeout_ms);
 
+/**
+ * rdma_cma_init_qp - Associates a QP with a CMA identifier and initializes the
+ *   QP for use in establishing a connection.
+ */
+int rdma_cma_init_qp(struct rdma_cma_id *cma_id, struct ib_qp *qp,
+                    int qp_access_flags);
+
 struct rdma_cma_conn_param {
-       struct ib_qp *qp;
        const void *private_data;
        u8 private_data_len;
        u8 responder_resources;


