Automatically negotiate the maximum usable RDMA resources if requested by the user: when responder_resources is set to RDMA_MAX_RESP_RES or initiator_depth to RDMA_MAX_INIT_DEPTH, the library clamps the value to the local device maximum on rdma_connect(), and on rdma_accept() takes the minimum of the value requested by the remote peer and the local device maximum.
From: Sean Hefty <[email protected]>

---
 trunk/ulp/librdmacm/include/rdma/rdma_cma.h |    5 +++
 trunk/ulp/librdmacm/src/cma.cpp             |   43 ++++++++++++++++++++++-----
 2 files changed, 40 insertions(+), 8 deletions(-)

diff --git a/trunk/ulp/librdmacm/include/rdma/rdma_cma.h b/trunk/ulp/librdmacm/include/rdma/rdma_cma.h
index 16f7f21..294a75b 100644
--- a/trunk/ulp/librdmacm/include/rdma/rdma_cma.h
+++ b/trunk/ulp/librdmacm/include/rdma/rdma_cma.h
@@ -146,6 +146,11 @@ struct rdma_cm_id
 	struct ibv_cq		*recv_cq;
 };
 
+enum {
+	RDMA_MAX_RESP_RES = 0xFF,
+	RDMA_MAX_INIT_DEPTH = 0xFF
+};
+
 struct rdma_conn_param
 {
 	const void *private_data;
diff --git a/trunk/ulp/librdmacm/src/cma.cpp b/trunk/ulp/librdmacm/src/cma.cpp
index 0d6e6f5..05c672d 100644
--- a/trunk/ulp/librdmacm/src/cma.cpp
+++ b/trunk/ulp/librdmacm/src/cma.cpp
@@ -74,6 +74,8 @@ struct cma_id_private
 	int			index;
 	volatile LONG		refcnt;
 	struct rdma_cm_id	**req_list;
+	uint8_t			initiator_depth;
+	uint8_t			responder_resources;
 };
 
 struct cma_device
@@ -737,15 +739,20 @@ void rdma_destroy_qp(struct rdma_cm_id *id)
 }
 
 static int ucma_valid_param(struct cma_id_private *id_priv,
-			    struct rdma_conn_param *conn_param)
+			    struct rdma_conn_param *param)
 {
 	if (id_priv->id.ps != RDMA_PS_TCP) {
 		return 0;
 	}
 
-	if ((conn_param->responder_resources > id_priv->cma_dev->max_responder_resources) ||
-	    (conn_param->initiator_depth > id_priv->cma_dev->max_initiator_depth)) {
-		return -1;
+	if ((param->responder_resources != RDMA_MAX_RESP_RES) &&
+	    (param->responder_resources > id_priv->cma_dev->max_responder_resources)) {
+		return rdma_seterrno(EINVAL);
+	}
+
+	if ((param->initiator_depth != RDMA_MAX_INIT_DEPTH) &&
+	    (param->initiator_depth > id_priv->cma_dev->max_initiator_depth)) {
+		return rdma_seterrno(EINVAL);
 	}
 
 	return 0;
@@ -765,9 +772,14 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 		return ret;
 	}
 
+	id_priv->responder_resources = min(conn_param->responder_resources,
+					   id_priv->cma_dev->max_responder_resources);
+	id_priv->initiator_depth = min(conn_param->initiator_depth,
+				       id_priv->cma_dev->max_initiator_depth);
+
 	RtlZeroMemory(&attr, sizeof attr);
-	attr.ResponderResources = conn_param->responder_resources;
-	attr.InitiatorDepth = conn_param->initiator_depth;
+	attr.ResponderResources = id_priv->responder_resources;
+	attr.InitiatorDepth = id_priv->initiator_depth;
 	attr.RetryCount = conn_param->retry_count;
 	attr.RnrRetryCount = conn_param->rnr_retry_count;
 	if ((attr.DataLength = conn_param->private_data_len)) {
@@ -892,9 +904,22 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 		return ret;
 	}
 
+	if (conn_param->initiator_depth == RDMA_MAX_INIT_DEPTH) {
+		id_priv->initiator_depth = min(id_priv->initiator_depth,
+					       id_priv->cma_dev->max_initiator_depth);
+	} else {
+		id_priv->initiator_depth = conn_param->initiator_depth;
+	}
+	if (conn_param->responder_resources == RDMA_MAX_RESP_RES) {
+		id_priv->responder_resources = min(id_priv->responder_resources,
+						   id_priv->cma_dev->max_responder_resources);
+	} else {
+		id_priv->responder_resources = conn_param->responder_resources;
+	}
+
 	RtlZeroMemory(&attr, sizeof attr);
-	attr.ResponderResources = conn_param->responder_resources;
-	attr.InitiatorDepth = conn_param->initiator_depth;
+	attr.ResponderResources = id_priv->responder_resources;
+	attr.InitiatorDepth = id_priv->initiator_depth;
 	attr.RetryCount = conn_param->retry_count;
 	attr.RnrRetryCount = conn_param->rnr_retry_count;
 	if ((attr.DataLength = conn_param->private_data_len)) {
@@ -1004,6 +1029,8 @@ static int ucma_process_conn_req(struct cma_event *event)
 	event->event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
 	id_priv->state = cma_passive_connect;
 	event->event.listen_id = &listen->id;
+	id_priv->initiator_depth = event->event.param.conn.initiator_depth;
+	id_priv->responder_resources = event->event.param.conn.responder_resources;
 
 	return 0;
 }
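For readers who want to exercise the new behavior, a minimal usage sketch follows. It is not part of the patch: the helper names are invented for illustration, error handling is elided, and it assumes an RDMA_PS_TCP rdma_cm_id whose address and route are already resolved and whose QP has been created.

	#include <string.h>
	#include <rdma/rdma_cma.h>

	/* Active side: the RDMA_MAX_* sentinels ask rdma_connect() to
	 * clamp the values to the local device maximums instead of
	 * failing when a fixed request exceeds them. */
	static int connect_with_max_resources(struct rdma_cm_id *id)
	{
		struct rdma_conn_param param;

		memset(&param, 0, sizeof param);
		param.responder_resources = RDMA_MAX_RESP_RES;
		param.initiator_depth = RDMA_MAX_INIT_DEPTH;
		param.retry_count = 7;
		param.rnr_retry_count = 7;
		return rdma_connect(id, &param);
	}

	/* Passive side: with the sentinels, rdma_accept() takes the
	 * minimum of what the peer requested (saved by
	 * ucma_process_conn_req) and the local device maximum; any
	 * other value is used as given. */
	static int accept_with_max_resources(struct rdma_cm_id *id)
	{
		struct rdma_conn_param param;

		memset(&param, 0, sizeof param);
		param.responder_resources = RDMA_MAX_RESP_RES;
		param.initiator_depth = RDMA_MAX_INIT_DEPTH;
		param.rnr_retry_count = 7;
		return rdma_accept(id, &param);
	}

Either helper would be called from the usual connection state machine: after RDMA_CM_EVENT_ROUTE_RESOLVED on the active side, or on the id delivered with RDMA_CM_EVENT_CONNECT_REQUEST on the passive side.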
