Bub Thomas wrote:
Dotan,
The ibv_rc_pingpong example works for me, so I can rule out the
architecture.
I never got the libibcm example to compile.
Which example are you using, and for which architecture (x86 vs. x86_64)
did you compile it?
Can you share your libibcm example code (if it is not the standard one
that I can't get to compile)?
Thomas
I started to modify qp_test (a test that can be found in
https://openib.org/svn/trunk/contrib/mellanox/ibtp/gen2/userspace/useraccess/qp_test/).
Here is the main file that deals with libibcm.
I'm sorry, but if you add this file to qp_test it won't compile
(because of some more changes in the rest of the code).
When I finish cleaning up the code, I will commit the full version (with
the libibcm support) to the OpenIB SVN.
I hope that this code will help you ...
Dotan
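For context: connect_qp_cm() in the file below assumes the harness has already opened a CM device handle (h_cm_device) and created one CM ID per QP (qp_attr_arr[i].cm_id), and that the daemon side is listening on the service ID that the active side puts into the REQ. A rough sketch of that setup follows; setup_cm_sketch() is not part of qp_test, and the exact signatures of ib_cm_open_device()/ib_cm_create_id()/ib_cm_listen() may differ between libibcm versions, so check your <infiniband/cm.h>:

/*
 * Rough sketch only (not part of qp_test): the CM setup that
 * connect_qp_cm() below assumes has already been done by the harness.
 */
#include <stdint.h>
#include <infiniband/verbs.h>
#include <infiniband/cm.h>

static int setup_cm_sketch(struct ibv_context *verbs_ctx,
                           uint64_t service_id_net, /* already in network byte order */
                           int is_daemon,
                           struct ib_cm_device **cm_dev_p,
                           struct ib_cm_id **cm_id_p)
{
        int rc;

        /* one CM device handle per ibv_context (kept as h_cm_device below) */
        *cm_dev_p = ib_cm_open_device(verbs_ctx);
        if (!*cm_dev_p)
                return -1;

        /* one CM ID per QP that will be connected (kept as qp_attr_arr[i].cm_id below) */
        rc = ib_cm_create_id(*cm_dev_p, cm_id_p, NULL);
        if (rc)
                goto err_dev;

        /* the passive (daemon) side listens on the service ID that the
         * active side will put into req_param.service_id */
        if (is_daemon) {
                rc = ib_cm_listen(*cm_id_p, service_id_net, 0 /* let the CM use its default mask */);
                if (rc)
                        goto err_id;
        }
        return 0;

err_id:
        ib_cm_destroy_id(*cm_id_p);
err_dev:
        ib_cm_close_device(*cm_dev_p);
        return rc;
}

Note that the service ID is expected in network byte order, which is why the file below passes it through VL_htonll() before filling req_param.service_id.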
/*
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* $Id: connect_qp.c 136 2005-09-21 15:07:17Z dotanb $
*
*/
#include <infiniband/verbs.h>
#include <infiniband/cm.h>
#include <vl.h>
#include "types.h"
#include "connect_qp.h"
extern struct config_t config;
/*************************************************************
* Function: fill_av_props
*************************************************************/
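/* Fills an ibv_ah_attr for the primary or alternate path: port, DLID, SL and
 * static rate; the GRH fields are filled only when the GRH or multicast test
 * flags are set. */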
void fill_av_props(
IN const struct test_config_t
*test_config_p,
IN uint8_t port,
IN uint16_t dlid,
IN union ibv_gid dest_gid,
IN uint8_t src_path_bits,
INOUT struct thread_resources_t *thread_rsc_p,
OUT struct ibv_ah_attr *ah_attr_p)
{
memset(ah_attr_p, 0, sizeof(struct ibv_ah_attr));
ah_attr_p->port_num = port;
ah_attr_p->dlid = dlid;
ah_attr_p->static_rate = DEF_STATIC_RATE;
ah_attr_p->src_path_bits = src_path_bits;
/* do casting to prevent compiler warning */
ah_attr_p->sl = (uint8_t)((VL_MASK_IS_SET(test_config_p->test_flags,
IS_SUPPORTED_RAND_SL)) ?
VL_random(&thread_rsc_p->rand, 15) : DEF_SL);
if ((VL_MASK_IS_SET(test_config_p->test_flags, IS_SUPPORTED_GRH)) ||
(VL_MASK_IS_SET(test_config_p->test_flags, IS_SUPPORTED_MULTICAST))) {
ah_attr_p->is_global = 1;
/* fill the GRH */
ah_attr_p->grh.dgid = dest_gid;
ah_attr_p->grh.flow_label = DEF_FLOW_LABEL;
ah_attr_p->grh.sgid_index = DEF_SGID_IDX;
ah_attr_p->grh.hop_limit = DEF_HOP_LIMIT;
ah_attr_p->grh.traffic_class = DEF_TRAFFIC_CLASS;
}
}
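/*************************************************************
* Function: fill_sa_path_rec
*************************************************************/
/* Fills an ibv_sa_path_rec for the primary or alternate path of the CM REQ,
 * using the LIDs/GIDs exchanged over the test socket; several selectors are
 * still marked as todo. */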
void fill_sa_path_rec(
IN const struct test_config_t
*test_config_p,
IN uint16_t dlid,
IN uint16_t slid,
IN uint16_t pkey,
IN union ibv_gid dest_gid,
IN union ibv_gid src_gid,
INOUT struct thread_resources_t *thread_rsc_p,
OUT struct ibv_sa_path_rec
*sa_path_rec_p)
{
/*
union ibv_gid *src = (union ibv_gid *)&sa_path_rec_p.sgid;
union ibv_gid *dest = (union ibv_gid *)&sa_path_rec_p.dgid;
*/
memset(sa_path_rec_p, 0, sizeof(struct ibv_sa_path_rec));
sa_path_rec_p->dgid = dest_gid;
sa_path_rec_p->sgid = src_gid;
sa_path_rec_p->dlid = htons(dlid);
sa_path_rec_p->slid = htons(slid);
sa_path_rec_p->raw_traffic = 0; /* IB packet */
sa_path_rec_p->flow_label = DEF_FLOW_LABEL;
sa_path_rec_p->hop_limit = DEF_HOP_LIMIT;
sa_path_rec_p->traffic_class = DEF_TRAFFIC_CLASS;
sa_path_rec_p->reversible = 0x1000000; /* reversible is required */
sa_path_rec_p->numb_path = 0; // todo
sa_path_rec_p->pkey = pkey;
sa_path_rec_p->sl =
(uint8_t)((VL_MASK_IS_SET(test_config_p->test_flags, IS_SUPPORTED_RAND_SL)) ?
VL_random(&thread_rsc_p->rand, 15) : DEF_SL);
sa_path_rec_p->mtu_selector = 2; /* use exactly this MTU */
sa_path_rec_p->mtu = test_config_p->path_mtu;
sa_path_rec_p->rate_selector = 2; // todo
sa_path_rec_p->rate = 3; // todo
sa_path_rec_p->packet_life_time_selector = 2; // todo
sa_path_rec_p->packet_life_time = 2; // todo
sa_path_rec_p->preference = 0;
}
/*************************************************************
* Function: modify_qp_init
*************************************************************/
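/* RESET -> INIT: sets the P_Key index and port for every transport type,
 * the QKey for UD, and the access flags for UC/RC. */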
static int modify_qp_init(
IN const struct test_config_t
*test_config_p,
IN const struct cm_data_t
*cm_data_local_p,
IN struct ibv_qp *h_qp,
IN enum ibv_qp_type ts_type)
{
struct ibv_qp_attr qp_attr;
enum ibv_qp_attr_mask attr_mask = 0;
/* clear qp_attr */
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
/* modify QP state to init */
qp_attr.qp_state = IBV_QPS_INIT;
attr_mask |= IBV_QP_STATE;
qp_attr.pkey_index = DEF_PKEY_IX;
attr_mask |= IBV_QP_PKEY_INDEX;
qp_attr.port_num = config.ib_port;
attr_mask |= IBV_QP_PORT;
if (ts_type == IBV_QPT_UD) {
qp_attr.qkey = cm_data_local_p->qkey;
attr_mask |= IBV_QP_QKEY;
}
if ((ts_type == IBV_QPT_UC) || (ts_type == IBV_QPT_RC)) {
qp_attr.qp_access_flags =
test_config_p->thread_resources_sizes.qp_access;
attr_mask |= IBV_QP_ACCESS_FLAGS;
}
return ibv_modify_qp(h_qp, &qp_attr, attr_mask);
}
/*************************************************************
* Function: modify_qp_rtr
*************************************************************/
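/* INIT -> RTR: for UC/RC sets the remote QP number, RQ PSN, path MTU and the
 * primary address vector (plus the alternate path when config.ib_port_alt is
 * set); for RC also sets max_dest_rd_atomic and the minimum RNR NAK timer. */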
static int modify_qp_rtr(
IN const struct test_config_t
*test_config_p,
IN const struct cm_data_t
*cm_data_remote_p,
INOUT struct thread_resources_t *thread_rsc_p,
IN struct ibv_qp *h_qp,
IN enum ibv_qp_type ts_type)
{
struct ibv_qp_attr qp_attr;
enum ibv_qp_attr_mask attr_mask = 0;
/* clear qp_attr */
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
/* modify QP state to RTR */
qp_attr.qp_state = IBV_QPS_RTR;
attr_mask |= IBV_QP_STATE;
if ((ts_type == IBV_QPT_UC) || (ts_type == IBV_QPT_RC)) {
qp_attr.rq_psn = cm_data_remote_p->psn;
attr_mask |= IBV_QP_RQ_PSN;
qp_attr.dest_qp_num = cm_data_remote_p->qp_num;
attr_mask |= IBV_QP_DEST_QPN;
/* set primary path properties */
fill_av_props(test_config_p, config.ib_port,
thread_rsc_p->remote_con_data.lid,
thread_rsc_p->remote_con_data.gid,
thread_rsc_p->local_con_data.primary_src_path_bits, thread_rsc_p,
&qp_attr.ah_attr);
attr_mask |= IBV_QP_AV;
qp_attr.path_mtu = (uint8_t)test_config_p->path_mtu;
attr_mask |= IBV_QP_PATH_MTU;
/* set the alternate path properties (if needed) */
if (config.ib_port_alt) {
fill_av_props(test_config_p, config.ib_port_alt,
thread_rsc_p->remote_con_data.lid_alt,
thread_rsc_p->remote_con_data.gid_alt,
thread_rsc_p->local_con_data.alternate_src_path_bits, thread_rsc_p,
&qp_attr.alt_ah_attr);
qp_attr.alt_pkey_index = DEF_PKEY_IX;
qp_attr.alt_port_num = config.ib_port_alt;
qp_attr.alt_timeout =
(uint8_t)test_config_p->qp_timeout;
attr_mask |= IBV_QP_ALT_PATH;
}
}
if (ts_type == IBV_QPT_RC) {
qp_attr.max_dest_rd_atomic = cm_data_remote_p->max_rd_atomic;
attr_mask |= IBV_QP_MAX_DEST_RD_ATOMIC;
qp_attr.min_rnr_timer = (uint8_t)DEF_RNR_NAK_TIMER;
attr_mask |= IBV_QP_MIN_RNR_TIMER;
}
return ibv_modify_qp(h_qp, &qp_attr, attr_mask);
}
/*************************************************************
* Function: modify_qp_rts
*************************************************************/
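/* RTR -> RTS: sets the SQ PSN for every transport type; for RC adds the ACK
 * timeout, retry/RNR-retry counts and max_rd_atomic; when an alternate port
 * is configured, also re-arms the path migration (APM) state machine. */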
static int modify_qp_rts(
IN const struct test_config_t
*test_config_p,
IN const struct cm_data_t
*cm_data_local_p,
IN struct ibv_qp *h_qp,
IN enum ibv_qp_type ts_type)
{
struct ibv_qp_attr qp_attr;
enum ibv_qp_attr_mask attr_mask = 0;
/* clear qp_attr */
memset(&qp_attr, 0, sizeof(struct ibv_qp_attr));
/* modify QP state to RTS */
qp_attr.qp_state = IBV_QPS_RTS;
attr_mask |= IBV_QP_STATE;
qp_attr.sq_psn = cm_data_local_p->psn;
attr_mask |= IBV_QP_SQ_PSN;
if (ts_type == IBV_QPT_RC) {
qp_attr.timeout = (uint8_t)test_config_p->qp_timeout;
attr_mask |= IBV_QP_TIMEOUT;
qp_attr.retry_cnt = (uint8_t)test_config_p->qp_retry_count;
attr_mask |= IBV_QP_RETRY_CNT;
qp_attr.rnr_retry = (uint8_t)ACK_RNR_RETRY_COUNT;
attr_mask |= IBV_QP_RNR_RETRY;
qp_attr.max_rd_atomic = cm_data_local_p->max_rd_atomic;
attr_mask |= IBV_QP_MAX_QP_RD_ATOMIC;
}
/* change the APM state machine (if needed) */
if (config.ib_port_alt) {
qp_attr.path_mig_state = IBV_MIG_REARM;
attr_mask |= IBV_QP_PATH_MIG_STATE;
}
return ibv_modify_qp(h_qp, &qp_attr, attr_mask);
}
/*************************************************************
* Function: connect_qp_cm
*************************************************************/
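/* Connects one QP through the IB CM (libibcm). The daemon (passive) side
 * waits for IB_CM_REQ_RECEIVED, brings its QP to RTR/RTS, sends a REP and
 * waits for the RTU; the active side sends a REQ, waits for the REP, brings
 * its QP to RTR/RTS and sends the RTU. The test socket is used only to
 * synchronize the two sides before and after the CM exchange. */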
static int connect_qp_cm(
IN const struct test_config_t
*test_config_p,
INOUT struct thread_resources_t *thread_rsc_p,
IN uint32_t qp_idx)
{
struct cm_data_t cm_data_local, cm_data_remote;
struct ib_cm_req_param req_param;
struct ib_cm_rep_param rep_param;
struct ib_cm_event *event = NULL;
struct ib_cm_id *received_cm_id = NULL;
struct ibv_sa_path_rec primary_path, alternate_path;
enum ib_cm_event_type received_event;
int rc, total_result = -1;
/* exchange CM data in socket */
cm_data_local.qp_num = thread_rsc_p->qp_attr_arr[qp_idx].qp_num;
cm_data_local.qkey = VL_random(&thread_rsc_p->rand, 0x7fffffff); /* the MSB bit is cleared */
cm_data_local.psn = VL_random(&thread_rsc_p->rand, 0xffffff);
cm_data_local.max_rd_atomic =
test_config_p->thread_resources_sizes.max_rd_atomic;
rc = modify_qp_init(test_config_p, &cm_data_local,
thread_rsc_p->qp_attr_arr[qp_idx].qp_handle, test_config_p->ts_type);
if (rc) {
VL_DATA_ERR(("thread %d: ib_modify_qp[%u] to INIT failed",
thread_rsc_p->thread_id, qp_idx));
return rc;
}
rc = VL_sock_sync_ready(&thread_rsc_p->sock);
if (rc != 0) {
VL_SOCK_ERR(("thread %d: Failed exchange CM data between sides
for QP[%u]", thread_rsc_p->thread_id, qp_idx));
return -1;
}
if (config.is_daemon) {
rc = ib_cm_get_event(thread_rsc_p->h_cm_device, &event);
if (rc) {
VL_DATA_ERR(("thread %d: Failed to get CM event of
QP[%u]", thread_rsc_p->thread_id, qp_idx));
goto cleanup;
}
if (event->event != IB_CM_REQ_RECEIVED) {
VL_DATA_ERR(("thread %d: Got unexpected event %s when
conecting QP[%u], (expected event was IB_CM_REQ_RECEIVED)",
thread_rsc_p->thread_id,
VL_ib_cm_event_type_str(event->event), qp_idx));
/* in case of error, ack the event and don't check the status of this call */
rc = -1;
goto cleanup;
}
printf("thread %d: got IB_CM_REQ_RECEIVED event\n", thread_rsc_p->thread_id);
cm_data_remote.qp_num = event->param.req_rcvd.remote_qpn;
cm_data_remote.qkey = 0; // todo - dotan - fill the qkey
cm_data_remote.psn = event->param.req_rcvd.starting_psn;
rc = modify_qp_rtr(test_config_p, &cm_data_remote,
thread_rsc_p, thread_rsc_p->qp_attr_arr[qp_idx].qp_handle,
test_config_p->ts_type);
if (rc) {
VL_DATA_ERR(("thread %d: ib_modify_qp[%u] to RTR
failed", thread_rsc_p->thread_id, qp_idx));
goto cleanup;
}
rc = modify_qp_rts(test_config_p, &cm_data_local,
thread_rsc_p->qp_attr_arr[qp_idx].qp_handle, test_config_p->ts_type);
if (rc) {
VL_DATA_ERR(("thread %d: ib_modify_qp[%u] to RTS
failed", thread_rsc_p->thread_id, qp_idx));
goto cleanup;
}
received_cm_id = event->cm_id;
memset(&rep_param, 0, sizeof(rep_param));
rep_param.qp_num = cm_data_local.qp_num;
rep_param.starting_psn = cm_data_local.psn;
rep_param.private_data = 0;
rep_param.private_data_len = 0; /* we don't send any private data */
rep_param.responder_resources = cm_data_local.max_rd_atomic;
rep_param.initiator_depth = cm_data_local.max_rd_atomic;
//rep_param.target_ack_delay = xxxxxxxxxxx; // todo
//rep_param.failover_accepted = xxxxxxxxxxx; // todo
//rep_param.flow_control = xxxxxxxxxxx; // todo
rep_param.rnr_retry_count = (uint8_t)ACK_RNR_RETRY_COUNT;
rep_param.srq = (VL_MASK_IS_SET(test_config_p->test_flags,
IS_SUPPORTED_CM)) ? 1 : 0;
rc = ib_cm_send_rep(received_cm_id, &rep_param);
if (rc) {
VL_DATA_ERR(("thread %d: Failed sending rep to CM for
QP[%u]", thread_rsc_p->thread_id, qp_idx));
goto cleanup;
}
printf("thread %d: sending rep\n", thread_rsc_p->thread_id);
rc = ib_cm_ack_event(event);
if (rc) {
VL_DATA_ERR(("thread %d: Failed to ack on CM event of
QP[%u]", thread_rsc_p->thread_id, qp_idx));
goto cleanup;
}
event = NULL;
rc = ib_cm_get_event(thread_rsc_p->h_cm_device, &event);
if (rc) {
VL_DATA_ERR(("thread %d: Failed to get CM event of
QP[%u]", thread_rsc_p->thread_id, qp_idx));
goto cleanup;
}
if (event->event != IB_CM_RTU_RECEIVED) {
VL_DATA_ERR(("thread %d: Got unexpected event %s when
conecting QP[%u], (expected event was IB_CM_RTU_RECEIVED)",
thread_rsc_p->thread_id,
VL_ib_cm_event_type_str(event->event), qp_idx));
/* in case of error, ack the event and don't check the status of this call */
rc = -1;
goto cleanup;
}
printf("thread %d: got IB_CM_RTU_RECEIVED event\n", thread_rsc_p->thread_id);
/* this event will be acked at the cleanup section */
} else {
int XXXXX = 0;
memset(&req_param, 0, sizeof(req_param));
/* fill the primary path */
fill_sa_path_rec(test_config_p,
thread_rsc_p->remote_con_data.lid, thread_rsc_p->local_con_data.lid,
0xffff,
thread_rsc_p->remote_con_data.gid, thread_rsc_p->local_con_data.gid,
thread_rsc_p, &primary_path);
req_param.primary_path = &primary_path;
/* fill the alternate path */
if (config.ib_port_alt) {
fill_sa_path_rec(test_config_p,
thread_rsc_p->remote_con_data.lid_alt, thread_rsc_p->local_con_data.lid_alt,
0xffff,
thread_rsc_p->remote_con_data.gid_alt, thread_rsc_p->local_con_data.gid_alt,
thread_rsc_p, &alternate_path);
req_param.alternate_path = &alternate_path;
}
req_param.service_id =
VL_htonll(test_config_p->thread_resources_sizes.cm_service_id); // todo - change the service id
req_param.qp_num = cm_data_local.qp_num;
req_param.qp_type = test_config_p->ts_type;
req_param.starting_psn = cm_data_local.psn;
req_param.private_data = 0;
req_param.private_data_len = 0; /* we don't send any private data */
req_param.peer_to_peer = XXXXX;
req_param.responder_resources = cm_data_local.max_rd_atomic;
req_param.initiator_depth = cm_data_local.max_rd_atomic;
req_param.remote_cm_response_timeout = DEF_CM_RESPONSE_TIMEOUT;
//req_param.flow_control = XXXXX; left at 0 by the memset // todo
req_param.local_cm_response_timeout = DEF_CM_RESPONSE_TIMEOUT;
req_param.retry_count = (uint8_t)test_config_p->qp_retry_count;
req_param.rnr_retry_count = (uint8_t)ACK_RNR_RETRY_COUNT;
req_param.max_cm_retries = DEF_CM_RETRY;
req_param.srq = (VL_MASK_IS_SET(test_config_p->test_flags,
IS_SUPPORTED_CM)) ? 1 : 0;
rc = ib_cm_send_req(thread_rsc_p->qp_attr_arr[qp_idx].cm_id,
&req_param);
if (rc) {
VL_DATA_ERR(("thread %d: Failed sending req to CM for
QP[%u]", thread_rsc_p->thread_id, qp_idx));
goto cleanup;
}
printf("thread %d: sending req\n", thread_rsc_p->thread_id);
rc = ib_cm_get_event(thread_rsc_p->h_cm_device, &event);
if (rc) {
VL_DATA_ERR(("thread %d: Failed to get CM event of
QP[%u]", thread_rsc_p->thread_id, qp_idx));
goto cleanup;
}
received_event = event->event;
if (received_event != IB_CM_REP_RECEIVED) {
VL_DATA_ERR(("thread %d: Got unexpected event %s when
conecting QP[%u], (expected event was IB_CM_REP_RECEIVED)",
thread_rsc_p->thread_id,
VL_ib_cm_event_type_str(event->event), qp_idx));
rc = -1;
goto cleanup;
}
printf("thread %d: got IB_CM_REP_RECEIVED event\n", thread_rsc_p->thread_id);
cm_data_remote.qp_num = event->param.rep_rcvd.remote_qpn;
cm_data_remote.qkey = 0; // todo - dotan - fill the qkey
cm_data_remote.psn = event->param.rep_rcvd.starting_psn;
cm_data_remote.max_rd_atomic = 0; // todo - which value to use here?
rc = modify_qp_rtr(test_config_p, &cm_data_remote,
thread_rsc_p, thread_rsc_p->qp_attr_arr[qp_idx].qp_handle,
test_config_p->ts_type);
if (rc) {
VL_DATA_ERR(("thread %d: ib_modify_qp[%u] to RTR
failed", thread_rsc_p->thread_id, qp_idx));
goto cleanup;
}
rc = modify_qp_rts(test_config_p, &cm_data_local,
thread_rsc_p->qp_attr_arr[qp_idx].qp_handle, test_config_p->ts_type);
if (rc) {
VL_DATA_ERR(("thread %d: ib_modify_qp[%u] to RTS
failed", thread_rsc_p->thread_id, qp_idx));
goto cleanup;
}
rc = ib_cm_send_rtu(thread_rsc_p->qp_attr_arr[qp_idx].cm_id,
NULL, 0);
if (rc) {
VL_DATA_ERR(("thread %d: Failed sending rtu to CM for
QP[%u]", thread_rsc_p->thread_id, qp_idx));
goto cleanup;
}
printf("thread %d: sending rtu\n", thread_rsc_p->thread_id);
/* this event will be acked at the cleanup section */
}
rc = VL_sock_sync_ready(&thread_rsc_p->sock);
if (rc != 0) {
VL_SOCK_ERR(("thread %d: Failed exchange CM data between sides
for QP[%u]", thread_rsc_p->thread_id, qp_idx));
goto cleanup;
}
printf("thread %d: cm_data_remote.qp_num = 0x%x\n", thread_rsc_p->thread_id,
cm_data_remote.qp_num);
printf("thread %d: cm_data_remote.qkey = 0x%x\n", thread_rsc_p->thread_id,
cm_data_remote.qkey);
printf("thread %d: cm_data_remote.psn = 0x%x\n", thread_rsc_p->thread_id,
cm_data_remote.psn);
printf("thread %d: cm_data_remote.max_rd_atomic = 0x%x\n",
thread_rsc_p->thread_id, cm_data_remote.max_rd_atomic);
printf("thread %d: XXXX QP[%u] was connected\n", thread_rsc_p->thread_id,
qp_idx);
total_result = 0;
cleanup:
/* destroy the event (if it wasn't done before) */
if (event) {
rc = ib_cm_ack_event(event);
if (rc) {
VL_DATA_ERR(("thread %d: Failed to ack on CM event of
QP[%u]", thread_rsc_p->thread_id, qp_idx));
total_result = rc;
}
}
printf("XXXXXXXXXX recevied_cm_id = %p\n", recevied_cm_id);
#if 0
/* destroy the CM id if we got one (in the server) */
if (received_cm_id) {
rc = ib_cm_destroy_id(received_cm_id);
if (rc) {
VL_DATA_ERR(("thread %d: Failed to destroy CM ID of
QP[%u]", thread_rsc_p->thread_id, qp_idx));
total_result = rc;
}
}
#endif
return total_result;
}
/*************************************************************
* Function: connect_qp_sock
*************************************************************/
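/* Connects one QP without the CM: QP number, QKey, PSN and max_rd_atomic are
 * exchanged over the test socket (in network byte order) and each side then
 * drives the INIT/RTR/RTS transitions locally. */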
static int connect_qp_sock(
IN const struct test_config_t
*test_config_p,
INOUT struct thread_resources_t *thread_rsc_p,
IN uint32_t qp_idx)
{
int rc;
struct cm_data_t cm_data_local, cm_data_remote;
struct cm_data_t tmp_cm_data_local, tmp_cm_data_remote;
/* exchange CM data in socket */
cm_data_local.qp_num = thread_rsc_p->qp_attr_arr[qp_idx].qp_num;
cm_data_local.qkey = VL_random(&thread_rsc_p->rand, 0x7fffffff); /* the MSB bit is cleared */
cm_data_local.psn = VL_random(&thread_rsc_p->rand, 0xffffff);
cm_data_local.max_rd_atomic =
test_config_p->thread_resources_sizes.max_rd_atomic;
/* fix the endianness issues */
tmp_cm_data_local.qp_num = htonl(cm_data_local.qp_num);
tmp_cm_data_local.qkey = htonl(cm_data_local.qkey);
tmp_cm_data_local.psn = htonl(cm_data_local.psn);
tmp_cm_data_local.max_rd_atomic = htonl(cm_data_local.max_rd_atomic);
rc = VL_sock_sync_data(&thread_rsc_p->sock, sizeof(struct cm_data_t),
&tmp_cm_data_local, &tmp_cm_data_remote);
if (rc != 0) {
VL_SOCK_ERR(("thread %d: Failed exchange CM data between sides
for QP[%u]", thread_rsc_p->thread_id, qp_idx));
return -1;
}
/* fix the endianness issues */
cm_data_remote.qp_num = ntohl(tmp_cm_data_remote.qp_num);
cm_data_remote.qkey = ntohl(tmp_cm_data_remote.qkey);
cm_data_remote.psn = ntohl(tmp_cm_data_remote.psn);
cm_data_remote.max_rd_atomic = ntohl(tmp_cm_data_remote.max_rd_atomic);
/* fill data from remote side in local DB */
thread_rsc_p->remote_data_arr[qp_idx].qp_num = cm_data_remote.qp_num;
thread_rsc_p->remote_data_arr[qp_idx].qkey = cm_data_remote.qkey;
rc = modify_qp_init(test_config_p, &cm_data_local,
thread_rsc_p->qp_attr_arr[qp_idx].qp_handle, test_config_p->ts_type);
if (rc) {
VL_DATA_ERR(("thread %d: ib_modify_qp[%u] to INIT failed",
thread_rsc_p->thread_id, qp_idx));
return rc;
}
rc = modify_qp_rtr(test_config_p, &cm_data_remote, thread_rsc_p,
thread_rsc_p->qp_attr_arr[qp_idx].qp_handle, test_config_p->ts_type);
if (rc) {
VL_DATA_ERR(("thread %d: ib_modify_qp[%u] to RTR failed",
thread_rsc_p->thread_id, qp_idx));
return rc;
}
rc = modify_qp_rts(test_config_p, &cm_data_local,
thread_rsc_p->qp_attr_arr[qp_idx].qp_handle, test_config_p->ts_type);
if (rc) {
VL_DATA_ERR(("thread %d: ib_modify_qp[%u] to RTS failed",
thread_rsc_p->thread_id, qp_idx));
return rc;
}
return 0;
}
/*************************************************************
* Function: connect_qp
*************************************************************/
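/* Dispatches to the CM-based or to the socket-based connection flow,
 * according to the IS_SUPPORTED_CM test flag. */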
int connect_qp(
IN const struct test_config_t
*test_config_p,
INOUT struct thread_resources_t *thread_rsc_p,
IN uint32_t qp_idx)
{
if (VL_MASK_IS_SET(test_config_p->test_flags, IS_SUPPORTED_CM))
return connect_qp_cm(test_config_p, thread_rsc_p, qp_idx);
else
return connect_qp_sock(test_config_p, thread_rsc_p, qp_idx);
}
/*************************************************************
* Function: connect_sync_qp
*************************************************************/
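/* Connects the RC QP that is used to synchronize the two sides: only the QP
 * numbers are exchanged over the socket, and the PSN and QKey are fixed to 0. */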
int connect_sync_qp(
IN const struct test_config_t
*test_config_p,
INOUT struct thread_resources_t *thread_rsc_p)
{
struct cm_data_t cm_data_local;
int rc;
uint32_t local_qp_num, tmp_qp_num;
/* exchange CM data in socket */
/* fix the endianness issues */
local_qp_num = htonl(thread_rsc_p->sync_qp->qp_num);
rc = VL_sock_sync_data(&thread_rsc_p->sock, sizeof(uint32_t),
&local_qp_num, &tmp_qp_num);
if (rc != 0) {
VL_SOCK_ERR(("thread %d: Failed exchange CM data between sides
for synch QP", thread_rsc_p->thread_id));
return -1;
}
/* fill the connection structure */
cm_data_local.psn = 0;
cm_data_local.qkey = 0;
cm_data_local.qp_num = ntohl(tmp_qp_num);
cm_data_local.max_rd_atomic =
test_config_p->thread_resources_sizes.max_rd_atomic;
rc = modify_qp_init(test_config_p, &cm_data_local,
thread_rsc_p->sync_qp, IBV_QPT_RC);
if (rc) {
VL_DATA_ERR(("thread %d: ib_modify_qp of synch QP to INIT
failed", thread_rsc_p->thread_id));
return rc;
}
rc = modify_qp_rtr(test_config_p, &cm_data_local, thread_rsc_p,
thread_rsc_p->sync_qp, IBV_QPT_RC);
if (rc) {
VL_DATA_ERR(("thread %d: ib_modify_qp of synch QP to RTR
failed", thread_rsc_p->thread_id));
return rc;
}
rc = modify_qp_rts(test_config_p, &cm_data_local,
thread_rsc_p->sync_qp, IBV_QPT_RC);
if (rc) {
VL_DATA_ERR(("thread %d: ib_modify_qp of synch QP to RTS
failed", thread_rsc_p->thread_id));
return rc;
}
return 0;
}
_______________________________________________
openib-general mailing list
openib-general@openib.org
http://openib.org/mailman/listinfo/openib-general
To unsubscribe, please visit http://openib.org/mailman/listinfo/openib-general