This patch:
- fixes a reference counting bug on CQs: EPs now take a reference on their respective CQs, to prevent a CQ from being destroyed before the EPs that use it.
- removes the dependency on IBAL from the ND provider.
- improves CQ polling by using poll_cq_array rather than poll_cq.
- fixes the error cleanup flow in __ndi_create_cq.
- specifies responder resources when accepting connections.
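As background for the first item, the sketch below shows the ownership rule the endpoint changes enforce: an endpoint takes a reference on each CQ it is created against and drops those references only during its own teardown, so a CQ cannot be destroyed while an endpoint still uses it. CCq and CEndpoint here are minimal stand-ins written only to illustrate the pattern; they are not the provider's actual classes.

#include <windows.h>

class CCq
{
    volatile LONG m_nRef;

public:
    CCq() : m_nRef( 1 ) {}

    void AddRef() { InterlockedIncrement( &m_nRef ); }

    void Release()
    {
        // The CQ object goes away only when the last holder lets go.
        if( InterlockedDecrement( &m_nRef ) == 0 )
            delete this;
    }
};

class CEndpoint
{
    CCq* m_pInboundCq;
    CCq* m_pOutboundCq;

public:
    CEndpoint() : m_pInboundCq( NULL ), m_pOutboundCq( NULL ) {}

    void Initialize( CCq* pInboundCq, CCq* pOutboundCq )
    {
        // The EP references both CQs for its entire lifetime...
        m_pInboundCq = pInboundCq;
        m_pInboundCq->AddRef();

        m_pOutboundCq = pOutboundCq;
        m_pOutboundCq->AddRef();
    }

    ~CEndpoint()
    {
        // ...and releases them only after its own teardown (the QP is
        // destroyed first), so the CQs always outlive the endpoint.
        if( m_pInboundCq != NULL )
            m_pInboundCq->Release();

        if( m_pOutboundCq != NULL )
            m_pOutboundCq->Release();
    }
};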
Signed-off-by: Fab Tillier <[email protected]>

diff -dwup3 -X excl.txt -r c:\dev\openib\ofw\gen1\branches\mlx4_30\trunk\core\al\kernel\al_ndi_cm.c .\core\al\kernel\al_ndi_cm.c
--- c:\dev\openib\ofw\gen1\branches\mlx4_30\trunk\core\al\kernel\al_ndi_cm.c Thu May 31 11:22:16 2012
+++ .\core\al\kernel\al_ndi_cm.c Tue Jun 19 17:44:59 2012
@@ -1671,6 +1671,7 @@ __ndi_fill_cm_rep(
     p_cm_rep->qpn = qpn;
 
     p_cm_rep->init_depth = p_rep->init_depth;
+    p_cm_rep->resp_res = p_rep->resp_res;
     p_cm_rep->failover_accepted = IB_FAILOVER_ACCEPT_UNSUPPORTED;
     p_cm_rep->flow_ctrl = TRUE;    /* HCAs must support end-to-end flow control. */
     p_cm_rep->rnr_retry_cnt = QP_ATTRIB_RNR_RETRY;
diff -dwup3 -X excl.txt -r c:\dev\openib\ofw\gen1\branches\mlx4_30\trunk\core\al\kernel\al_proxy_ndi.c .\core\al\kernel\al_proxy_ndi.c
--- c:\dev\openib\ofw\gen1\branches\mlx4_30\trunk\core\al\kernel\al_proxy_ndi.c Thu May 31 11:22:16 2012
+++ .\core\al\kernel\al_proxy_ndi.c Fri Aug 03 14:21:53 2012
@@ -99,7 +99,7 @@ __ndi_create_cq(
     if( !h_ca )
     {
         status = IB_INVALID_CA_HANDLE;
-        goto proxy_create_cq_err1;
+        goto proxy_create_cq_err;
     }
 
     cq_create.size = p_ioctl->in.size;
@@ -111,13 +111,13 @@ __ndi_create_cq(
 
     status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );
     if( status != IB_SUCCESS )
-        goto proxy_create_cq_err2;
+        goto proxy_create_cq_err;
 
     status = create_cq( h_ca, &cq_create,
         (void*)(ULONG_PTR)p_ioctl->in.context, pfn_ev, &h_cq, p_umv_buf );
     if( status != IB_SUCCESS )
-        goto proxy_create_cq_err2;
+        goto proxy_create_cq_err;
 
     status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );
     if( status == IB_SUCCESS )
@@ -131,10 +131,7 @@ __ndi_create_cq(
     {
         h_cq->obj.pfn_destroy( &h_cq->obj, NULL );
 
-proxy_create_cq_err2:
-        cl_waitobj_deref( cq_create.h_wait_obj );
-
-proxy_create_cq_err1:
+proxy_create_cq_err:
         p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;
         p_ioctl->out.h_cq = AL_INVALID_HANDLE;
         p_ioctl->out.size = 0;
diff -dwup3 -X excl.txt -r c:\dev\openib\ofw\gen1\branches\mlx4_30\trunk\ulp\nd\user\NdAdapter.cpp .\ulp\nd\user\NdAdapter.cpp
--- c:\dev\openib\ofw\gen1\branches\mlx4_30\trunk\ulp\nd\user\NdAdapter.cpp Thu May 31 11:22:11 2012
+++ .\ulp\nd\user\NdAdapter.cpp Wed May 23 18:26:49 2012
@@ -123,7 +123,7 @@ HRESULT CAdapter::Initialize(
         RtlCopyMemory( &m_Addr.v4, pAddr, sizeof(m_Addr.v4) );
         ND_PRINT( TRACE_LEVEL_INFORMATION, ND_DBG_NDI,
             ("Local address: IP %#x, port %#hx\n",
-            cl_hton32(m_Addr.v4.sin_addr.S_un.S_addr), cl_hton16(m_Addr.v4.sin_port) ) );
+            _byteswap_ulong(m_Addr.v4.sin_addr.S_un.S_addr), _byteswap_ushort(m_Addr.v4.sin_port) ) );
         break;
 
     case AF_INET6:
         RtlCopyMemory( &m_Addr.v6, pAddr, sizeof(m_Addr.v6) );
@@ -542,7 +542,7 @@ HRESULT CAdapter::GetLocalAddress(
         *(struct sockaddr_in*)pAddr = m_Addr.v4;
         ND_PRINT( TRACE_LEVEL_INFORMATION, ND_DBG_NDI,
             ("Local address: IP %#x, port %#hx\n",
-            cl_hton32(m_Addr.v4.sin_addr.S_un.S_addr), cl_hton16(m_Addr.v4.sin_port) ) );
+            _byteswap_ulong(m_Addr.v4.sin_addr.S_un.S_addr), _byteswap_ushort(m_Addr.v4.sin_port) ) );
         return S_OK;
 
     case AF_INET6:
@@ -635,8 +635,8 @@ HRESULT CAdapter::OpenCa(
     ual_get_uvp_name_ioctl_t al_ioctl;
 
     /* Initialize assuming no user-mode support */
-    cl_memclr( &al_ioctl, sizeof(al_ioctl) );
-    cl_memclr( &m_Ifc, sizeof(m_Ifc) );
+    RtlZeroMemory( &al_ioctl, sizeof(al_ioctl) );
+    RtlZeroMemory( &m_Ifc, sizeof(m_Ifc) );
 
     /* init with the guid */
     m_Ifc.guid = CaGuid;
@@ -684,7 +684,7 @@ HRESULT CAdapter::OpenCa(
     pfn_uvp_ifc( IID_UVP, &m_Ifc.user_verbs );
 
     ual_open_ca_ioctl_t ca_ioctl;
-    cl_memclr( &ca_ioctl, sizeof(ca_ioctl) );
+    RtlZeroMemory( &ca_ioctl, sizeof(ca_ioctl) );
 
     /* Pre call to the UVP library */
     ib_api_status_t status = IB_ERROR;
@@ -729,11 +729,16 @@ HRESULT CAdapter::OpenCa(
     /* Post uvp call */
     if( m_Ifc.user_verbs.post_open_ca )
     {
-        status = m_Ifc.user_verbs.post_open_ca(
+        ib_api_status_t uvpStatus = m_Ifc.user_verbs.post_open_ca(
             CaGuid,
             status,
             (ib_ca_handle_t*)(ULONG_PTR)&m_uCa,
             &ca_ioctl.out.umv_buf );
+        if( status == IB_SUCCESS )
+        {
+            // Don't lose an error status.
+            status = uvpStatus;
+        }
     }
 
     // TODO: Does the UVP need a query call to succeed?
@@ -750,7 +755,7 @@ HRESULT CAdapter::QueryCa(
 
     ual_query_ca_ioctl_t ca_ioctl;
-    cl_memclr( &ca_ioctl, sizeof(ca_ioctl) );
+    RtlZeroMemory( &ca_ioctl, sizeof(ca_ioctl) );
 
     ca_ioctl.in.h_ca = m_hCa;
     ca_ioctl.in.p_ca_attr = (ULONG_PTR)pAttr;
@@ -852,7 +857,7 @@ HRESULT CAdapter::AllocPd(void)
 
     /* Clear the pd_ioctl */
     ual_alloc_pd_ioctl_t pd_ioctl;
-    cl_memclr( &pd_ioctl, sizeof(pd_ioctl) );
+    RtlZeroMemory( &pd_ioctl, sizeof(pd_ioctl) );
 
     /* Pre call to the UVP library */
     ib_api_status_t status;
diff -dwup3 -X excl.txt -r c:\dev\openib\ofw\gen1\branches\mlx4_30\trunk\ulp\nd\user\NdConnector.cpp .\ulp\nd\user\NdConnector.cpp
--- c:\dev\openib\ofw\gen1\branches\mlx4_30\trunk\ulp\nd\user\NdConnector.cpp Thu May 31 11:22:11 2012
+++ .\ulp\nd\user\NdConnector.cpp Tue Jun 19 17:48:26 2012
@@ -357,7 +357,7 @@ HRESULT CConnector::Connect(
 
         // Local address.
         RtlZeroMemory( &ioctl.pdata.src_ip_addr, ATS_IPV4_OFFSET );
-        CopyMemory(
+        RtlCopyMemory(
             &ioctl.pdata.src_ip_addr[ATS_IPV4_OFFSET>>2],
             (uint8_t*)&m_pParent->m_Addr.v4.sin_addr,
             sizeof( m_pParent->m_Addr.v4.sin_addr )
@@ -365,15 +365,18 @@ HRESULT CConnector::Connect(
 
         // Destination address.
         RtlZeroMemory( &ioctl.pdata.dst_ip_addr, ATS_IPV4_OFFSET );
-        CopyMemory(
+        RtlCopyMemory(
             &ioctl.pdata.dst_ip_addr[ATS_IPV4_OFFSET>>2],
             (uint8_t*)&m_PeerAddr.v4.sin_addr,
             sizeof( m_PeerAddr.v4.sin_addr )
             );
+
         ND_PRINT( TRACE_LEVEL_INFORMATION, ND_DBG_NDI,
             ("local address: IP %#x, port %#hx, dest address: IP %#x, port %#hx\n",
-            cl_hton32(m_pParent->m_Addr.v4.sin_addr.S_un.S_addr), cl_hton16(m_pParent->m_Addr.v4.sin_port),
-            cl_hton32(m_PeerAddr.v4.sin_addr.S_un.S_addr), cl_hton16(m_PeerAddr.v4.sin_port) ) );
+            _byteswap_ulong(m_pParent->m_Addr.v4.sin_addr.S_un.S_addr),
+            _byteswap_ushort(m_pParent->m_Addr.v4.sin_port),
+            _byteswap_ulong(m_PeerAddr.v4.sin_addr.S_un.S_addr),
+            _byteswap_ushort(m_PeerAddr.v4.sin_port) ) );
         break;
 
     case AF_INET6:
@@ -390,17 +393,18 @@ HRESULT CConnector::Connect(
         ioctl.pdata.ipv = 0x60;
 
         // Local address.
-        CopyMemory(
+        RtlCopyMemory(
             ioctl.pdata.src_ip_addr,
             m_pParent->m_Addr.v6.sin6_addr.u.Byte,
             sizeof(ioctl.pdata.src_ip_addr)
             );
 
         // Destination address.
-        CopyMemory( ioctl.pdata.dst_ip_addr,
+        RtlCopyMemory( ioctl.pdata.dst_ip_addr,
             m_PeerAddr.v6.sin6_addr.u.Byte,
             sizeof(ioctl.pdata.dst_ip_addr)
             );
+
         break;
 
     default:
@@ -726,7 +730,8 @@ HRESULT CConnector::GetLocalAddress(
 #endif
     ND_PRINT( TRACE_LEVEL_INFORMATION, ND_DBG_NDI,
         ("Local address: IP %#x, port %#hx\n",
-        cl_hton32(pAddrV4->sin_addr.S_un.S_addr), cl_hton16(pAddrV4->sin_port) ) );
+        _byteswap_ulong(pAddrV4->sin_addr.S_un.S_addr),
+        _byteswap_ushort(pAddrV4->sin_port) ) );
 
     return S_OK;
 }
@@ -843,7 +848,8 @@ HRESULT CConnector::GetPeerAddress(
         *(struct sockaddr_in*)pAddress = m_PeerAddr.v4;
         ND_PRINT( TRACE_LEVEL_INFORMATION, ND_DBG_NDI,
             ("Peer address: IP %#x, port %#hx\n",
-            cl_hton32(m_PeerAddr.v4.sin_addr.S_un.S_addr), cl_hton16(m_PeerAddr.v4.sin_port) ) );
+            _byteswap_ulong(m_PeerAddr.v4.sin_addr.S_un.S_addr),
+            _byteswap_ushort(m_PeerAddr.v4.sin_port) ) );
         break;
 
     case AF_INET6:
diff -dwup3 -X excl.txt -r c:\dev\openib\ofw\gen1\branches\mlx4_30\trunk\ulp\nd\user\NdCq.cpp .\ulp\nd\user\NdCq.cpp
--- c:\dev\openib\ofw\gen1\branches\mlx4_30\trunk\ulp\nd\user\NdCq.cpp Thu May 31 11:22:11 2012
+++ .\ulp\nd\user\NdCq.cpp Wed May 23 18:26:52 2012
@@ -167,7 +167,7 @@ namespace NetworkDirect
 
     /* Clear the IOCTL buffer */
     ual_modify_cq_ioctl_t cq_ioctl;
-    cl_memclr( &cq_ioctl, sizeof(cq_ioctl) );
+    RtlZeroMemory( &cq_ioctl, sizeof(cq_ioctl) );
 
     /* Call the uvp pre call if the vendor library provided a valid ca handle */
     if( m_uCq && m_pParent->m_Ifc.user_verbs.pre_resize_cq )
@@ -280,15 +280,13 @@ exit:
 
     while( nResults-- )
     {
-        ib_wc_t wc;
-        ib_wc_t* pWc = &wc;
-        ib_wc_t* pDoneWc;
-        wc.p_next = NULL;
-        ib_api_status_t status =
-            m_pParent->m_Ifc.user_verbs.poll_cq( m_uCq, &pWc, &pDoneWc );
-        if( status != IB_SUCCESS )
+        uvp_wc_t wc;
+        int n = m_pParent->m_Ifc.user_verbs.poll_cq_array( m_uCq, 1, &wc );
+        if( n <= 0 )
+        {
             break;
+        }
 
         pResults[i] = (ND_RESULT*)wc.wr_id;
         if( wc.wc_type == IB_WC_RECV )
@@ -377,7 +375,7 @@ exit:
 
     /* Clear the IOCTL buffer */
     ual_create_cq_ioctl_t cq_ioctl;
-    cl_memclr( &cq_ioctl, sizeof(cq_ioctl) );
+    RtlZeroMemory( &cq_ioctl, sizeof(cq_ioctl) );
 
     /* Pre call to the UVP library */
     ib_api_status_t status;
diff -dwup3 -X excl.txt -r c:\dev\openib\ofw\gen1\branches\mlx4_30\trunk\ulp\nd\user\NdEndpoint.cpp .\ulp\nd\user\NdEndpoint.cpp
--- c:\dev\openib\ofw\gen1\branches\mlx4_30\trunk\ulp\nd\user\NdEndpoint.cpp Thu May 31 11:22:11 2012
+++ .\ulp\nd\user\NdEndpoint.cpp Wed May 23 18:26:49 2012
@@ -73,6 +73,8 @@ namespace NetworkDirect
 CEndpoint::CEndpoint(void) :
     m_nRef( 1 ),
     m_pParent( NULL ),
+    m_pInboundCq( NULL ),
+    m_pOutboundCq( NULL ),
     m_hQp( 0 )
 {
 }
@@ -82,6 +84,16 @@ CEndpoint::~CEndpoint(void)
     if( m_hQp )
         DestroyQp();
 
+    if( m_pInboundCq != NULL )
+    {
+        m_pInboundCq->Release();
+    }
+
+    if( m_pOutboundCq != NULL )
+    {
+        m_pOutboundCq->Release();
+    }
+
     if( m_pParent )
         m_pParent->Release();
 }
@@ -110,6 +122,12 @@ HRESULT CEndpoint::Initialize(
     m_pParent = pParent;
     m_pParent->AddRef();
 
+    m_pInboundCq = pInboundCq;
+    m_pInboundCq->AddRef();
+
+    m_pOutboundCq = pOutboundCq;
+    m_pOutboundCq->AddRef();
+
     CL_ASSERT(
         m_pParent->m_Ifc.user_verbs.pre_create_qp != NULL ||
         m_pParent->m_Ifc.user_verbs.post_create_qp != NULL ||
@@ -782,7 +800,7 @@ HRESULT CEndpoint::CreateQp(
 
     /* Setup the qp_ioctl */
     ual_create_qp_ioctl_t qp_ioctl;
-    cl_memclr( &qp_ioctl, sizeof(qp_ioctl) );
+    RtlZeroMemory( &qp_ioctl, sizeof(qp_ioctl) );
 
     qp_ioctl.in.qp_create.qp_type = IB_QPT_RELIABLE_CONN;
     qp_ioctl.in.qp_create.sq_depth = (uint32_t)nOutboundEntries;
@@ -879,7 +897,7 @@ void CEndpoint::DestroyQp()
         m_pParent->m_Ifc.user_verbs.pre_destroy_qp( m_uQp );
 
     ual_destroy_qp_ioctl_t qp_ioctl;
-    cl_memclr( &qp_ioctl, sizeof(qp_ioctl) );
+    RtlZeroMemory( &qp_ioctl, sizeof(qp_ioctl) );
     qp_ioctl.in.h_qp = m_hQp;
 
     ND_PRINT( TRACE_LEVEL_INFORMATION, ND_DBG_NDI,
@@ -920,7 +938,7 @@ HRESULT CEndpoint::ModifyQp(
 
     /* Setup the qp_ioctl */
     ual_ndi_modify_qp_ioctl_in_t qp_ioctl;
-    cl_memclr( &qp_ioctl, sizeof(qp_ioctl) );
+    RtlZeroMemory( &qp_ioctl, sizeof(qp_ioctl) );
 
     switch( NewState )
     {
@@ -976,7 +994,7 @@ HRESULT CEndpoint::QueryQp(
     ND_ENTER( ND_DBG_NDI );
 
     ual_query_qp_ioctl_t qp_ioctl;
-    cl_memclr( &qp_ioctl, sizeof(qp_ioctl) );
+    RtlZeroMemory( &qp_ioctl, sizeof(qp_ioctl) );
     qp_ioctl.in.h_qp = m_hQp;
 
     /* Call the uvp pre call if the vendor library provided a valid ca handle */
diff -dwup3 -X excl.txt -r c:\dev\openib\ofw\gen1\branches\mlx4_30\trunk\ulp\nd\user\NdEndpoint.h .\ulp\nd\user\NdEndpoint.h
--- c:\dev\openib\ofw\gen1\branches\mlx4_30\trunk\ulp\nd\user\NdEndpoint.h Thu May 31 11:22:11 2012
+++ .\ulp\nd\user\NdEndpoint.h Wed May 23 18:26:49 2012
@@ -192,6 +192,8 @@ protected:
     volatile LONG m_nRef;
 
     CAdapter* m_pParent;
+    CCq* m_pInboundCq;
+    CCq* m_pOutboundCq;
     uint64_t m_hQp;
     ib_qp_handle_t m_uQp;
diff -dwup3 -X excl.txt -r c:\dev\openib\ofw\gen1\branches\mlx4_30\trunk\ulp\nd\user\SOURCES .\ulp\nd\user\SOURCES
--- c:\dev\openib\ofw\gen1\branches\mlx4_30\trunk\ulp\nd\user\SOURCES Wed Aug 01 17:18:35 2012
+++ .\ulp\nd\user\SOURCES Thu Jul 26 21:26:41 2012
@@ -28,17 +28,13 @@ SOURCES= \
 
 INCLUDES=$(SDK_INC_PATH);..\..\..\inc;..\..\..\inc\user;..\..\..\core\al;\
     ..\..\..\core\al\user;$(ND_SDK_PATH)\include;\
-    ..\..\..\core\complib\user\$(O); \
-    ..\..\..\core\al\user\$(O); \
     ..\..\..\core\ibat_ex\user\$(O);
 
 !if $(_NT_TARGET_VERSION) < 0x602
 INCLUDES=$(INCLUDES) \
     $(PLATFORM_SDK_PATH)\include;
 !endif
-
-
 USER_C_FLAGS=$(USER_C_FLAGS) -DEXPORT_AL_SYMBOLS -DCL_NO_TRACK_MEM -DWPP_OLDCC #/GL
@@ -50,7 +46,6 @@ TARGETLIBS= \
     $(SDK_LIB_PATH)\ws2_32.lib \
     $(SDK_LIB_PATH)\iphlpapi.lib \
     $(TARGETPATH)\*\ibat_ex.lib \
-    $(TARGETPATH)\*\ibal.lib \
     $(TARGETPATH)\*\complib.lib \
     $(SDK_LIB_PATH)\uuid.lib
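A note on the polling change: the loop below sketches the shape an array-based poll allows. The patch itself requests a single entry per poll_cq_array call, which already drops the per-completion ib_wc_t list setup that poll_cq required. The uvp_wc_t layout and the poll signature are reduced to minimal stand-ins here for illustration; they are not the provider's definitions.

#include <stddef.h>
#include <stdint.h>

// Reduced stand-in: only the field the loop needs.
struct uvp_wc_t
{
    uint64_t wr_id;
};

// Stand-in for an array-based poll: returns the number of completions
// written to pWc, or a non-positive value when there is nothing to return.
typedef int (*poll_cq_array_t)( void* hCq, int nEntries, uvp_wc_t* pWc );

// Drains up to nMax completions from hCq and records each wr_id in pResults.
// Returns how many completions were actually retrieved.
size_t DrainCq( void* hCq, poll_cq_array_t pfnPoll, uint64_t* pResults, size_t nMax )
{
    size_t i = 0;
    while( i < nMax )
    {
        uvp_wc_t wc[8];
        int nAsk = (nMax - i) < 8 ? (int)(nMax - i) : 8;

        // One call may return several completions; stop when the CQ is
        // empty or the poll fails.
        int n = pfnPoll( hCq, nAsk, wc );
        if( n <= 0 )
            break;

        for( int j = 0; j < n; j++ )
            pResults[i++] = wc[j].wr_id;
    }
    return i;
}

Batching more than one entry per call, as the array form permits, would be a natural follow-on but is not part of this patch.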
