Built and tested openmpi-1.7rc5 on Cygwin, with the same configuration as the 1.6.3-4 package:
./autogen.sh
configure \
LDFLAGS="-Wl,--export-all-symbols -no-undefined" \
--disable-mca-dso \
--disable-sysv-shmem \
--without-udapl \
--enable-cxx-exceptions \
--with-threads=posix \
--without-cs-fs \
--enable-heterogeneous \
--with-mpi-param_check=always \
--enable-contrib-no-build=vt,libompitrace \
--enable-mca-no-build=paffinity,installdirs-windows,timer-windows,shmem-sysv
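After that the usual Automake targets finish the job; a typical sequence
(the -j level is just an example):

make -j4
make check
make install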
In addition to the three patches carried over from the previous package:
path_null.patch : https://svn.open-mpi.org/trac/ompi/ticket/3371
SHM.patch : POSIX alternative to "SHM_R | SHM_W"
cygwin-dlname.patch : Cygwin-specific, to use the .dll suffix instead of .so
I needed additional patches:
ERROR.patch : ERROR is already defined as a macro, so another
label is needed for the "goto ERROR" statements
interface.patch : same for "interface", which seems to be
defined elsewhere (apparently as a struct macro)
min.patch : min is already defined as a macro
(I also saw a MIN macro defined in the source)
mpifh.patch : to avoid an undefined-symbol error;
libmpi_mpifh_la_LIBADD needs
$(top_builddir)/ompi/libmpi.la
winsock.patch : guards against <winsock2.h> being pulled in on Cygwin
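For reference, the whole set can be applied before running autogen.sh
along these lines (the patches/ location and the -p1 strip level are
assumptions; some of the diffs below carry different path prefixes, so
the -p level may need adjusting per patch):

cd openmpi-1.7rc5
for p in path_null SHM cygwin-dlname ERROR interface min mpifh winsock
do
  patch -p1 < ../patches/$p.patch
done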
Full package here:
http://matzeri.altervista.org/cygwin-1.7/openmpi/
osu-micro-benchmarks-3.7 results here:
http://matzeri.altervista.org/works/ompi/
Regards
Marco
--- origsrc/openmpi-1.6.3/ompi/debuggers/Makefile.am 2012-04-03 09:30:25.000000000 -0500
+++ src/openmpi-1.6.3/ompi/debuggers/Makefile.am 2012-11-15 19:52:47.037390400 -0600
@@ -58,9 +58,9 @@ libdebuggers_la_SOURCES = \
$(headers) \
ompi_debuggers.c
libdebuggers_la_CPPFLAGS = \
- -DOMPI_MSGQ_DLL=\"$(pkglibdir)/libompi_dbg_msgq.so\" \
- -DOMPI_MSGQ_DLL_PREFIX=\"libompi_dbg_msgq\" \
- -DOMPI_MPIHANDLES_DLL_PREFIX=\"libompi_dbg_mpihandles\"
+ -DOMPI_MSGQ_DLL=\"$(pkglibdir)/cygompi_dbg_msgq.dll\" \
+ -DOMPI_MSGQ_DLL_PREFIX=\"cygompi_dbg_msgq\" \
+ -DOMPI_MPIHANDLES_DLL_PREFIX=\"cygompi_dbg_mpihandles\"
libompi_debugger_canary_la_SOURCES = \
ompi_debugger_canary.c
--- orig/openmpi-1.7rc5/ompi/mca/bcol/basesmuma/bcol_basesmuma_buf_mgmt.c 2012-10-05 13:13:34.000000000 +0200
+++ openmpi-1.7rc5/ompi/mca/bcol/basesmuma/bcol_basesmuma_buf_mgmt.c 2012-12-19 11:18:55.123709300 +0100
@@ -213,7 +213,7 @@
pload_mgmt->data_buffs = malloc(malloc_size);
if( !pload_mgmt->data_buffs) {
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
/* setup the input file for the shared memory connection manager */
@@ -234,7 +234,7 @@
input_file,
false);
if( OMPI_SUCCESS != ret ) {
- goto ERROR;
+ goto exit_ERROR;
}
/* now we exchange offset info - don't assume symmetric virtual memory
@@ -247,7 +247,7 @@
(void **)pload_mgmt->data_buffs, mem_offset, 0,
pload_mgmt->size_of_group);
if( OMPI_SUCCESS != ret ) {
- goto ERROR;
+ goto exit_ERROR;
}
/* convert memory offset to virtual address in current rank */
@@ -285,7 +285,7 @@
return OMPI_SUCCESS;
-ERROR:
+exit_ERROR:
return ret;
}
#endif
@@ -387,7 +387,7 @@
pload_mgmt->data_buffs = (mca_bcol_basesmuma_payload_t *)
malloc(malloc_size);
if( !pload_mgmt->data_buffs) {
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
/* allocate some memory to hold the offsets */
@@ -411,7 +411,7 @@
input_file,cs->payload_base_fname,
false);
if( OMPI_SUCCESS != ret ) {
- goto ERROR;
+ goto exit_ERROR;
}
@@ -429,7 +429,7 @@
sm_bcol_module->super.sbgp_partner_module->group_list,
sm_bcol_module->super.sbgp_partner_module->group_comm);
if( OMPI_SUCCESS != ret ) {
- goto ERROR;
+ goto exit_ERROR;
}
/* convert memory offset to virtual address in current rank */
@@ -525,7 +525,7 @@
return OMPI_SUCCESS;
-ERROR:
+exit_ERROR:
return ret;
}
--- orig/openmpi-1.7rc5/ompi/mca/bcol/basesmuma/bcol_basesmuma_component.c 2012-10-25 02:56:21.000000000 +0200
+++ openmpi-1.7rc5/ompi/mca/bcol/basesmuma/bcol_basesmuma_component.c 2012-12-19 11:19:45.037772500 +0100
@@ -142,7 +142,7 @@
roundup_to_power_radix(2,cs->basesmuma_num_mem_banks, &dummy);
if ( 0 == cs->basesmuma_num_mem_banks ) {
ret=OMPI_ERROR;
- goto ERROR;
+ goto exit_ERROR;
}
/*
@@ -152,7 +152,7 @@
roundup_to_power_radix(2,cs->basesmuma_num_regions_per_bank, &dummy);
if ( 0 == cs->basesmuma_num_regions_per_bank ) {
ret=OMPI_ERROR;
- goto ERROR;
+ goto exit_ERROR;
}
/* Number of groups supported */
@@ -243,7 +243,7 @@
return ret;
-ERROR:
+exit_ERROR:
return ret;
}
--- orig/openmpi-1.7rc5/ompi/mca/bcol/basesmuma/bcol_basesmuma_setup.c 2012-10-05 13:13:34.000000000 +0200
+++ openmpi-1.7rc5/ompi/mca/bcol/basesmuma/bcol_basesmuma_setup.c 2012-12-19 11:14:50.036461400 +0100
@@ -76,7 +76,7 @@
sm_bcol_module->super.sbgp_partner_module->group_list,
sm_bcol_module->super.sbgp_partner_module->group_comm);
if( OMPI_SUCCESS != ret ) {
- goto ERROR;
+ goto exit_ERROR;
}
/* get the control stucture offsets within the shared memory
@@ -98,7 +98,7 @@
}
-ERROR:
+exit_ERROR:
/* clean up */
if( NULL != send_buff ) {
free(send_buff);
@@ -149,7 +149,7 @@
fprintf(stderr,"Cannot allocate memory for sbuffer or rbuffer \n");
fflush(stderr);
ret = OMPI_ERROR;
- goto ERROR;
+ goto exit_ERROR;
}
/* get my proc information */
@@ -160,7 +160,7 @@
&(sm_bcol_module->super.sbgp_partner_module->my_index),1,OPAL_UINT32);
if (ORTE_SUCCESS != ret) {
- goto ERROR;
+ goto exit_ERROR;
fprintf(stderr,"ORTE error packing my_index!!\n");
fflush(stderr);
}
@@ -168,7 +168,7 @@
/* pack the offset of the allocated region */
ret = opal_dss.pack(send_buffer,&(mem_offset),1,OPAL_UINT64);
if (ORTE_SUCCESS != ret) {
- goto ERROR;
+ goto exit_ERROR;
}
/* get the offsets from all procs, so can setup the control data
@@ -177,7 +177,7 @@
if (ORTE_SUCCESS != (ret = orte_grpcomm.allgather_list(&peers,
send_buffer, recv_buffer))) {
fprintf(stderr,"orte_grpcomm.allgather_list returned error %d\n", ret);
fflush(stderr);
- goto ERROR;
+ goto exit_ERROR;
}
/* unpack the dummy */
@@ -186,7 +186,7 @@
if (ORTE_SUCCESS != ret) {
fprintf(stderr,"unpack returned error %d for dummy \n",ret);
fflush(stderr);
- goto ERROR;
+ goto exit_ERROR;
}
/* get the control stucture offsets within the shared memory
@@ -202,7 +202,7 @@
if (ORTE_SUCCESS != ret) {
fprintf(stderr,"unpack returned error %d for remote index_in_group
\n",ret);
fflush(stderr);
- goto ERROR;
+ goto exit_ERROR;
}
/* get the offset */
@@ -211,7 +211,7 @@
if (ORTE_SUCCESS != ret) {
fprintf(stderr,"unpack returned error %d for remote memory offset
\n",ret);
fflush(stderr);
- goto ERROR;
+ goto exit_ERROR;
}
array_id=SM_ARRAY_INDEX(leading_dim,0,index_in_group);
@@ -235,7 +235,7 @@
return ret;
-ERROR:
+exit_ERROR:
/* free peer list */
peer=(orte_namelist_t *)opal_list_remove_first(&peers);
@@ -280,14 +280,14 @@
sm_bcol_module->super.sbgp_partner_module->group_list,
sm_bcol_module->super.sbgp_partner_module->group_comm);
if( OMPI_SUCCESS != ret ) {
- goto ERROR;
+ goto exit_ERROR;
}
#if 0
ret=base_bcol_basesmuma_exchange_offsets( sm_bcol_module,
(void **)ctl_mgmt->ctl_buffs, mem_offset, loop_limit, leading_dim);
if( OMPI_SUCCESS != ret ) {
- goto ERROR;
+ goto exit_ERROR;
}
#endif
@@ -329,7 +329,7 @@
return ret;
-ERROR:
+exit_ERROR:
return ret;
}
@@ -381,7 +381,7 @@
ctl_mgmt->ctl_buffs= malloc(malloc_size);
if( !ctl_mgmt->ctl_buffs ) {
ret=OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
/* exchange remote addressing information */
@@ -407,7 +407,7 @@
fprintf(stderr,"Cannot allocate memory for
shared_memory_scratch_space. \n");
fflush(stderr);
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
for(i=0 ; i < sm_bcol_module->super.sbgp_partner_module->group_size ; i++ )
{
@@ -434,7 +434,7 @@
fprintf(stderr,"Cannot allocate memory for ctl_buffs_mgmt. ret = %d
\n",ret);
fflush(stderr);
ret = OMPI_ERROR;
- goto ERROR;
+ goto exit_ERROR;
}
/* initialize each individual element */
@@ -468,7 +468,7 @@
return ret;
-ERROR:
+exit_ERROR:
return ret;
}
@@ -526,7 +526,7 @@
list_data_t *item=OBJ_NEW(list_data_t);
if( !item ) {
ret=OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
item->data=(void *)data_ptr;
opal_list_append(&(cs->ctl_structures),(opal_list_item_t *)item);
@@ -549,12 +549,12 @@
opal_list_remove_last(&(cs->ctl_structures));
if( !sm_bcol_module->no_userdata_ctl) {
ret=OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
ret=base_bcol_basesmuma_setup_ctl_struct(
sm_bcol_module, cs, &(sm_bcol_module->colls_no_user_data));
if( OMPI_SUCCESS != ret ) {
- goto ERROR;
+ goto exit_ERROR;
}
/* intialize userdata_ctl */
@@ -562,13 +562,13 @@
opal_list_remove_last(&(cs->ctl_structures));
if( !sm_bcol_module->userdata_ctl) {
ret=OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
ret=base_bcol_basesmuma_setup_ctl_struct(
sm_bcol_module, cs, &(sm_bcol_module->colls_with_user_data));
if( OMPI_SUCCESS != ret ) {
- goto ERROR;
+ goto exit_ERROR;
}
/* used for blocking recursive doubling barrier */
@@ -581,18 +581,18 @@
ret= base_bcol_basesmuma_exchange_ctl_params(sm_bcol_module, cs,
&(sm_bcol_module->colls_no_user_data),sm_bcol_module->no_userdata_ctl);
if( OMPI_SUCCESS != ret ) {
- goto ERROR;
+ goto exit_ERROR;
}
ret= base_bcol_basesmuma_exchange_ctl_params(sm_bcol_module, cs,
&(sm_bcol_module->colls_with_user_data),sm_bcol_module->userdata_ctl);
if( OMPI_SUCCESS != ret ) {
- goto ERROR;
+ goto exit_ERROR;
}
return ret;
-ERROR:
+exit_ERROR:
return ret;
}
--- orig/openmpi-1.7rc5/ompi/mca/coll/ml/coll_ml_allocation.c 2012-10-05 13:13:43.000000000 +0200
+++ openmpi-1.7rc5/ompi/mca/coll/ml/coll_ml_allocation.c 2012-12-19 11:53:14.762723900 +0100
@@ -47,12 +47,12 @@
if (!memory_block->block){
ML_ERROR(("lmngr failed."));
ret = NULL;
- goto ERROR;
+ goto exit_ERROR;
}
return memory_block;
-ERROR:
+exit_ERROR:
if (memory_block){
free(memory_block);
return ret;
@@ -99,13 +99,13 @@
if (NULL == ml_memblock){
ML_ERROR(("Memory block not initialized"));
ret = OMPI_ERROR;
- goto ERROR;
+ goto exit_ERROR;
}
if (ml_memblock->size_block < (num_buffers * num_banks * buffer_size) ){
ML_ERROR(("Not enough memory for all buffers and banks in the memory
block"));
ret = OMPI_ERROR;
- goto ERROR;
+ goto exit_ERROR;
}
pbuff_descs = (ml_payload_buffer_desc_t*)
malloc(sizeof(ml_payload_buffer_desc_t)
@@ -136,19 +136,19 @@
num_banks);
if (NULL == ml_memblock->bank_release_counters) {
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
ml_memblock->ready_for_memsync = (bool *)malloc(sizeof(bool) * num_banks);
if (NULL == ml_memblock->ready_for_memsync) {
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
ml_memblock->bank_is_busy = (bool *)malloc(sizeof(bool) * num_banks);
if (NULL == ml_memblock->bank_is_busy) {
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
/* Set index for first bank to sync */
@@ -177,7 +177,7 @@
return ret;
-ERROR:
+exit_ERROR:
/* Free all buffer descriptors */
if (pbuff_descs){
free(pbuff_descs);
--- orig/openmpi-1.7rc5/ompi/mca/coll/ml/coll_ml_module.c 2012-10-05 13:13:43.000000000 +0200
+++ openmpi-1.7rc5/ompi/mca/coll/ml/coll_ml_module.c 2012-12-19 11:51:55.369519600 +0100
@@ -581,7 +581,7 @@
*/
ML_VERBOSE(0, ("More than a single leader for a group.\n"));
ret=OMPI_ERROR;
- goto ERROR;
+ goto exit_ERROR;
} else {
local_leader_found=true;
}
@@ -603,7 +603,7 @@
fprintf(stderr,"n procs in %d\n",n_procs_in);
ML_VERBOSE(0, ("number of procs in the group unexpeted. Expected %d
Got %d\n",n_procs_selected,sum));
ret=OMPI_ERROR;
- goto ERROR;
+ goto exit_ERROR;
}
/* check to make sure that all have the same list of ranks.
*/
@@ -612,14 +612,14 @@
ll_p1!=-all_selected[module->group_list[i]] ) {
ret=OMPI_ERROR;
ML_VERBOSE(0, ("Mismatch in rank list - element #%d - %d
\n",i,all_selected[module->group_list[i]]));
- goto ERROR;
+ goto exit_ERROR;
}
}
/* return */
return ret;
-ERROR:
+exit_ERROR:
/* return */
return ret;
}
@@ -964,7 +964,7 @@
comm);
if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
ML_VERBOSE(10, ("comm_allreduce_pml failed. root reduction\n"));
- goto ERROR;
+ goto exit_ERROR;
}
/* broadcast the number of groups */
@@ -973,14 +973,14 @@
map_to_comm_ranks,comm);
if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
ML_VERBOSE(10, ("comm_bcast_pml failed. num_total_subgroups bcast\n"));
- goto ERROR;
+ goto exit_ERROR;
}
scratch_space=(int *)malloc(4*sizeof(int)*(*num_total_subgroups));
if (OPAL_UNLIKELY(NULL == scratch_space)) {
ML_VERBOSE(10, ("Cannot allocate memory scratch_space.\n"));
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
if( my_rank == root ) {
sum=0;
@@ -996,7 +996,7 @@
map_to_comm_ranks, comm);
if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
ML_VERBOSE(10, ("comm_allreduce_pml failed. scratch_space bcast\n"));
- goto ERROR;
+ goto exit_ERROR;
}
if( my_rank != root ) {
if( in_num_total_subgroups != (*num_total_subgroups) ) {
@@ -1009,7 +1009,7 @@
if (OPAL_UNLIKELY(NULL == (*array_of_all_subgroup_ranks))) {
ML_VERBOSE(10, ("Cannot allocate memory
array_of_all_subgroup_ranks.\n"));
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
for(i=0 ; i < (*num_total_subgroups) ; i++ ) {
(*array_of_all_subgroup_ranks)[i].root_rank_in_comm=scratch_space[4*i];
@@ -1031,7 +1031,7 @@
if (OPAL_UNLIKELY(NULL == (*list_of_ranks_in_all_subgroups))) {
ML_VERBOSE(10, ("Cannot allocate memory
*list_of_ranks_in_all_subgroups.\n"));
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
}
ret=comm_bcast_pml(*list_of_ranks_in_all_subgroups, root, sum,
@@ -1039,7 +1039,7 @@
map_to_comm_ranks, comm);
if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
ML_VERBOSE(10, ("Bcast failed for list_of_ranks_in_all_subgroups \n"));
- goto ERROR;
+ goto exit_ERROR;
}
/* fill in subgroup ranks */
@@ -1053,7 +1053,7 @@
ML_VERBOSE(10, ("Cannot allocate memory for rank_data \n"));
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
for(j=0 ; j < (*array_of_all_subgroup_ranks)[i].n_ranks ; j++ ) {
(*array_of_all_subgroup_ranks)[i].rank_data[j].rank=
@@ -1204,7 +1204,7 @@
(*array_of_all_subgroup_ranks)[i_sg].list_connected_nodes)) {
ML_VERBOSE(10, ("Cannot allocate memory for
list_connected_nodes - i_cnt %d\n",i_cnt));
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
} else {
(*array_of_all_subgroup_ranks)[i_sg].list_connected_nodes=NULL;
@@ -1228,7 +1228,7 @@
(*array_of_all_subgroup_ranks)[i_sg].rank_data[i_rank].list_connected_subgroups)
) {
ML_VERBOSE(10, ("Cannot allocate memory for rank
list_connected_subgroups - cnt %d\n",cnt));
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
/* reset the conuter, so can fill it in on the fly */
(*array_of_all_subgroup_ranks)[i_sg].rank_data[i_rank].
@@ -1323,7 +1323,7 @@
if (OPAL_UNLIKELY(NULL == topo->sort_list)) {
ML_VERBOSE(10, ("Cannot allocate memory for sort_list.\n"));
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
/* find subgroup index, and rank within that subgroup where I am
@@ -1367,7 +1367,7 @@
return ret;
-ERROR:
+exit_ERROR:
if(scratch_space) {
free(scratch_space);
};
@@ -1453,7 +1453,7 @@
if (OPAL_UNLIKELY(NULL == (*sub_group_meta_data))) {
ML_VERBOSE(10, ("Cannot allocate memory for
sub_group_meta_data.\n"));
rc = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
(*sub_group_meta_data)[(*num_total_subgroups)].root_rank_in_comm=sg_root;
(*sub_group_meta_data)[(*num_total_subgroups)].n_ranks=1;
@@ -1463,7 +1463,7 @@
if (OPAL_UNLIKELY(NULL == temp[knt2] ) ){
ML_VERBOSE(10, ("Cannot allocate memory for
sub_group_meta_data.\n"));
rc = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
sg_id=(*num_total_subgroups);
(*num_total_subgroups)++;
@@ -1494,7 +1494,7 @@
if (OPAL_UNLIKELY(NULL == (*list_of_ranks_in_all_subgroups))) {
ML_VERBOSE(10, ("Cannot allocate memory for
list_of_ranks_in_all_subgroups.\n"));
rc = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
/* loop over new subgroups */
@@ -1523,7 +1523,7 @@
/* return */
return rc;
-ERROR:
+exit_ERROR:
return rc;
}
@@ -1644,7 +1644,7 @@
if (OPAL_UNLIKELY(NULL == bcols_in_use)) {
ML_VERBOSE(10, ("Cannot allocate memory for bcols_in_use.\n"));
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
/* setup pointers to arrays that will hold bcol parameters. Since
* given bols are not instantiated in all processes, need to get this
@@ -1676,7 +1676,7 @@
ranks_map = (int *) malloc(sizeof(int) * ompi_comm_size(ml_module->comm));
if (NULL == ranks_map) {
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
for (i = 0; i < ompi_comm_size(ml_module->comm); i++) {
ranks_map[i] = i;
@@ -1691,7 +1691,7 @@
ranks_map, ml_module->comm);
if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
ML_VERBOSE(10, ("comm_allreduce_pml failed. bcols_in_use
reduction\n"));
- goto ERROR;
+ goto exit_ERROR;
}
/*
@@ -1813,7 +1813,7 @@
mca_coll_ml_component.payload_buffer_size,
ml_module->data_offset));
-ERROR:
+exit_ERROR:
if (NULL != ranks_map) {
free(ranks_map);
}
@@ -1971,7 +1971,7 @@
if (NULL != exclude_sbgp_name && NULL != include_sbgp_name) {
ret = OMPI_ERROR;
- goto ERROR;
+ goto exit_ERROR;
}
ML_VERBOSE(10,("include %s exclude %s size %d", include_sbgp_name,
exclude_sbgp_name, n_hierarchies));
@@ -1981,14 +1981,14 @@
if (OPAL_UNLIKELY(NULL == all_selected)) {
ML_VERBOSE(10, ("Cannot allocate memory.\n"));
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
map_to_comm_ranks = (int *) calloc(ompi_comm_size(ml_module->comm),
sizeof(int));
if (OPAL_UNLIKELY(NULL == map_to_comm_ranks)) {
ML_VERBOSE(10, ("Cannot allocate memory.\n"));
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
/*
@@ -2002,7 +2002,7 @@
if (OPAL_UNLIKELY(NULL == copy_procs)) {
ML_VERBOSE(10, ("Cannot allocate memory.\n"));
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
for (i = 0; i < ompi_comm_size(ml_module->comm); i++) {
@@ -2018,7 +2018,7 @@
if (OPAL_UNLIKELY(NULL == index_proc_selected)) {
ML_VERBOSE(10, ("Cannot allocate memory.\n"));
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
/* get my proc pointer - used to identify myself in the list */
@@ -2029,7 +2029,7 @@
if (OPAL_UNLIKELY(NULL == topo->component_pairs)) {
ML_VERBOSE(10, ("Cannot allocate memory.\n"));
ret = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
n_hier = 0;
@@ -2213,7 +2213,7 @@
n_procs_in, map_to_comm_ranks ,ml_module->comm);
if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
ML_VERBOSE(10, ("comm_allreduce_pml failed.\n"));
- goto ERROR;
+ goto exit_ERROR;
}
/* do some sanity checks */
@@ -2222,7 +2222,7 @@
n_procs_in, ll_p1, all_selected, module );
if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
ML_VERBOSE(10, ("check_global_view_of_subgroups failed.\n"));
- goto ERROR;
+ goto exit_ERROR;
}
}
@@ -2254,7 +2254,7 @@
if( OMPI_SUCCESS != ret ) {
ML_VERBOSE(10, (" Error: get_new_subgroup_data returned %d
\n",ret));
- goto ERROR;
+ goto exit_ERROR;
}
/* am I done ? */
@@ -2310,7 +2310,7 @@
if (OPAL_UNLIKELY(NULL == pair->bcol_modules)) {
ML_VERBOSE(10, ("Failed to create new modules.\n"));
ret = OMPI_ERROR;
- goto ERROR;
+ goto exit_ERROR;
}
if (pair->bcol_component->need_ordering) {
@@ -2322,7 +2322,7 @@
if (OPAL_UNLIKELY(OMPI_SUCCESS !=
append_new_network_context(pair))) {
ML_VERBOSE(10, ("Exit with error. - append new network
context\n"));
ret = OMPI_ERROR;
- goto ERROR;
+ goto exit_ERROR;
}
for (i = 0; i < pair->num_bcol_modules; ++i) {
@@ -2452,11 +2452,11 @@
NULL != include_sbgp_name ? include_sbgp_name :
exclude_sbgp_name
));
ret = OMPI_ERROR;
- goto ERROR;
+ goto exit_ERROR;
}
ML_VERBOSE(10, ("Empty hierarchy..."));
ret = OMPI_SUCCESS;
- goto ERROR;
+ goto exit_ERROR;
}
topo->n_levels = n_hier;
@@ -2479,7 +2479,7 @@
map_to_comm_ranks, ml_module->comm);
if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
ML_VERBOSE(10, ("comm_allreduce_pml failed. all_reduce_buffer2_in
reduction\n"));
- goto ERROR;
+ goto exit_ERROR;
}
topo->global_lowest_hier_group_index = all_reduce_buffer2_out[0];
@@ -2504,7 +2504,7 @@
if (OPAL_UNLIKELY(OMPI_SUCCESS != ret)) {
ML_VERBOSE(10, ("comm_allreduce_pml failed: bcols_in_use
reduction %d \n",ret));
- goto ERROR;
+ goto exit_ERROR;
}
/* cache the ML hierarchical description on the tree */
@@ -2517,7 +2517,7 @@
ret = mca_coll_ml_fill_in_route_tab(topo, ml_module->comm);
if (OMPI_SUCCESS != ret) {
ML_ERROR(("mca_coll_ml_fill_in_route_tab returned an
error.\n"));
- goto ERROR;
+ goto exit_ERROR;
}
}
@@ -2528,7 +2528,7 @@
** correctly with this module.
*/
-ERROR:
+exit_ERROR:
ML_VERBOSE(10, ("Discovery done\n"));
@@ -2716,7 +2716,7 @@
if (NULL == all_reachable_ranks) {
ML_VERBOSE(10, ("Cannot allocate memory.\n"));
rc = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
for (i = 0; i < comm_size; ++i) {
@@ -2727,7 +2727,7 @@
if (NULL == route_table) {
ML_VERBOSE(10, ("Cannot allocate memory.\n"));
rc = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
topo->route_vector = (mca_coll_ml_route_info_t *)
@@ -2735,7 +2735,7 @@
if (NULL == topo->route_vector) {
ML_VERBOSE(10, ("Cannot allocate memory.\n"));
rc = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
all_reachable_ranks[my_rank] = IS_RECHABLE;
@@ -2748,7 +2748,7 @@
if (NULL == route_table[level]) {
ML_VERBOSE(10, ("Cannot allocate memory.\n"));
rc = OMPI_ERR_OUT_OF_RESOURCE;
- goto ERROR;
+ goto exit_ERROR;
}
for (i = 0; i < comm_size; ++i) {
@@ -2766,7 +2766,7 @@
comm);
if (OMPI_SUCCESS != rc) {
ML_VERBOSE(10, ("comm_allreduce failed.\n"));
- goto ERROR;
+ goto exit_ERROR;
}
for (i = 0; i < comm_size; ++i) {
@@ -2872,7 +2872,7 @@
return OMPI_SUCCESS;
-ERROR:
+exit_ERROR:
ML_VERBOSE(10, ("Exit with error status - %d.\n", rc));
if (NULL != route_table) {
--- orig/openmpi-1.7rc5/ompi/mca/btl/tcp/btl_tcp_proc.c 2012-10-05 13:13:36.000000000 +0200
+++ openmpi-1.7rc5/ompi/mca/btl/tcp/btl_tcp_proc.c 2012-12-19 11:41:56.040232400 +0100
@@ -218,15 +218,15 @@
}
-static void mca_btl_tcp_initialise_interface(mca_btl_tcp_interface_t* interface,
+static void mca_btl_tcp_initialise_interface(mca_btl_tcp_interface_t* tcp_interface,
int ifk_index, int index)
{
- interface->kernel_index = ifk_index;
- interface->peer_interface = -1;
- interface->ipv4_address = NULL;
- interface->ipv6_address = NULL;
- interface->index = index;
- interface->inuse = 0;
+ tcp_interface->kernel_index = ifk_index;
+ tcp_interface->peer_interface = -1;
+ tcp_interface->ipv4_address = NULL;
+ tcp_interface->ipv6_address = NULL;
+ tcp_interface->index = index;
+ tcp_interface->inuse = 0;
}
static mca_btl_tcp_interface_t** mca_btl_tcp_retrieve_local_interfaces(void)
--- orig/openmpi-1.7rc5/ompi/mca/coll/sm/coll_sm_reduce.c 2012-10-05 13:13:42.000000000 +0200
+++ openmpi-1.7rc5/ompi/mca/coll/sm/coll_sm_reduce.c 2012-12-19 12:09:30.466041400 +0100
@@ -52,7 +52,7 @@
/*
* Useful utility routine
*/
-#if !defined(__WINDOWS__)
+#if !defined(__WINDOWS__) && !defined(__CYGWIN__)
static inline int min(int a, int b)
{
return (a < b) ? a : b;
--- orig/openmpi-1.7rc5/ompi/mpi/fortran/mpif-h/Makefile.am 2012-10-05 13:13:15.000000000 +0200
+++ openmpi-1.7rc5/ompi/mpi/fortran/mpif-h/Makefile.am 2012-12-19 13:52:41.619261400 +0100
@@ -49,7 +49,7 @@
#
lib_LTLIBRARIES =
-libmpi_mpifh_la_LIBADD = $(OMPI_MPIEXT_MPIFH_LIBS)
+libmpi_mpifh_la_LIBADD = $(top_builddir)/ompi/libmpi.la $(OMPI_MPIEXT_MPIFH_LIBS)
libmpi_mpifh_la_LDFLAGS = -version-info $(libmpi_mpifh_so_version)
# Are we building the mpif.h bindings at all?
--- opal/util/path.c~ 2012-04-03 16:29:52.000000000 +0200
+++ opal/util/path.c 2012-10-30 20:31:43.772749400 +0100
@@ -82,7 +82,7 @@
/* If absolute path is given, return it without searching. */
if( opal_path_is_absolute(fname) ) {
- return opal_path_access(fname, "", mode);
+ return opal_path_access(fname, NULL , mode);
}
/* Initialize. */
--- orig/openmpi-1.6.3/opal/mca/shmem/sysv/shmem_sysv_component.c 2012-04-03 16:29:49.000000000 +0200
+++ openmpi-1.6.3/opal/mca/shmem/sysv/shmem_sysv_component.c 2012-11-01 21:54:18.687021300 +0100
@@ -43,6 +43,7 @@
#endif /* HAVE_SYS_IPC_H */
#if HAVE_SYS_SHM_H
#include <sys/shm.h>
+#include <sys/stat.h>
#endif /* HAVE_SYS_SHM_H */
#include "opal/constants.h"
@@ -165,7 +166,7 @@
/* if we are here, then let the run-time test games begin */
if (-1 == (shmid = shmget(IPC_PRIVATE, (size_t)(getpagesize()),
- IPC_CREAT | IPC_EXCL | SHM_R | SHM_W))) {
+ IPC_CREAT | IPC_EXCL | S_IRWXU ))) {
goto out;
}
else if ((void *)-1 == (addr = shmat(shmid, NULL, 0))) {
--- orig/openmpi-1.6.3/opal/mca/shmem/sysv/shmem_sysv_module.c 2012-04-03 16:29:49.000000000 +0200
+++ openmpi-1.6.3/opal/mca/shmem/sysv/shmem_sysv_module.c 2012-11-01 21:55:21.316603500 +0100
@@ -41,6 +41,7 @@
#endif /* HAVE_SYS_IPC_H */
#if HAVE_SYS_SHM_H
#include <sys/shm.h>
+#include <sys/stat.h>
#endif /* HAVE_SYS_SHM_H */
#ifdef HAVE_STRING_H
#include <string.h>
@@ -197,7 +198,7 @@
* real_size here
*/
if (-1 == (ds_buf->seg_id = shmget(IPC_PRIVATE, real_size,
- IPC_CREAT | IPC_EXCL | SHM_R | SHM_W))) {
+ IPC_CREAT | IPC_EXCL | S_IRWXU ))) {
int err = errno;
char hn[MAXHOSTNAMELEN];
gethostname(hn, MAXHOSTNAMELEN - 1);
--- openmpi-1.7rc5/opal/mca/event/libevent2019/libevent/event.h~ 2012-10-05 13:12:58.000000000 +0200
+++ openmpi-1.7rc5/opal/mca/event/libevent2019/libevent/event.h 2012-12-18 23:46:01.662800600 +0100
@@ -56,7 +56,7 @@
/* For int types. */
#include <evutil.h>
-#ifdef WIN32
+#if defined(WIN32) && !defined(__CYGWIN__)
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
--- openmpi-1.7rc5/opal/mca/event/libevent2019/libevent/include/event2/util.h~ 2012-10-05 13:12:58.000000000 +0200
+++ openmpi-1.7rc5/opal/mca/event/libevent2019/libevent/include/event2/util.h 2012-12-18 23:40:17.394277000 +0100
@@ -67,7 +67,7 @@
#include <netdb.h>
#endif
-#ifdef WIN32
+#if defined(WIN32) && !defined(__CYGWIN__)
#include <winsock2.h>
#else
#include <sys/socket.h>