Package: libopenmpi-dev
Version: 1.2.7~rc2-2
Severity: important

Hi,
In one of my applications I am using nested (cascaded) derived MPI datatypes created with MPI_Type_struct. One of these types is used to send just a part (one MPI_CHAR) of a struct consisting of an int followed by two chars, i.e. the int at the beginning is/should be ignored. This works fine as long as I use this datatype on its own. Unfortunately I also need to send another struct that contains an int and the int-char-char struct from above, and again I construct a custom MPI datatype for it. When sending this nested datatype, it seems that the offset of the char in the inner custom type is disregarded on the receiving end: the received data ('1') is stored in the first int instead of in the following char. I have tested the same code with both lam and mpich, where it worked as expected (storing the '1' in the first char).

The last two lines of the output of the attached test case read

  received global=10 attribute=0 (local=1 public=0)
  received attribute=1 (local=100 public=0)

for openmpi, instead of

  received global=10 attribute=1 (local=100 public=0)
  received attribute=1 (local=100 public=0)

as printed by lam and mpich. The same problem occurs with version 1.3-2 of openmpi.

Cheers,

Markus

-- System Information:
Debian Release: 5.0
  APT prefers testing
  APT policy: (700, 'testing'), (650, 'unstable'), (600, 'experimental')
Architecture: i386 (i686)

Kernel: Linux 2.6.28.4 (SMP w/2 CPU cores)
Locale: LANG=de_DE.UTF-8, LC_CTYPE=de_DE.UTF-8 (charmap=UTF-8)
Shell: /bin/sh linked to /bin/bash

Versions of packages libopenmpi-dev depends on:
ii  libc6           2.7-18       GNU C Library: Shared libraries
ii  libopenmpi1     1.2.7~rc2-2  high performance message passing l
ii  openmpi-common  1.2.7~rc2-2  high performance message passing l

libopenmpi-dev recommends no packages.

libopenmpi-dev suggests no packages.

-- no debconf information
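For reference, the struct layout the attached test case relies on: on this i386 system with a 4-byte int, attribute_ should sit at byte offset 4 inside LocalIndex, and the nested LocalIndex at byte offset 4 inside IndexPair (assuming no unusual padding). A minimal sketch that prints these offsets, using the same struct definitions as the test case:

#include <cstddef>
#include <iostream>

struct LocalIndex { int local_; char attribute_; char public_; };
struct IndexPair  { int global_; LocalIndex local_; };

int main()
{
  // Expected on i386 with a 4-byte int: both offsets are 4.
  std::cout << "offsetof(LocalIndex, attribute_) = "
            << offsetof(LocalIndex, attribute_) << "\n"
            << "offsetof(IndexPair, local_)      = "
            << offsetof(IndexPair, local_) << "\n";
}

These offsets are the displacements that MPI_Address computes in the test case below.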
#include"mpi.h" #include<iostream> struct LocalIndex { int local_; char attribute_; char public_; }; struct IndexPair { int global_; LocalIndex local_; }; int main(int argc, char** argv) { MPI_Init(&argc, &argv); int rank, size; MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); if(size<2) { std::cerr<<"no procs has to be >2"<<std::endl; MPI_Abort(MPI_COMM_WORLD, 99); } MPI_Datatype litype, ptype; // Create custom MPI datatypes { int length[3]={1, 1, 1}; MPI_Aint disp[3]; MPI_Datatype types[3] = {MPI_LB, MPI_CHAR, MPI_UB}; LocalIndex rep[2]; MPI_Address(rep, disp); // lower bound of the datatype MPI_Address(&(rep[0].attribute_), disp+1); MPI_Address(rep+1, disp+2); // upper bound od the datatype for(int i=2; i >= 0; --i) disp[i] -= disp[0]; MPI_Type_struct(3, length, disp, types, &litype); MPI_Type_commit(&litype); if(rank==0) { MPI_Aint lb,extent; MPI_Type_get_extent(litype, &lb, &extent); std::cout<<"litype: lb="<<lb<<" extend="<<extent<<std::endl; MPI_Type_get_true_extent(litype, &lb, &extent); std::cout<<"true litype: lb="<<lb<<" extend="<<extent<<std::endl; int size; MPI_Type_size(litype, &size); std::cout<<"litype size="<<size<<std::endl; } } { int length[4] ={1, 1, 1, 1}; MPI_Aint disp[4]; MPI_Datatype types[4] = {MPI_LB, MPI_INT, litype, MPI_UB}; IndexPair rep[2]; MPI_Address(rep, disp); // lower bound of the datatype MPI_Address(&(rep[0].global_), disp+1); MPI_Address(&(rep[0].local_), disp+2); MPI_Address(rep+1, disp+3); // upper bound of the datatype for(int i=3; i >= 0; --i) disp[i] -= disp[0]; MPI_Type_struct(4, length, disp, types, &ptype); MPI_Type_commit(&ptype); if(rank==0) { MPI_Aint lb,extent; MPI_Type_get_extent(ptype, &lb, &extent); std::cout<<"ptype: lb="<<lb<<" extend="<<extent<<std::endl; MPI_Type_get_true_extent(ptype, &lb, &extent); std::cout<<"true: ptype: lb="<<lb<<" extend="<<extent<<std::endl; int size; MPI_Type_size(ptype, &size); std::cout<<"ptype size="<<size<<std::endl; } } IndexPair pair; if(rank==0) { pair.global_=10; pair.local_.local_=1; pair.local_.attribute_='1'; pair.local_.public_='1'; MPI_Send(&pair, 1, ptype, 1, 199, MPI_COMM_WORLD); MPI_Send(&pair.local_, 1, litype, 1, 199, MPI_COMM_WORLD); }else { pair.global_=0; pair.local_.local_=100; pair.local_.attribute_='0'; pair.local_.public_='0'; if(rank==1) { MPI_Status status; MPI_Recv(&pair, 1, ptype, 0, 199, MPI_COMM_WORLD, &status); std::cout<<"received global="<<pair.global_<<" attribute="<< pair.local_.attribute_<<" (local="<<pair.local_.local_ <<" public="<<pair.local_.public_<<")"<<std::endl; pair.local_.local_=100; pair.local_.attribute_='0'; pair.local_.public_='0'; MPI_Recv(&pair.local_, 1, litype, 0, 199, MPI_COMM_WORLD, &status); std::cout<<"received attribute="<<pair.local_.attribute_ <<" (local="<<pair.local_.local_ <<" public="<<pair.local_.public_<<")"<<std::endl; } } MPI_Barrier(MPI_COMM_WORLD); MPI_Finalize(); }