Hi,
I am wondering whether the MPI-IO collective I/O functions
such as MPI_File_write_ordered(), MPI_File_read_ordered(),
MPI_File_seek_shared(), etc. work correctly on PVFS2 files.
Attached is a little program that tries to check this.
It compiles fine, but the shared file pointer does not appear
to change when accessing a PVFS2 file.
Can you easily identify what is wrong?
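One thing that might help with diagnosis is printing the text of any
error codes the calls return, in case something fails quietly. A minimal
sketch of such a helper (my addition, not in the attached program;
MPI_Error_string() is standard MPI):

    static void check_mpi( int ierr, const char *where, int rank )
    {
        if (ierr != MPI_SUCCESS) {
            char msg[MPI_MAX_ERROR_STRING];
            int len;
            /* Decode the MPI error code into a readable message */
            MPI_Error_string( ierr, msg, &len );
            printf( "Rank %d: %s: %s\n", rank, where, msg );
            fflush( stdout );
        }
    }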
--- Ekow
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/*
 * Test movement of the shared file pointer for
 * MPI_File_write_ordered(), MPI_File_read_ordered(), etc.
 */
int main( int argc, char *argv[] )
{
    int ierrs = 0, ierr;
    int buf[10];                 /* only buf[0] is used on each rank */
    int rank;
    MPI_Comm comm;
    MPI_Status status;
    MPI_File fh;
    MPI_Offset offset, sharedOffset;
    int flag, count;

    MPI_Init( &argc, &argv );
    comm = MPI_COMM_WORLD;
    MPI_Comm_rank( comm, &rank );

    ierr = MPI_File_open( comm, "pvfs2:/mnt/pvfs2/HPCDataDir/testshared.dat",
                          MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh );
    if (ierr) {
        MPI_Abort( comm, 911 );
    }

    MPI_File_get_position( fh, &offset );
    MPI_File_get_position_shared( fh, &sharedOffset );
    printf( "Check 1, Rank %d: offset = %lld, sharedOffset = %lld\n",
            rank, (long long) offset, (long long) sharedOffset );
    fflush( stdout );
    MPI_Barrier( comm );
    buf[0] = rank;
    ierr = MPI_File_write_ordered( fh, buf, 1, MPI_INT, &status );
    /* Collective write of each rank's value, in rank order, so the
     * file should contain 0, 1, 2, ..., nprocs-1.  The shared file
     * pointer should advance by nprocs * sizeof(int) bytes. */
    if (ierr) {
        ierrs++;
        printf( "Rank %d: Writing error occurred\n", rank );
    }
    MPI_File_get_position( fh, &offset );
    MPI_File_get_position_shared( fh, &sharedOffset );
    printf( "Check 2, Rank %d: offset = %lld, sharedOffset = %lld\n",
            rank, (long long) offset, (long long) sharedOffset );
    fflush( stdout );
    MPI_Barrier( comm );
    MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );
    /* Should reset the shared pointer to the beginning of the file */

    buf[0] = -1;
    ierr = MPI_File_read_ordered( fh, buf, 1, MPI_INT, &status );
    /* Collective read of the rank values back from the file, in rank
     * order, so each process should get its own rank in buf[0]. */
    if (ierr) { ierrs++; }

    MPI_File_get_position( fh, &offset );
    MPI_File_get_position_shared( fh, &sharedOffset );
    printf( "Check 3, Rank %d: offset = %lld, sharedOffset = %lld\n",
            rank, (long long) offset, (long long) sharedOffset );
    fflush( stdout );
    MPI_Barrier( comm );
    MPI_Get_count( &status, MPI_INT, &count );
    if (count != 1) {
        ierrs++;
        printf( "Rank %d: Error! Expected to read one int but read %d\n",
                rank, count );
        fflush( stdout );
    }
    if (buf[0] != rank) {
        ierrs++;
        printf( "Rank %d: Error! Did not read expected value (read %d)\n",
                rank, buf[0] );
        fflush( stdout );
    }
    ierr = MPI_File_close( &fh );
    if (ierr) { ierrs++; }

    MPI_Barrier( comm );
    if (rank == 0) {
        ierr = MPI_File_delete( "pvfs2:/mnt/pvfs2/HPCDataDir/testshared.dat",
                                MPI_INFO_NULL );
        if (ierr) { ierrs++; }
    }

    printf( "Rank %d: Exiting, ierrs = %d\n", rank, ierrs );
    MPI_Finalize();
    return EXIT_SUCCESS;
}
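P.S. If it turns out that shared file pointers are simply not supported
on PVFS2, I believe the same ordered pattern can be expressed with
explicit offsets instead (a sketch, assuming one int per rank, untested
on PVFS2):

    /* Each rank computes its own offset; no shared file pointer needed. */
    MPI_Offset off = (MPI_Offset) rank * sizeof(int);
    ierr = MPI_File_write_at_all( fh, off, buf, 1, MPI_INT, &status );
    /* ...and the matching collective read: */
    ierr = MPI_File_read_at_all( fh, off, buf, 1, MPI_INT, &status );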