Dear all,

With this simple code I find I am getting a memory leak when I run on 2 
processors.  Can anyone advise why?

I'm fairly new to MPI (have only done very simple things in the past).  I'm 
trying to do a non-blocking send/recv (from any proc to any proc) but the 
receiving processor doesn't know how much data it is going to be sent, hence 
the the blocking recv of the size in order to allocate the buffer.  Is there a 
better way of doing this?

Thanks,

Mark

#include <mpi.h>

MPI_Request
nonBlockingSend(int *t, int size, const int tag, const int destinationRank)
{
    MPI_Request request1;
    MPI_Isend(&size,1,MPI_INT,destinationRank,0,MPI_COMM_WORLD,&request1);
    MPI_Request request;
    MPI_Isend(t,size,MPI_INT,destinationRank,tag,MPI_COMM_WORLD,&request);
    return request;
}

MPI_Request
nonBlockingRecv(int *&t, int &size, const int tag, const int senderRank)
{
    MPI_Status s1;
    MPI_Recv(&size,1,MPI_INT,senderRank,0,MPI_COMM_WORLD,&s1);
    t = (int *) malloc(size*sizeof(int));
    MPI_Request request;
    MPI_Irecv(t,size,MPI_INT,senderRank,tag,MPI_COMM_WORLD,&request);
    return request;
}

void
communicationComplete(MPI_Request &r)
{
    MPI_Status status;
    MPI_Wait(&r,&status);
}

void
barrier()
{
    MPI_Barrier(MPI_COMM_WORLD);
}

int main(int argc, char *argv[])
{
    MPI_Init(&argc,&argv);
    
    int numProcs,rank;
    MPI_Comm_size(MPI_COMM_WORLD,&numProcs);
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);

    int numIts = 10000000;
    int bufSize = 10;

    // Setup send buffers
    int *sendData = (int *) malloc(bufSize*sizeof(int));
    for(int i=0;i<bufSize;i++)
    sendData[i] = i;
    
    // Perform send and recvs
    for(int i=0;i<numIts;i++)
    {
    if(rank==0)
    {
        for(int proc = 1; proc<numProcs;proc++)
        {
        MPI_Request r = nonBlockingSend(sendData,bufSize,proc,proc);
        communicationComplete(r);
        }
    }
    else
    {
        int *recvData;
        int size;
        MPI_Request r = nonBlockingRecv(recvData,size,rank,0);
        communicationComplete(r);
        free(recvData);
    }
    barrier();
    }
    free(sendData);

    MPI_Finalize();

    return 1;
}




      

Reply via email to