Hi friends,

I'm trying to write a simple parallel master/slave program. In my program the master's task is to distribute a randomly filled array to the workers, and each worker has to print a part of that array. That's all I want to do.

On each iteration the master needs to set the workers to work; afterwards the workers and the master need to terminate.

The problem is that the program doesn't complete; it is probably stuck in an endless loop. Below are my C code and its output. Can anybody help me with this issue? I would appreciate it.

C CODE
==============================================
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <mpi.h>

#define SIZE 11

int master(void);
int worker(int pid);

int main( int argc, char **argv )
{
    int pid;

    system("clear");

    /* Initialize MPI */
    MPI_Init( &argc, &argv );

    /* Get process id */
    MPI_Comm_rank( MPI_COMM_WORLD, &pid );

    if (pid == 0)
        master();
    else
        worker(pid);

    /* Terminate MPI */
    MPI_Finalize();

    printf("Hit any key to continue...");
    getchar();
    return 0;
}

int master()
{
    int j, iteration, nofp, dojob, x_coor[SIZE];
    srand( time(NULL) );

    /* Get number of processes */
    MPI_Comm_size( MPI_COMM_WORLD, &nofp );

    for(iteration = 1; iteration <= 5; iteration++)
    {
        printf("Iteration-%d --> x_coor[.] = ", iteration);
        for( j = 1; j < SIZE; j++ )
        {
            x_coor[j] = rand() % 50 + 1;
            printf("%d ", x_coor[j]);
        }
        printf("\n");

        /* Seed the workers: dojob = 1 means "do the work",
           dojob = 2 means "terminate" */
        if (iteration <= 5)
            dojob = 1;
        else
            dojob = 2;
        MPI_Bcast(&dojob, 1, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Barrier(MPI_COMM_WORLD);
        MPI_Bcast(x_coor, SIZE, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Barrier(MPI_COMM_WORLD);
    }

    return 0;
}

int worker(int pid)
{
    int i, nofp, x_coor[SIZE], dojob;

    /* Get number of processes */
    MPI_Comm_size( MPI_COMM_WORLD, &nofp );

START:
    dojob = 0;
    for(;;)
    {
        /* Receive the job flag and the array from the master */
        MPI_Bcast(&dojob, 1, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Barrier(MPI_COMM_WORLD);
        MPI_Bcast(x_coor, SIZE, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Barrier(MPI_COMM_WORLD);

        if(dojob == 1)
            break;
        else if(dojob == 2)
            return 0;
    }

    /* do the work: print this worker's share of the array */
    for( i = pid; i < SIZE; i = i + nofp )
        printf("x_coor[%d]=%d\n", i, x_coor[i]);

    goto START;
}
OUTPUT
==============================================
Iteration-1 --> x_coor[.] = 33 36 7 28 19 19 8 24 16 10 
Iteration-2 --> x_coor[.] = 39 46 48 46 23 9 32 4 17 15 
Iteration-3 --> x_coor[.] = 43 24 34 10 24 47 32 33 34 8 
Iteration-4 --> x_coor[.] = 22 18 45 28 47 14 48 5 39 14 
Iteration-5 --> x_coor[.] = 16 27 9 13 25 31 24 6 34 42 
x_coor[1]=33
x_coor[3]=7
x_coor[5]=19
x_coor[7]=8
x_coor[9]=16
x_coor[1]=39
x_coor[3]=48
x_coor[5]=23
x_coor[7]=32
x_coor[9]=17
x_coor[1]=43
x_coor[3]=34
x_coor[5]=24
x_coor[7]=32
x_coor[9]=34
x_coor[1]=22
x_coor[3]=45
x_coor[5]=47
x_coor[7]=48
x_coor[9]=39
x_coor[1]=16
x_coor[3]=9
x_coor[5]=25
x_coor[7]=24
x_coor[9]=34


Kind regards to all.

BARIŞ KEÇECİ
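
P.S. While re-reading the code before sending, I noticed that inside the iteration loop `iteration <= 5` is always true, so dojob can never become 2, and the workers stay blocked in MPI_Bcast after master() returns. My guess (and it is only a guess) is that one extra round of the same collective calls is needed after the loop, so the blocked workers can receive the quit signal:

/* my guessed fix, to go right after the iteration loop in master():
   repeat the same Bcast/Barrier sequence the workers are waiting in,
   this time with dojob = 2 so they take their "return 0" branch */
dojob = 2;
MPI_Bcast(&dojob, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Bcast(x_coor, SIZE, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);

Is that the right MPI pattern, or is there a cleaner way to shut down workers that sit in a broadcast loop?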