On Mon, 9 Apr 2018, Songtao Chu wrote:

> static char help[] = "\n\n";
> #include <petscdm.h>
> #include <petscdmda.h>
> 
> 
> int main(int argc,char **argv)
> {
>     PetscMPIInt     rank;
>     PetscErrorCode  ierr;
>     Vec             global,local,natural;
>     DM              da;
>     PetscReal       *val;
>     PetscInt        i,x,xm;
> 
> 
>     ierr = PetscInitialize(&argc,&argv,(char*)0,help);if (ierr) return ierr;
>     ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
> 
>     ierr = DMDACreate1d(PETSC_COMM_WORLD,DM_BOUNDARY_NONE,5,1,1,NULL,&da);CHKERRQ(ierr);
>     ierr = DMSetUp(da);CHKERRQ(ierr);
>     ierr = DMCreateGlobalVector(da,&global);CHKERRQ(ierr);
> 
>     ierr = DMDAGetCorners(da,&x,NULL,NULL,&xm,NULL,NULL);CHKERRQ(ierr);
>     ierr = DMDAVecGetArray(da, global, &val);CHKERRQ(ierr);
>     ierr = PetscSynchronizedPrintf(PETSC_COMM_SELF, "Rank=%d\n", rank);CHKERRQ(ierr);

Shouldn't this first print be removed? It only duplicates the Rank line that is printed again below, right before the values.

>     for (i = x; i < x + xm; ++i) {
>         val[i] = i;
>     }
>     ierr = DMDAVecRestoreArray(da, global, &val);CHKERRQ(ierr);
> 
>     VecView(global,PETSC_VIEWER_STDOUT_WORLD);
> 
>     ierr = DMDAGetCorners(da,&x,NULL,NULL,&xm,NULL,NULL);CHKERRQ(ierr);
>     ierr = DMDAVecGetArray(da, global, &val);CHKERRQ(ierr);
>     ierr = PetscSynchronizedPrintf(PETSC_COMM_SELF, "Rank=%d\n", rank);CHKERRQ(ierr);

Shouldn't this be on PETSC_COMM_WORLD? PetscSynchronizedPrintf only orders output across the ranks of the communicator it is given, so on PETSC_COMM_SELF each call behaves like an ordinary per-process print and the ranks' output can interleave. (There is a small standalone sketch of the intended pattern below the quoted code.)

>     for (i = x; i < x + xm; ++i) {
>         ierr = PetscSynchronizedPrintf(PETSC_COMM_SELF, "%4.f ", val[i]);CHKERRQ(ierr);

Again PETSC_COMM_WORLD?


>     }
>     ierr = DMDAVecRestoreArray(da, global, &val);CHKERRQ(ierr);
>     PetscSynchronizedFlush(PETSC_COMM_SELF, PETSC_STDOUT);
> 
>     ierr = PetscFinalize();
>     return ierr;
> }
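
For reference, here is a minimal standalone sketch of the intended pattern; it is not part of the original mail. Every rank queues its line with PetscSynchronizedPrintf on PETSC_COMM_WORLD, and the lines are emitted in rank order only once all ranks call PetscSynchronizedFlush on that same communicator, so the final flush must use the same communicator as the prints.

static char help[] = "Sketch: synchronized printing on PETSC_COMM_WORLD.\n";
#include <petscsys.h>

int main(int argc,char **argv)
{
    PetscMPIInt    rank;
    PetscErrorCode ierr;

    ierr = PetscInitialize(&argc,&argv,(char*)0,help);if (ierr) return ierr;
    ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
    /* Each rank queues its line on the shared communicator ... */
    ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"Rank=%d\n",rank);CHKERRQ(ierr);
    /* ... and the lines appear in rank order once every rank flushes it. */
    ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT);CHKERRQ(ierr);
    ierr = PetscFinalize();
    return ierr;
}

Run with, say, mpiexec -n 3, this should print Rank=0, Rank=1, Rank=2 in that order, regardless of which rank reaches the print first.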

Attaching the fixed code.

Satish
-------
balay@asterix /home/balay/download-pine
$ ./ex1
Vec Object: 1 MPI processes
  type: seq
0.
1.
2.
3.
4.
Rank=0
   0    1    2    3    4 
balay@asterix /home/balay/download-pine
$ mpiexec -n 2 ./ex1
Vec Object: 2 MPI processes
  type: mpi
Process [0]
0.
1.
2.
Process [1]
3.
4.
Rank=0
   0    1    2 
Rank=1
   3    4 
balay@asterix /home/balay/download-pine
$ mpiexec -n 3 ./ex1
Vec Object: 3 MPI processes
  type: mpi
Process [0]
0.
1.
Process [1]
2.
3.
Process [2]
4.
Rank=0
   0    1 
Rank=1
   2    3 
Rank=2
   4 
balay@asterix /home/balay/download-pine
$ 
static char help[] = "\n\n";
#include <petscdm.h>
#include <petscdmda.h>


int main(int argc,char **argv)
{
    PetscMPIInt     rank;
    PetscErrorCode  ierr;
    Vec             global;
    DM              da;
    PetscReal       *val;
    PetscInt        i,x,xm;


    ierr = PetscInitialize(&argc,&argv,(char*)0,help);if (ierr) return ierr;
    ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);

    ierr = DMDACreate1d(PETSC_COMM_WORLD,DM_BOUNDARY_NONE,5,1,1,NULL,&da);CHKERRQ(ierr);
    ierr = DMSetUp(da);CHKERRQ(ierr);
    ierr = DMCreateGlobalVector(da,&global);CHKERRQ(ierr);

    ierr = DMDAGetCorners(da,&x,NULL,NULL,&xm,NULL,NULL);CHKERRQ(ierr);
    ierr = DMDAVecGetArray(da, global, &val);CHKERRQ(ierr);
    for (i = x; i < x + xm; ++i) {
        val[i] = i;
    }
    ierr = DMDAVecRestoreArray(da, global, &val);CHKERRQ(ierr);

    ierr = VecView(global,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);

    ierr = DMDAGetCorners(da,&x,NULL,NULL,&xm,NULL,NULL);CHKERRQ(ierr);
    ierr = DMDAVecGetArray(da, global, &val);CHKERRQ(ierr);
    ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD, "Rank=%d\n", rank);CHKERRQ(ierr);
    for (i = x; i < x + xm; ++i) {
        ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD, "%4.f ", val[i]);CHKERRQ(ierr);
    }
    ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD, "\n");CHKERRQ(ierr);
    ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD, PETSC_STDOUT);CHKERRQ(ierr);
    ierr = DMDAVecRestoreArray(da, global, &val);CHKERRQ(ierr);

    ierr = VecDestroy(&global);CHKERRQ(ierr);
    ierr = DMDestroy(&da);CHKERRQ(ierr);
    ierr = PetscFinalize();
    return ierr;
}
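
One note on the loop in the fix, in case it looks odd: DMDAVecGetArray returns an array that each rank indexes with global indices over the range it owns, here [x, x+xm) from DMDAGetCorners, so val[i] = i uses the global index i directly rather than a local offset starting at zero.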
