The results of the code listed below are very strange.

Using a single core, the number of degrees of freedom is:

mpirun -np 1 ./untitled
Cycle 0:
   Number of active cells:       24
   Number of degrees of freedom: 889
=====================================

Cycle 1:
   Number of active cells:       192
   Number of degrees of freedom: 6097
=====================================

Cycle 2:
   Number of active cells:       1536
   Number of degrees of freedom: 45025
=====================================

Cycle 3:
   Number of active cells:       12288
   Number of degrees of freedom: 345793
=====================================


But when running MPI with 8 processes, it is:

mpirun -np 8 ./untitled

Cycle 0:
   Number of active cells:       24
   Number of degrees of freedom: 889
=====================================

Cycle 1:
   Number of active cells:       192
   Number of degrees of freedom: 6097
=====================================

Cycle 2:
   Number of active cells:       1536
   Number of degrees of freedom: 45240
=====================================

Cycle 3:
   Number of active cells:       12288
   Number of degrees of freedom: 347340
=====================================


As you can see, from Cycle 2 on the number of degrees of freedom differs 
between the two runs, even though the number of active cells is the same. 
What is wrong with this code?
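
For reference, below is a small diagnostic one could drop in right after 
distribute_dofs() to see how the DoFs end up distributed across the ranks. 
This is only a sketch; it reuses the dof_handler and mpi_communicator 
members from the test code further down and needs no includes beyond the 
ones already there:

    // Diagnostic sketch (not part of the test code): print, on every rank,
    // how many DoFs that rank owns next to the global count.
    const unsigned int rank =
        Utilities::MPI::this_mpi_process(mpi_communicator);
    std::cout << "rank " << rank
              << ": locally owned dofs = " << dof_handler.n_locally_owned_dofs()
              << ", global dofs = " << dof_handler.n_dofs()
              << std::endl;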


The test code is:

#include <iostream>

#include <deal.II/distributed/tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>

#include <deal.II/base/mpi.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/conditional_ostream.h>

#include <deal.II/base/index_set.h>

using namespace dealii;

class Problem
{
public:
    Problem();
    ~Problem();

    void run();

private:
    MPI_Comm mpi_communicator;

    parallel::distributed::Triangulation<3> triangulation;

    DoFHandler<3> dof_handler;
    FE_Q<3> fe;

    ConditionalOStream pcout;

    void make_domain_quater_crack();
};

Problem::Problem() :
mpi_communicator (MPI_COMM_WORLD),
triangulation (mpi_communicator,
               typename Triangulation<3>::MeshSmoothing
                       (Triangulation<3>::smoothing_on_refinement |
                        Triangulation<3>::smoothing_on_coarsening)),
dof_handler(triangulation),
fe(3),
pcout (std::cout,
       (Utilities::MPI::this_mpi_process(mpi_communicator) == 0))
{}

Problem::~Problem()
{
    dof_handler.clear();
}

void Problem::make_domain_quater_crack() {
    std::vector<Point<3>> vertices ={
            {0.0,0.0,0.0},
            {1.0,0.0,0.0},
            {2.0,0.0,0.0},
            {0.0,0.0,1.0},
            {1.0,0.0,1.0},
            {2.0,0.0,1.0},
            {0.0,1.0,0.0},
            {0.0,2.0,0.0},
            {0.0,1.0,1.0},
            {0.0,2.0,1.0},
            {1.0,1.0,0.0},
            {2.0,2.0,0.0},
            {1.0,1.0,1.0},
            {2.0,2.0,1.0},
    };

    const std::vector<std::array<int, GeometryInfo<3>::vertices_per_cell>>
    cell_vertices = {
            {0,1,6,10,3,4,8,12},
            {1,2,10,11,4,5,12,13},
            {10,11,6,7,12,13,8,9},
    };

    const unsigned int n_cells = cell_vertices.size();

    std::vector<CellData<3>> cells(n_cells, CellData<3>());
    for (unsigned int i = 0; i < n_cells; ++i)
    {
        for (unsigned int j = 0; j < GeometryInfo<3>::vertices_per_cell;
             ++j)
            cells[i].vertices[j] = cell_vertices[i][j];
        cells[i].material_id = 0;
    }

    triangulation.create_triangulation(vertices, cells, SubCellData());
}

void Problem::run() {
    make_domain_quater_crack();

    unsigned int max_cycle=4;
    for (unsigned int cycle(0); cycle<max_cycle; ++cycle){
        triangulation.refine_global(1);

        dof_handler.distribute_dofs (fe);

        if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0) {
            const unsigned int n_active_cells = triangulation.n_global_active_cells();
            const unsigned int n_dofs = dof_handler.n_dofs();
            pcout << "Cycle " << cycle << ':'
                  << std::endl
                  << "   Number of active cells:       "
                  << n_active_cells
                  << std::endl
                  << "   Number of degrees of freedom: "
                  << n_dofs
                  << std::endl;
        }
        pcout << "=====================================" << std::endl;
        pcout << std::endl;
    }
}

int main(int argc, char *argv[])
{
    try
    {
        Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
        Problem test;
        test.run ();
    }
    catch (std::exception &exc)
    {
        std::cerr << std::endl << std::endl
                  << "----------------------------------------------------"
                  << std::endl;
        std::cerr << "Exception on processing: " << std::endl
                  << exc.what() << std::endl
                  << "Aborting!" << std::endl
                  << "----------------------------------------------------"
                  << std::endl;
        return 1;
    }
    catch (...)
    {
        std::cerr << std::endl << std::endl
                  << "----------------------------------------------------"
                  << std::endl;
        std::cerr << "Unknown exception!" << std::endl
                  << "Aborting!" << std::endl
                  << "----------------------------------------------------"
                  << std::endl;
        return 1;
    }
    return 0;
}

