Dear deal.II community,

I have been using deal.II for more than a year. I am solving a reaction-diffusion problem and am getting a bizarre runtime error in the parallel solver (I use the PETSc CG solver). The error appeared only after I parallelized the code. After 3-4 days of debugging, I found that it occurs only when I use 'repetitions' in the call GridGenerator::subdivided_hyper_rectangle(triangulation, repetitions, a, b, false) that creates the rectangular mesh. I am attaching a minimal code that shows the issue (minimal_diff_code.cc).

I am calling it bizarre because the code runs perfectly for some combinations of 'repetitions' and global refinement steps, but fails for others. The attached table (error_Table.pdf) shows how the error depends on the number of repetitions and the global refinement level, and in some cases also on the number of MPI processes used. I have tested this with both deal.II 8.5.1 and 9.0.0.
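For reference, the only lines I change between runs are these (excerpt from TopLevel<dim>::make_grid in the attached file):

const std::vector<unsigned int> repetitions{10, 10};  // varied between runs
GridGenerator::subdivided_hyper_rectangle(triangulation, repetitions, a, b, false);
triangulation.refine_global(4);                       // varied between runs

and I launch the program with, for example (assuming the attached file is built into an executable named minimal_diff_code),

mpirun -np 4 ./minimal_diff_code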
I would be grateful if someone could look into this.
Thank you
Navneet
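For completeness, here is the attached minimal_diff_code.cc inline: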
#include <deal.II/base/timer.h>
#include <deal.II/base/parameter_handler.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/function.h>
#include <deal.II/base/logstream.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/index_set.h>
#include <deal.II/base/quadrature_point_data.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/sparse_direct.h>
#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/generic_linear_algebra.h>
#include <deal.II/grid/grid_out.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/grid/manifold_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_accessor.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/mapping_q_eulerian.h>
#include <deal.II/numerics/matrix_tools.h>
#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/physics/transformations.h>
#include <deal.II/physics/elasticity/standard_tensors.h>
#include <deal.II/physics/elasticity/kinematics.h>
#include <iostream>
#include <fstream>
#include <deal.II/distributed/tria.h>
#include <deal.II/distributed/grid_refinement.h>
using namespace dealii;
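// Select the PETSc or Trilinos linear-algebra wrappers (same pattern as step-40).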
namespace LA
{
#if defined(DEAL_II_WITH_PETSC) && !defined(DEAL_II_PETSC_WITH_COMPLEX) && \
!(defined(DEAL_II_WITH_TRILINOS) && defined(FORCE_USE_OF_TRILINOS))
using namespace ::LinearAlgebraPETSc;
# define USE_PETSC_LA
#elif defined(DEAL_II_WITH_TRILINOS)
using namespace ::LinearAlgebraTrilinos;
#else
# error DEAL_II_WITH_PETSC or DEAL_II_WITH_TRILINOS required
#endif
} // namespace LA
template <int dim>
class TopLevel
{
public:
TopLevel ();
~TopLevel ();
void run ();
private:
void make_grid ();
void setup_system ();
void assemble_system ();
void UV_solver ();
MPI_Comm mpi_communicator;
parallel::distributed::Triangulation<dim> triangulation;
const unsigned int n_mpi_processes;
const unsigned int this_mpi_process;
ConditionalOStream pcout;
IndexSet locally_owned_dofs;
IndexSet locally_relevant_dofs;
DoFHandler<dim> dof_handler;
const int poly_degree;
FE_Q<dim> fe;
ConstraintMatrix constraints;
SparsityPattern sparsity_pattern;
LA::MPI::SparseMatrix system_matrix;
LA::MPI::Vector relevant_solution,system_rhs;
LA::MPI::Vector distributed_solution;
LA::MPI::Vector distributed_old_solution;
LA::MPI::Vector stim1_vector,stim2_vector;
QGauss<dim> quad_formula;
QGauss<dim-1> face_quad_formula;
unsigned int n_q_points;
unsigned int n_face_q_points;
std::map<int, std::vector<unsigned int >> map_cell_dof_boundary
,map_half_domain;
unsigned int loop_count;
unsigned int total_virtual_loops;
double theta,diffusion;
const double panf_param_a = 0.12;
const double panf_param_k = 25;
const double panf_param_mu2 = 0.3;
const double panf_param_epsilon0 = 0.002;
const double panf_param_mu1 = 0.25;
const double k_Ta = 47.9;
const double left_boundary_x = -25.0, right_boundary_x = 25.0;
const double left_boundary_y = -25.0, right_boundary_y = 25.0;
const double left_boundary_z = -2.0, right_boundary_z = 2.0;
std::vector<types::global_dof_index> dof_to_time_series_points;
const double I_stim1 = .20;
const double I_stim2 = .60;
unsigned int stim1_key, stim2_key;
int output_timestep_skip;
double time_step;
};
template <int dim>
TopLevel<dim>::TopLevel()
:
mpi_communicator (MPI_COMM_WORLD),
triangulation (mpi_communicator),
n_mpi_processes (Utilities::MPI::n_mpi_processes(mpi_communicator)),
this_mpi_process (Utilities::MPI::this_mpi_process(mpi_communicator)),
pcout (std::cout,(this_mpi_process == 0)),
dof_handler(triangulation),
poly_degree(5),
fe(poly_degree),
quad_formula(poly_degree+1),
face_quad_formula(poly_degree+1),
n_q_points(quad_formula.size()),
n_face_q_points(face_quad_formula.size()),
total_virtual_loops(10),
theta(0.5),
diffusion(1),
stim1_key(0),
stim2_key(0),
output_timestep_skip(10),
time_step(0.022)
{}
template <int dim>
TopLevel<dim>::~TopLevel ()
{
dof_handler.clear();
}
template <int dim>
void TopLevel<dim>::make_grid ()
{
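// Build a [-25, 25] x [-25, 25] rectangle subdivided into repetitions[0] x repetitions[1] cells.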
Point<dim> a(left_boundary_x, left_boundary_y);
Point<dim> b(right_boundary_x, right_boundary_y);
// Please change the repetitions here.
const std::vector<unsigned int> repetitions{10, 10};
GridGenerator::subdivided_hyper_rectangle(triangulation, repetitions, a, b, false);
// Please change the global refinement here.
triangulation.refine_global(4);
}
template <int dim>
void TopLevel<dim>::setup_system()
{
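// Distribute DoFs and extract the locally owned / locally relevant index sets.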
dof_handler.distribute_dofs(fe);
locally_owned_dofs = dof_handler.locally_owned_dofs();
DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
relevant_solution.reinit(locally_owned_dofs,
locally_relevant_dofs,
mpi_communicator);
stim1_vector.reinit(locally_owned_dofs,
locally_relevant_dofs,
mpi_communicator);
stim2_vector.reinit(locally_owned_dofs,
locally_relevant_dofs,
mpi_communicator);
stim1_vector= 1;
distributed_solution.reinit(locally_owned_dofs, mpi_communicator);
system_rhs.reinit(locally_owned_dofs, mpi_communicator);
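// Constraints: hanging nodes plus homogeneous Dirichlet values on boundary id 0
// (the whole boundary, since colorize = false in make_grid).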
constraints.clear();
constraints.reinit(locally_relevant_dofs);
DoFTools::make_hanging_node_constraints(dof_handler, constraints);
VectorTools::interpolate_boundary_values(dof_handler,
0,
Functions::ZeroFunction<dim>(),
constraints);
constraints.close();
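// Build the distributed sparsity pattern and initialize the parallel system matrix with it.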
DynamicSparsityPattern dsp(locally_relevant_dofs);
DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints, false);
SparsityTools::distribute_sparsity_pattern(dsp,
dof_handler.n_locally_owned_dofs_per_processor(),
mpi_communicator,
locally_relevant_dofs);
system_matrix.reinit(locally_owned_dofs,
locally_owned_dofs,
dsp,
mpi_communicator);
}
template <int dim>
void TopLevel<dim>::assemble_system()
{
system_matrix = 0;
system_rhs = 0;
FEValues<dim> UV_fe_values (fe, quad_formula,
update_values
| update_JxW_values
| update_gradients
| update_quadrature_points);
const unsigned int dofs_per_cell = fe.dofs_per_cell;
FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
Vector<double> cell_rhs(dofs_per_cell);
std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
std::vector<double> old_sol_q_points (n_q_points);
std::vector<Tensor<1,dim>> old_sol_grad_q_points (n_q_points);
std::vector<double> u_q_points (n_q_points);
std::vector<double> stim1 (n_q_points);
std::vector<double> stim2 (n_q_points);
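// Loop over locally owned cells and assemble the theta-scheme system:
// the matrix M + dt*theta*D*K and a right-hand side built from the old
// solution and the stimulus terms.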
typename DoFHandler<dim>::active_cell_iterator
cell = dof_handler.begin_active(),
endc = dof_handler.end();
int cell_id = 0;
for (; cell!=endc; ++cell_id, ++cell)
if (cell->is_locally_owned())
{
cell_matrix = 0;
cell_rhs = 0;
UV_fe_values.reinit (cell);
UV_fe_values.get_function_values (relevant_solution, old_sol_q_points);
UV_fe_values.get_function_gradients(relevant_solution, old_sol_grad_q_points);
UV_fe_values.get_function_values (stim1_vector, stim1);
UV_fe_values.get_function_values (stim2_vector, stim2);
for (unsigned int q_point=0; q_point<n_q_points; ++q_point) {
for (unsigned int i=0; i<dofs_per_cell; ++i) {
double phi_i = UV_fe_values.shape_value(i, q_point);
const dealii::Tensor<1, dim> grad_phi_i = UV_fe_values.shape_grad(i, q_point);
for (unsigned int j=0; j<dofs_per_cell; ++j) {
double phi_j = UV_fe_values.shape_value(j, q_point);
const dealii::Tensor<1, dim> grad_phi_j = UV_fe_values.shape_grad(j, q_point);
cell_matrix(i,j)+= phi_i*phi_j*UV_fe_values.JxW (q_point)+
time_step*theta*diffusion*grad_phi_i*grad_phi_j
*UV_fe_values.JxW (q_point);
}
cell_rhs(i) += phi_i*old_sol_q_points[q_point]
*UV_fe_values.JxW (q_point) - time_step*
(1-theta)*diffusion*grad_phi_i*old_sol_grad_q_points[q_point]
*UV_fe_values.JxW (q_point) + time_step*
1*(stim1[q_point]*stim1_key + stim2[q_point]*stim2_key)*phi_i
*UV_fe_values.JxW (q_point);
}
}
cell->get_dof_indices (local_dof_indices);
constraints.distribute_local_to_global(cell_matrix,
cell_rhs,
local_dof_indices,
system_matrix,
system_rhs);
}
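// Add up contributions that different processes made to the same matrix/vector entries.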
system_matrix.compress (VectorOperation::add);
system_rhs.compress (VectorOperation::add);
}
template <int dim>
void TopLevel<dim>::UV_solver ()
{
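// CG with an AMG preconditioner; the SolverControl stops at an absolute
// residual of 1e-6 or after at most n_dofs iterations.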
SolverControl solver_control (dof_handler.n_dofs(), 1e-6);
#ifdef USE_PETSC_LA
LA::SolverCG solver(solver_control, mpi_communicator);
#else
LA::SolverCG solver(solver_control);
#endif
LA::MPI::PreconditionAMG preconditioner;
LA::MPI::PreconditionAMG::AdditionalData data;
#ifdef USE_PETSC_LA
data.symmetric_operator = true;
#else
/* Trilinos defaults are good */
#endif
preconditioner.initialize(system_matrix, data);
solver.solve (system_matrix, distributed_solution, system_rhs,
preconditioner);
pcout << system_matrix.m() << " rows, " << system_matrix.n() << " columns" << std::endl;
constraints.distribute (distributed_solution);
}
template <int dim>
void TopLevel<dim>::run ()
{
make_grid ();
setup_system ();
assemble_system ();
pcout << "assembly is done" << std::endl;
UV_solver();
pcout << "solver is done" << std::endl;
}
int main(int argc, char *argv[])
{
try
{
using namespace dealii;
deallog.depth_console(5);
Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
TopLevel<2> diffusion_problem;
diffusion_problem.run ();
}
catch (std::exception &exc)
{
std::cerr << std::endl << std::endl
<< "----------------------------------------------------"
<< std::endl;
std::cerr << "Exception on processing: " << std::endl
<< exc.what() << std::endl
<< "Aborting!" << std::endl
<< "----------------------------------------------------"
<< std::endl;
return 1;
}
catch (...)
{
std::cerr << std::endl << std::endl
<< "----------------------------------------------------"
<< std::endl;
std::cerr << "Unknown exception!" << std::endl
<< "Aborting!" << std::endl
<< "----------------------------------------------------"
<< std::endl;
return 1;
}
return 0;
}