Hi, I have just written a plain vanilla interface to call the Kelly Error Estimate function, passing a parallel distributed vector as the input vector type. But strangely enough, when I run with MPI on multiple processors in debug mode, I get the following error:
--------------------------------------------------------
An error occurred in line <1351> of file
</home/vikramg/DFT-FE-softwares/softwareCentos/dealiiDev/dealii/include/deal.II/lac/la_parallel_vector.h>
in function
Number
dealii::LinearAlgebra::distributed::Vector<Number>::operator()(unsigned
long long) const [with Number = double]
The violated condition was:
partitioner->in_local_range (global_index) ||
partitioner->ghost_indices().is_element(global_index)
Additional information:
You tried to access element 2 of a distributed vector, but this element
is not stored on the current processor. Note: The range of locally owned
elements is 1377 to 2601, and there are 153 ghost elements that this vector
can access
------------------------------------------------------------------------------------------
I have attached the minimal example and also the stack trace. This error is
probably being triggered somewhere in the estimate function. I am wondering
if I am missing anything here. It runs without any error message on 1
processor.
Regards
Phani
--
The deal.II project is located at http://www.dealii.org/
For mailing list/forum options, see
https://groups.google.com/d/forum/dealii?hl=en
---
You received this message because you are subscribed to the Google Groups
"deal.II User Group" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/d/optout.
stackTrace
Description: Binary data
// Include all required deal.II header files
#include <deal.II/base/quadrature.h>
#include <deal.II/base/function.h>
#include <deal.II/base/logstream.h>
#include <deal.II/base/timer.h>
#include <deal.II/base/table.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/mapping_q1.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/base/tensor_function.h>
#include <deal.II/base/point.h>
#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/utilities.h>
#include <deal.II/lac/lapack_full_matrix.h>
#include <deal.II/numerics/matrix_tools.h>
#include <deal.II/numerics/error_estimator.h>
#include <deal.II/base/parameter_handler.h>
#include <deal.II/grid/grid_refinement.h>
#include <deal.II/grid/tria_boundary_lib.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/grid_in.h>
#include <deal.II/grid/grid_out.h>
#include <deal.II/dofs/dof_renumbering.h>
#include <deal.II/dofs/dof_accessor.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/solver_gmres.h>
#include <deal.II/lac/solver_bicgstab.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/lac/parallel_vector.h>
#include <deal.II/lac/parallel_block_vector.h>
#include <deal.II/matrix_free/matrix_free.h>
#include <deal.II/matrix_free/fe_evaluation.h>
#include <deal.II/lac/slepc_solver.h>
#include <deal.II/base/config.h>
#include <deal.II/base/smartpointer.h>
//Include generic C++ headers
#include <fstream>
#include <iostream>
using namespace dealii;
// Alias for deal.II's MPI-parallel distributed vector of doubles.
// Modern C++ `using` alias instead of `typedef` (same name, same type).
using vectorType = dealii::parallel::distributed::Vector<double>;
int main (int argc, char *argv[])
{
Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv);
//parallel message stream
MPI_Comm mpi_communicator(MPI_COMM_WORLD);
ConditionalOStream pcout(std::cout, (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0));
//compute-time logger
TimerOutput computing_timer(mpi_communicator,pcout, TimerOutput::summary, TimerOutput::wall_times);
double L = 20;
parallel::distributed::Triangulation<3> triangulation(MPI_COMM_WORLD);
GridGenerator::hyper_cube(triangulation,-L,L);
triangulation.refine_global(2);
//GridIn<3> gridin;
//gridin.attach_triangulation(triangulation);
//Read mesh in UCD format generated from Cubit
//std::ifstream f(std::string("mesh_5x5x5.inp").c_str());
//gridin.read_ucd(f);
pcout << "number of elements: "
<< triangulation.n_global_active_cells()
<< std::endl;
FESystem<3> FE(FE_Q<3>(QGaussLobatto<1>(5)), 1);
DoFHandler<3> dofHandler (triangulation);
dofHandler.distribute_dofs(FE);
ConstraintMatrix constraints;
IndexSet locally_owned_dofs=dofHandler.locally_owned_dofs();
IndexSet locally_relevant_dofs;
DoFTools::extract_locally_relevant_dofs(dofHandler, locally_relevant_dofs);
IndexSet ghost_indices=locally_relevant_dofs;
ghost_indices.subtract_set(locally_owned_dofs);
MatrixFree<3,double> matrix_free_data;
constraints.clear();
constraints.reinit(locally_relevant_dofs);
DoFTools::make_hanging_node_constraints(dofHandler,constraints);
matrix_free_data.reinit(dofHandler, constraints, QGaussLobatto<1>(5));
dealii::parallel::distributed::Vector<double> vec;
matrix_free_data.initialize_dof_vector(vec);
vec = 0.0;
Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
KellyErrorEstimator<3>::estimate (dofHandler,
QGauss<2>(3),
typename FunctionMap<3>::type(),
vec,
estimated_error_per_cell);
GridRefinement::refine_and_coarsen_fixed_number(triangulation,
estimated_error_per_cell,
0.3, 0);
triangulation.execute_coarsening_and_refinement();
}
