Dear deal.ii community,

I am encountering an error when running the following program (only the 
relevant parts are shown):

*class NedRTStd*
*{*
*public:*
* struct Parameters*
* {*
*              // Some parameters go here*
* };*
* NedRTStd () = delete;*
* NedRTStd (Parameters &parameters);*
* ~NedRTStd ();*

* void run ();*

*private:*
* void setup_grid ();*
* void setup_system_matrix ();*
* void setup_constraints ();*
* void assemble_system ();*
* void solve_direct ();*
* void solve_iterative ();*
* void solve_iterative_preconditioned ();*
* void output_results () const;*

* MPI_Comm mpi_communicator;*

* Parameters &parameters;*

* parallel::distributed::Triangulation<3> triangulation;*

* FESystem<3>        fe; // Will hold 2 blocks (Nedelec-Raviart-Thomas pair)*
* DoFHandler<3>      dof_handler;*

* std::vector<IndexSet> owned_partitioning;*
* std::vector<IndexSet> relevant_partitioning;*

* AffineConstraints<double> constraints;*

* LA::MPI::BlockSparseMatrix system_matrix;*

* LA::MPI::BlockVector        locally_relevant_solution;*
* LA::MPI::BlockVector        system_rhs;*

* ConditionalOStream pcout;*
* TimerOutput        computing_timer;*
*};*


*NedRTStd::NedRTStd (Parameters &parameters_)*
*:*
*mpi_communicator(MPI_COMM_WORLD),*
*parameters(parameters_),*
*triangulation(mpi_communicator,*
*   typename Triangulation<3>::MeshSmoothing(*
* Triangulation<3>::smoothing_on_refinement |*
* Triangulation<3>::smoothing_on_coarsening)),*
*fe (FE_Nedelec<3>(1), 1,*
* FE_RaviartThomas<3>(1), 1),*
*dof_handler (triangulation),*
*pcout(std::cout, (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)),*
*computing_timer(mpi_communicator,*
*                    pcout,*
*                    TimerOutput::summary,*
*                    TimerOutput::wall_times)*
*{*
*}*


*void NedRTStd::setup_system_matrix ()*
*{*
* TimerOutput::Scope t(computing_timer, "system and constraint setup");*

* dof_handler.distribute_dofs (fe);*

* if (parameters.renumber_dofs)*
* {*
* DoFRenumbering::Cuthill_McKee (dof_handler);*
* }*

* DoFRenumbering::block_wise (dof_handler);*

*        //////////////////////////////////////*
*        // fe has 2 blocks and 6 components*
*        //////////////////////////////////////*
* std::vector<types::global_dof_index> dofs_per_block (2);*
* DoFTools::count_dofs_per_block (dof_handler, dofs_per_block);*
* const unsigned int n_sigma = dofs_per_block[0],*
*    n_u = dofs_per_block[1];*

* pcout << "Number of active cells: "*
* << triangulation.n_global_active_cells()*
* << " (on " << triangulation.n_levels() << " levels)"*
* << std::endl*
* << "Number of degrees of freedom: "*
* << dof_handler.n_dofs()*
* << " (" << n_sigma << '+' << n_u << ')'*
* << std::endl;*

* owned_partitioning.resize(2);*
* owned_partitioning[0] = dof_handler.locally_owned_dofs().get_view(0, n_sigma);*
* owned_partitioning[1] = dof_handler.locally_owned_dofs().get_view(n_sigma, n_sigma + n_u);*

* IndexSet locally_relevant_dofs;*
* DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);*
* relevant_partitioning.resize(2);*
* relevant_partitioning[0] = locally_relevant_dofs.get_view(0, n_sigma);*
* relevant_partitioning[1] = locally_relevant_dofs.get_view(n_sigma, n_sigma + n_u);*

* setup_constraints (); // Only hanging nodes here*

* {*
* BlockDynamicSparsityPattern dsp(dofs_per_block, dofs_per_block);*

* DoFTools::make_sparsity_pattern( dof_handler, dsp, constraints, false);*
* SparsityTools::distribute_sparsity_pattern(dsp,*
* dof_handler.locally_owned_dofs_per_processor(),*
* mpi_communicator,*
* locally_relevant_dofs);*

* system_matrix.clear();*
* system_matrix.reinit(owned_partitioning, dsp, mpi_communicator);*
* }*

*        //////////////////////////////////////*
*        // Here something happens...*

*        //////////////////////////////////////*
* locally_relevant_solution.reinit(owned_partitioning,*
* relevant_partitioning,*
* mpi_communicator);*

*        //////////////////////////////////////*
*        // Here also*

*        //////////////////////////////////////*
* system_rhs.reinit(owned_partitioning, mpi_communicator);*
*}*

I get the following exception:

-------------------------------------------------------- 
An error occurred in line <122> of file 
</home/ks/lib_compile/dealii-9.1.1/source/lac/petsc_vector_base.cc> in 
function 
   dealii::PETScWrappers::VectorBase::VectorBase() 
The violated condition was:  
   MultithreadInfo::is_running_single_threaded() 
Additional information:  
   PETSc does not support multi-threaded access, set the thread limit to 1 
in MPI_InitFinalize(). 

.
.
.
--------------------------------------------------------

I don't have a clue what is happening here, since a similar thing works in 
tutorial step-55 (I cannot tell what I am doing wrong). I do not deliberately 
invoke any threading.

Sorry if this post is inappropriate - you are of course not responsible for 
debugging my code - but any hint would be appreciated.

Best,
Konrad

-- 
The deal.II project is located at http://www.dealii.org/
For mailing list/forum options, see 
https://groups.google.com/d/forum/dealii?hl=en
--- 
You received this message because you are subscribed to the Google Groups 
"deal.II User Group" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
To view this discussion on the web visit 
https://groups.google.com/d/msgid/dealii/8479f25b-fdbe-4393-a33b-83749e355c7b%40googlegroups.com.

Reply via email to