Dear deal.II users,

Greetings. I am trying to change step-25 for MPI based on the method 
described in step-40. Unfortunately, I got stuck with an error :( 

> An error occurred in line <1424> of file
> </home/ehsan/apps/candi/deal.II-toolchain/deal.II-v8.4.0/include/deal.II/lac/petsc_matrix_base.h>
> in function
>     void dealii::PETScWrappers::MatrixBase::add(dealii::PETScWrappers::MatrixBase::size_type,
>     dealii::PETScWrappers::MatrixBase::size_type, const size_type*, const PetscScalar*, bool, bool)
> The violated condition was:
>     ierr == 0
> The name and call sequence of the exception was:
>     ExcPETScError(ierr)
> Additional Information:
> An error with error number 63 occurred while calling a PETSc function

I've attached the code in case you would like to see the full context of the 
error. I cannot understand why I'm getting it, since I'm only modifying 
step-25 along the lines of step-40. I thought it would be easy, but it seems 
I was mistaken. Do you have any suggestions about this error? I have figured 
out that I am inserting a new nonzero entry into a sparse matrix, which 
violates the sparsity pattern, but I cannot find where this happens, because 
the code aborts and I cannot debug it properly in Eclipse Mars 2.
Also, is it really possible to parallelize step-25 with MPI by following 
step-40, or am I on the wrong track entirely?
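For reference, the sparsity pattern and matrix setup in my 
make_grid_and_dofs(), which I copied more or less verbatim from step-40, 
looks roughly like this:

    DynamicSparsityPattern dsp(locally_relevant_dofs);
    DoFTools::make_sparsity_pattern (dof_handler, dsp);
    SparsityTools::distribute_sparsity_pattern (dsp,
                                                dof_handler.n_locally_owned_dofs_per_processor(),
                                                mpi_communicator,
                                                locally_relevant_dofs);
    system_matrix.reinit (locally_owned_dofs, locally_owned_dofs, dsp, mpi_communicator);

so I would have expected every entry touched during assembly to be present in 
the pattern.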
Thanks for your help in advance.

Best Regards,
Ehsan

/* ---------------------------------------------------------------------
 *
 * Copyright (C) 2006 - 2015 by the deal.II authors
 *
 * This file is part of the deal.II library.
 *
 * The deal.II library is free software; you can use it, redistribute
 * it, and/or modify it under the terms of the GNU Lesser General
 * Public License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 * The full text of the license can be found in the file LICENSE at
 * the top level of the deal.II distribution.
 *
 * ---------------------------------------------------------------------

 *
 * Author: Ivan Christov, Wolfgang Bangerth, Texas A&M University, 2006
 */



#include <deal.II/base/timer.h>//
#include <deal.II/base/conditional_ostream.h>//
#include <deal.II/base/index_set.h>//
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/function.h>
#include <deal.II/base/logstream.h>
#include <deal.II/base/utilities.h>
#include <deal.II/lac/generic_linear_algebra.h> // this and the following are dependent
namespace LA//
{
#if defined(DEAL_II_WITH_PETSC) && !(defined(DEAL_II_WITH_TRILINOS) && defined(FORCE_USE_OF_TRILINOS))
  using namespace dealii::LinearAlgebraPETSc;
#  define USE_PETSC_LA
#elif defined(DEAL_II_WITH_TRILINOS)
  using namespace dealii::LinearAlgebraTrilinos;
#else
#  error DEAL_II_WITH_PETSC or DEAL_II_WITH_TRILINOS required
#endif
}
#include <deal.II/lac/vector.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/sparsity_tools.h>//
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/lac/petsc_parallel_sparse_matrix.h>//
#include <deal.II/lac/petsc_parallel_vector.h>//
#include <deal.II/lac/petsc_solver.h>//
#include <deal.II/lac/petsc_precondition.h>//

#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_accessor.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/numerics/error_estimator.h>//
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/numerics/matrix_tools.h>
#include <deal.II/numerics/data_out.h>

#include <deal.II/distributed/tria.h>//
#include <deal.II/distributed/grid_refinement.h>//

#include <fstream>
#include <iostream>

namespace Step25
{
  using namespace dealii;



  template <int dim>
  class SineGordonProblem
  {
  public:
    SineGordonProblem ();
    void run ();

  private:
    void make_grid_and_dofs ();
    void assemble_system ();
    void right_hand_side ();
    void compute_nl_term (const LA::MPI::Vector &old_data,
                          const LA::MPI::Vector &new_data,
                          LA::MPI::Vector       &nl_term) const;//
    void compute_nl_matrix (const LA::MPI::Vector &old_data,
                            const LA::MPI::Vector &new_data,
                            LA::MPI::SparseMatrix &nl_matrix) const;//
    unsigned int solve ();
    void output_results (const unsigned int timestep_number) const;

    MPI_Comm                                  mpi_communicator;//
    parallel::distributed::Triangulation<dim>   triangulation;//

    FE_Q<dim>            fe;
    DoFHandler<dim>      dof_handler;

    IndexSet                                  locally_owned_dofs;
    IndexSet                                  locally_relevant_dofs;
    //SparsityPattern      sparsity_pattern;
    LA::MPI::SparseMatrix system_matrix;  // system_matrix is the matrix that we want to invert
    LA::MPI::SparseMatrix laplace_matrix;
    LA::MPI::SparseMatrix mass_matrix;
    const unsigned int n_global_refinements;

    double time;
    const double final_time, time_step, theta, DeltaGbar;
    // solution_update is equal to delta u in NR
    LA::MPI::Vector  locally_relevant_solution;//
    LA::MPI::Vector  locally_relevant_solution_update;
    LA::MPI::Vector  locally_relevant_old_solution;
    LA::MPI::Vector  system_rhs;
    LA::MPI::Vector  M_x_velocity;

    const unsigned int output_timestep_skip;
    ConditionalOStream                        pcout;//
    TimerOutput                               computing_timer;//
  };



  template <int dim>
  class InitialValues : public Function<dim>
  {
  public:
    InitialValues () : Function<dim>() {}
    virtual double value(const Point<dim>   &p,
                          const unsigned int  /*component = 0*/) const;
  };
  /////////////////////////////////////////////////////
  template <int dim>
  double InitialValues<dim>::value (const Point<dim>  &p,
                             const unsigned int /*component*/) const
  {
    return 0.5*tanh(p[0]/sqrt(2.))+0.5; // ZeroFunction<dim>().value (p, component);
  }


  template <int dim>
  SineGordonProblem<dim>::SineGordonProblem ()
    :
    mpi_communicator (MPI_COMM_WORLD),//
    triangulation (mpi_communicator,
                   typename Triangulation<dim>::MeshSmoothing
                   (Triangulation<dim>::smoothing_on_refinement |
                   Triangulation<dim>::smoothing_on_coarsening)),//
    fe (2),
    dof_handler (triangulation),
    n_global_refinements (4),
    time (0),
    final_time (5),
    time_step (0.2),
    theta (0.5),
        DeltaGbar (-0.045837),//-0.045837
    output_timestep_skip (1),
    pcout (std::cout,(Utilities::MPI::this_mpi_process(mpi_communicator)== 0)),
    computing_timer (mpi_communicator,
                     pcout,
                     TimerOutput::summary,
                     TimerOutput::wall_times)//

  {}


  template <int dim>
  void SineGordonProblem<dim>::make_grid_and_dofs ()
  {
        TimerOutput::Scope t(computing_timer, "setup_GridGen");//

    GridGenerator::hyper_cube (triangulation, -10, 10);
    triangulation.refine_global (n_global_refinements);

    pcout << "   Number of active cells: "
              << triangulation.n_active_cells()
              << std::endl
              << "   Total number of cells: "
              << triangulation.n_cells()
              << std::endl;

    dof_handler.distribute_dofs (fe);

    locally_owned_dofs = dof_handler.locally_owned_dofs ();
    DoFTools::extract_locally_relevant_dofs (dof_handler,
                                             locally_relevant_dofs);//

    locally_owned_dofs.print(std::cout);//

    DynamicSparsityPattern dsp(locally_relevant_dofs); // dof_handler.n_dofs() replaced by locally_relevant_dofs???????
    DoFTools::make_sparsity_pattern (dof_handler, dsp);
    //sparsity_pattern.copy_from (dsp);
    SparsityTools::distribute_sparsity_pattern (dsp,
                                                dof_handler.n_locally_owned_dofs_per_processor(),
                                                mpi_communicator,
                                                locally_relevant_dofs);

    system_matrix.reinit  (locally_owned_dofs, locally_owned_dofs, dsp, mpi_communicator);
    mass_matrix.reinit    (locally_owned_dofs, locally_owned_dofs, dsp, mpi_communicator);
    laplace_matrix.reinit (locally_owned_dofs, locally_owned_dofs, dsp, mpi_communicator);


    /*MatrixCreator::create_mass_matrix (dof_handler,
                                       QGauss<dim>(3),
                                       mass_matrix);
    MatrixCreator::create_laplace_matrix (dof_handler,
                                          QGauss<dim>(3),
                                          laplace_matrix);*/
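    // Vectors reinit'ed with both the owned and the relevant index sets are ghosted
    // (read-only) vectors; system_rhs, reinit'ed with only the owned set, is a fully
    // distributed, writable vector.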

    locally_relevant_solution.reinit        (locally_owned_dofs, locally_relevant_dofs, mpi_communicator);//
    locally_relevant_solution_update.reinit (locally_owned_dofs, locally_relevant_dofs, mpi_communicator);//
    locally_relevant_old_solution.reinit    (locally_owned_dofs, locally_relevant_dofs, mpi_communicator);//
    M_x_velocity.reinit                     (locally_owned_dofs, locally_relevant_dofs, mpi_communicator);//
    system_rhs.reinit                       (locally_owned_dofs, mpi_communicator);//
  }

 template<int dim>
 void SineGordonProblem<dim>::assemble_system ()
 {
   TimerOutput::Scope t(computing_timer, "assembly");//

   QGauss<dim>  quadrature_formula(3);

   FEValues<dim> fe_values(fe, quadrature_formula,
                          update_values | update_gradients | update_JxW_values);
   const unsigned int   dofs_per_cell = fe.dofs_per_cell;
   const unsigned int   n_q_points = quadrature_formula.size();
   FullMatrix<double>   cell_mass_matrix(dofs_per_cell, dofs_per_cell);
   FullMatrix<double>   cell_laplace_matrix(dofs_per_cell, dofs_per_cell);
   Vector<double>       cell_rhs (dofs_per_cell);
   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
   typename DoFHandler<dim>::active_cell_iterator
   cell = dof_handler.begin_active(),
   endc = dof_handler.end();
   for (; cell!=endc; ++cell)
           if (cell->is_locally_owned())//
     {
       fe_values.reinit(cell);
       cell_mass_matrix = 0;
       cell_laplace_matrix = 0;
       cell_rhs = 0;
       for (unsigned int q_index=0; q_index<n_q_points; ++q_index)
         {
           for (unsigned int i=0; i<dofs_per_cell; ++i)
            for (unsigned int j=0; j<dofs_per_cell; ++j)
             cell_mass_matrix(i,j) += (fe_values.shape_value(i, q_index) *
                                    fe_values.shape_value(j, q_index) *
                                    fe_values.JxW(q_index));

           for (unsigned int i=0; i<dofs_per_cell; ++i)
             for (unsigned int j=0; j<dofs_per_cell; ++j)
               cell_laplace_matrix(i,j) += (fe_values.shape_grad(i, q_index) *
                                    fe_values.shape_grad(j, q_index) *
                                    fe_values.JxW(q_index));

           for (unsigned int i=0; i<dofs_per_cell; ++i)
             cell_rhs(i) += (fe_values.shape_value (i, q_index) *
                             1 *
                             fe_values.JxW (q_index));

         }
       cell->get_dof_indices(local_dof_indices);
       for (unsigned int i=0; i<dofs_per_cell; ++i)
         for (unsigned int j=0; j<dofs_per_cell; ++j)
           mass_matrix.add(local_dof_indices[i],
                              local_dof_indices[j],
                              cell_mass_matrix(i,j));
       for (unsigned int i=0; i<dofs_per_cell; ++i)
       for (unsigned int j=0; j<dofs_per_cell; ++j)
           laplace_matrix.add(local_dof_indices[i],
                              local_dof_indices[j],
                              cell_laplace_matrix(i,j));

       for (unsigned int i=0; i<dofs_per_cell; ++i)
         system_rhs(local_dof_indices[i]) += cell_rhs(i);
     }
   mass_matrix.compress (VectorOperation::add);//
   laplace_matrix.compress (VectorOperation::add);//
   system_rhs.compress (VectorOperation::add);//
 }

  template <int dim>
  void SineGordonProblem<dim>::right_hand_side () // we should change this to assemble_system
  {
        TimerOutput::Scope t(computing_timer, "RHS");//

        DynamicSparsityPattern dsp(locally_relevant_dofs);
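    // note: no entries are added to this dsp here (no call to DoFTools::make_sparsity_pattern),
    // so tmp_matrix below gets an essentially empty sparsity pattern -- possibly the source of
    // the "new nonzero entry" error mentioned in the e-mail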

    // First we assemble the Jacobian matrix F'_h(U^{n,l}), where U^{n,l} is
    // stored in the vector solution for convenience.
    system_matrix.copy_from (mass_matrix); // system_matrix is the matrix that we want to invert
    system_matrix.add (laplace_matrix, std::pow(time_step*theta,1));

    // instead of the previous line, it seems I have to use the following three lines!!!!
    //LA::MPI::SparseMatrix tmp_matrix (locally_owned_dofs, locally_owned_dofs, dsp, mpi_communicator);
    LA::MPI::SparseMatrix tmp_matrix;
    tmp_matrix.reinit (locally_owned_dofs, locally_owned_dofs, dsp, mpi_communicator);

    compute_nl_matrix (locally_relevant_old_solution, locally_relevant_solution, tmp_matrix);
    system_matrix.add(tmp_matrix, -std::pow(time_step*theta,1));
    //Then, we compute the right-hand side vector −Fh(Un,l).
    system_rhs = 0;

    tmp_matrix.copy_from (mass_matrix);
    tmp_matrix.add (laplace_matrix, std::pow(time_step*theta,1));

    LA::MPI::Vector tmp_vector (locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
    tmp_matrix.vmult (tmp_vector, locally_relevant_solution);
    system_rhs += tmp_vector;


    tmp_matrix.add(laplace_matrix, -std::pow(time_step, 1));

    tmp_matrix.vmult (tmp_vector, locally_relevant_old_solution);
    system_rhs -= tmp_vector;

    //system_rhs.add (-time_step, M_x_velocity);

    compute_nl_term (locally_relevant_old_solution, locally_relevant_solution, tmp_vector);
    system_rhs.add (std::pow(time_step,1), tmp_vector);

    system_rhs *= -1;
  }


  template <int dim>
  void SineGordonProblem<dim>::compute_nl_term (const LA::MPI::Vector &old_data,
                                                const LA::MPI::Vector &new_data,
                                                LA::MPI::Vector       &nl_term) const
  {
        nl_term = 0;
    const QGauss<dim> quadrature_formula (3);
    FEValues<dim>     fe_values (fe, quadrature_formula,
                                 update_values |
                                 update_JxW_values |
                                 update_quadrature_points);

    const unsigned int dofs_per_cell = fe.dofs_per_cell;
    const unsigned int n_q_points    = quadrature_formula.size();

    Vector<double> local_nl_term (dofs_per_cell);//???????
    std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
    std::vector<double> old_data_values (n_q_points);
    std::vector<double> new_data_values (n_q_points);

    typename DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active(),
    endc = dof_handler.end();
    for (; cell!=endc; ++cell)
        if (cell->is_locally_owned())//
      {
        local_nl_term = 0;
        fe_values.reinit (cell);
        fe_values.get_function_values (old_data, old_data_values); // you should take care of this, because it may cause an error
        fe_values.get_function_values (new_data, new_data_values);

        for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
          for (unsigned int i=0; i<dofs_per_cell; ++i)
 
////////////////////////////////////////////////////////////////////////////////////////////////
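            // nonlinear term g(u) = 2u - 6u^2 + 4u^3 + 6*DeltaGbar*(u - u^2),
            // evaluated at the theta-weighted value u = theta*u_new + (1-theta)*u_old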
            local_nl_term(i) += ((2*std::pow(theta * new_data_values[q_point] +
                                             (1-theta) * old_data_values[q_point], 1)
                                  - 6*std::pow(theta * new_data_values[q_point] +
                                               (1-theta) * old_data_values[q_point], 2)
                                  + 4*std::pow(theta * new_data_values[q_point] +
                                               (1-theta) * old_data_values[q_point], 3)
                                  + 6*DeltaGbar*std::pow(theta * new_data_values[q_point] +
                                                         (1-theta) * old_data_values[q_point], 1)
                                  - 6*DeltaGbar*std::pow(theta * new_data_values[q_point] +
                                                         (1-theta) * old_data_values[q_point], 2)) *
                                 fe_values.shape_value (i, q_point) *
                                 fe_values.JxW (q_point));
/////////////////////////////////////////////////////////////////////////////////////////////////
        cell->get_dof_indices (local_dof_indices);

        for (unsigned int i=0; i<dofs_per_cell; ++i)
          nl_term(local_dof_indices[i]) += local_nl_term(i);
      }
  }


  template <int dim>
  void SineGordonProblem<dim>::compute_nl_matrix (const LA::MPI::Vector &old_data,
                                                  const LA::MPI::Vector &new_data,
                                                  LA::MPI::SparseMatrix &nl_matrix) const
  {
    QGauss<dim>   quadrature_formula (3);
    FEValues<dim> fe_values (fe, quadrature_formula,
                             update_values | update_JxW_values | update_quadrature_points);

    const unsigned int dofs_per_cell = fe.dofs_per_cell;
    const unsigned int n_q_points    = quadrature_formula.size();

    FullMatrix<double> local_nl_matrix (dofs_per_cell, dofs_per_cell);//???????????
    std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
    std::vector<double> old_data_values (n_q_points);
    std::vector<double> new_data_values (n_q_points);

    typename DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active(),
    endc = dof_handler.end();

    for (; cell!=endc; ++cell)
        if (cell->is_locally_owned())//
      {
        local_nl_matrix = 0;
        fe_values.reinit (cell);
        fe_values.get_function_values (old_data, old_data_values);
        fe_values.get_function_values (new_data, new_data_values);

        for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
          for (unsigned int i=0; i<dofs_per_cell; ++i)
            for (unsigned int j=0; j<dofs_per_cell; ++j)
//////////////////////////////////////////////////////////////////////////////////////////////////////////
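              // derivative of the nonlinear term, g'(u) = 2 - 12*(u - u^2) + 6*DeltaGbar*(1 - 2u),
              // evaluated at u = theta*u_new + (1-theta)*u_old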
              local_nl_matrix(i,j) += ((2 - 12*(std::pow(theta * new_data_values[q_point] +
                                                         (1-theta) * old_data_values[q_point], 1)
                                                - std::pow(theta * new_data_values[q_point] +
                                                           (1-theta) * old_data_values[q_point], 2))
                                        + 6*DeltaGbar*(1 - 2*std::pow(theta * new_data_values[q_point] +
                                                                      (1-theta) * old_data_values[q_point], 1))) *
                                       fe_values.shape_value (i, q_point) *
                                       fe_values.shape_value (j, q_point) *
                                       fe_values.JxW (q_point));
/////////////////////////////////////////////////////////////////////////////////////////////////////////
        cell->get_dof_indices (local_dof_indices);

        for (unsigned int i=0; i<dofs_per_cell; ++i)
          for (unsigned int j=0; j<dofs_per_cell; ++j)
            nl_matrix.add(local_dof_indices[i], local_dof_indices[j],
                          local_nl_matrix(i,j));
      }
  }




  template <int dim>
  unsigned int
  SineGordonProblem<dim>::solve ()//completely changed
  {
            TimerOutput::Scope t(computing_timer, "solve");
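            // the solver needs a fully distributed (non-ghosted) vector to write the update into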
            LA::MPI::Vector completely_distributed_solution_update (locally_owned_dofs, mpi_communicator);
            SolverControl solver_control (dof_handler.n_dofs(), 1e-12);
        #ifdef USE_PETSC_LA
            LA::SolverCG cg(solver_control, mpi_communicator);
        #else
            LA::SolverCG cg(solver_control);
        #endif
            LA::MPI::PreconditionAMG preconditioner;
            LA::MPI::PreconditionAMG::AdditionalData data;
        #ifdef USE_PETSC_LA
            data.symmetric_operator = true;
        #else
            /* Trilinos defaults are good */
        #endif
            preconditioner.initialize(system_matrix, data);
            cg.solve (system_matrix, completely_distributed_solution_update, system_rhs,
                      preconditioner);
            pcout << "   Solved in " << solver_control.last_step()
                  << " iterations." << std::endl;
           // constraints.distribute (completely_distributed_solution_update);
            locally_relevant_solution_update = completely_distributed_solution_update;

    return solver_control.last_step();
  }


  template <int dim>
  void
  SineGordonProblem<dim>::output_results (const unsigned int timestep_number) const
  {

          DataOut<dim> data_out;
          data_out.attach_dof_handler (dof_handler);
          data_out.add_data_vector (locally_relevant_solution, "solution");

          Vector<float> subdomain (triangulation.n_active_cells());
          for (unsigned int i=0; i<subdomain.size(); ++i)
            subdomain(i) = triangulation.locally_owned_subdomain();
          data_out.add_data_vector (subdomain, "subdomain");

          data_out.build_patches ();
          const std::string filename =
            "solution-" + Utilities::int_to_string (timestep_number, 3);
          std::ofstream output ((filename + "." +
                                 Utilities::int_to_string
                                 (Utilities::MPI::this_mpi_process(mpi_communicator), 4) +
                                 ".vtu").c_str());
          data_out.write_vtu (output);
          if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
            {
              std::vector<std::string> filenames;
              for (unsigned int i=0;
                   i<Utilities::MPI::n_mpi_processes (mpi_communicator); ++i)
                filenames.push_back ("solution-" +
                                     Utilities::int_to_string (timestep_number, 
3) +
                                     "." +
                                     Utilities::int_to_string (i, 4) +
                                     ".vtu");
              std::ofstream master_output ((filename + ".pvtu").c_str());
              data_out.write_pvtu_record (master_output, filenames);
            }
  }


  template <int dim>
  void SineGordonProblem<dim>::run ()
  {
    make_grid_and_dofs ();//grid generator and setup_system
    assemble_system ();
   /* {
      ConstraintMatrix constraints;
      constraints.close();
      VectorTools::project (dof_handler,
                            constraints,
                            QGauss<dim>(3),
                            InitialValues<dim> (1, time),
                            solution);//first guess is produced here.
    }*/ ///////why

    LA::MPI::Vector temp_vec1 (locally_owned_dofs, mpi_communicator);//
    VectorTools::interpolate (dof_handler, InitialValues<dim>(), temp_vec1);
    locally_relevant_solution=temp_vec1;//???????


    output_results (0);

//////////////////////////////////////////////////////////////////////////////////////////////
    unsigned int timestep_number = 1;
    for (time+=time_step; time<=final_time; time+=time_step, ++timestep_number)
      {
        locally_relevant_old_solution = locally_relevant_solution; // finite difference for the time step

        pcout << std::endl
                  << "Time step #" << timestep_number << "; "
                  << "advancing to t = " << time << "."
                  << std::endl;

        double initial_rhs_norm = 0.;
        bool first_iteration = true;
        do // do...while is similar to a while loop, except that a do...while loop is guaranteed to execute at least once
          {
            right_hand_side ();

            if (first_iteration == true)
              initial_rhs_norm = system_rhs.l2_norm();

            const unsigned int n_iterations = solve (); // the solve() function returns the iteration count of the last Newton-Raphson solve

            locally_relevant_solution += locally_relevant_solution_update; // Newton-Raphson update of locally_relevant_solution

            if (first_iteration == true)
              pcout << "    " << n_iterations;
            else
              pcout << '+' << n_iterations;
            first_iteration = false;
          }
        while (system_rhs.l2_norm() > 1e-6 * initial_rhs_norm);

        pcout << " CG iterations per nonlinear step."
                  << std::endl;
        //do...while is ended here and we have an approximation of U^{n}
        if (timestep_number % output_timestep_skip == 0)
          {
            if (Utilities::MPI::n_mpi_processes(mpi_communicator) <= 32)
              {
                TimerOutput::Scope t(computing_timer, "output");
                output_results (timestep_number);
              }
          } //////// changes
        computing_timer.print_summary ();
        computing_timer.reset ();
        pcout << std::endl;
      }// end of for
  }// end of run function
}// end of namespace


int main (int argc, char *argv[])//
{
  try
    {
      using namespace dealii;
      using namespace Step25;

      deallog.depth_console (0);
      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
      // use (argc, argv, numbers::invalid_unsigned_int) to enable multi-threading;
      // note that PETSc cannot be used with multiple threads

      SineGordonProblem<2> sg_problem;
      sg_problem.run ();
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;

      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }

  return 0;
}


