Dear experienced deal.II users and developers,
I am trying to solve the heat equation in the time domain with distributed memory (MPI), but the results are incorrect. To do so, I followed tutorial step-23 for the time-stepping scheme and step-40 for the MPI implementation.
May I ask whether my handling of the boundary condition is correct? In particular, should we call compress() after apply_boundary_values()? Thanks in advance!
With best regards,
Mark
Also, in PETSc or Trilinos, how should we set the boundary conditions and initial conditions?
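To make the compress() question concrete: after the MatrixTools::apply_boundary_values() call inside run() below, I am unsure whether something like the following is needed as well (just a sketch of what I have in mind, not something I know to be correct):

  // sketch only -- is this required after apply_boundary_values()?
  system_matrix.compress (VectorOperation::insert);
  system_rhs.compress (VectorOperation::insert);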
Since old_solution_T is read in other places (not shown here), it is initialized with ghost entries; old_solution_T_cal is only written to, so it does not carry ghost entries. See the code below:
//
old_solution_T_cal.reinit (locally_owned_dofs, mpi_communicator);
old_solution_T.reinit (locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
//
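For context, the two index sets used above are obtained in setup_system() in the same way as in step-40 (a sketch of what I have, nothing unusual):

  locally_owned_dofs = dof_handler.locally_owned_dofs ();
  DoFTools::extract_locally_relevant_dofs (dof_handler, locally_relevant_dofs);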
..........
template <int dim>
void ThermalDiffusion<dim>::run ()
{
  setup_system ();
  assemble_system ();

  VectorTools::project (dof_handler, constraints, QGauss<dim>(degree),
                        InitialValues_T<dim>(),
                        old_solution_T_cal);
  old_solution_T = old_solution_T_cal;

  LA::MPI::Vector tmp (locally_owned_dofs, mpi_communicator);
  LA::MPI::Vector forcing_terms (locally_owned_dofs, mpi_communicator);

  for (timestep_number = 1, time = time_step;
       time <= global_simulation_end_time;
       time += time_step, ++timestep_number)
    {
pcout << "Time step " << timestep_number
<< " at t=" << time
<< std::endl;
//-----------------------------------
//run to solve T
//-----------------------------------
//
//time dependent
//assign right hand side
mass_matrix_T.vmult (system_rhs, old_solution_T_cal);
laplace_matrix_T.vmult (tmp, old_solution_T_cal);
system_rhs.add (-time_step * (1-theta), tmp);
assemble_rhs_T (time);
forcing_terms = dynamic_rhs_T;
forcing_terms *= time_step * theta;
assemble_rhs_T (time - time_step);
tmp = dynamic_rhs_T;
forcing_terms.add (time_step*(1-theta),tmp);
system_rhs.add (1,forcing_terms);
//assign system matrix
system_matrix.copy_from (mass_matrix_T);
system_matrix.add (time_step * theta, laplace_matrix_T);
      // apply the Dirichlet boundary values to the assembled system
      {
        BoundaryValues_Temperature<dim> boundary_values_function_T;
        //boundary_values_function_T.set_time (time);

        std::map<types::global_dof_index,double> boundary_values_T;
        VectorTools::interpolate_boundary_values (dof_handler,
                                                  BOUNDARY_NUM,
                                                  boundary_values_function_T,
                                                  boundary_values_T);

        MatrixTools::apply_boundary_values (boundary_values_T,
                                            system_matrix,
                                            solution_T,
                                            system_rhs,
                                            false);
      }
      solve_T ();

      if (Utilities::MPI::n_mpi_processes(mpi_communicator) <= 32)
        {
          TimerOutput::Scope t(computing_timer, "output");
          output_results ();
        }

      computing_timer.print_summary ();
      computing_timer.reset ();

      pcout << std::endl;

      old_solution_T = solution_T;
    }
}
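For reference, the right-hand side and system matrix assembled above are meant to implement the usual theta-scheme for the heat equation, with M = mass_matrix_T, A = laplace_matrix_T, k = time_step, and F^n the forcing term produced by assemble_rhs_T(time):

  (M + k*theta*A) T^n = (M - k*(1-theta)*A) T^(n-1)
                        + k*( theta*F^n + (1-theta)*F^(n-1) )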
// definition of solve_T()
template <int dim>
void ThermalDiffusion<dim>::solve_T ()
{
  TimerOutput::Scope t(computing_timer, "solve_T");

  LA::MPI::Vector completely_distributed_solution (locally_owned_dofs,
                                                   mpi_communicator);

  SolverControl solver_control (dof_handler.n_dofs(), 1e-12);
#ifdef USE_PETSC_LA
  LA::SolverCG solver (solver_control, mpi_communicator);
#else
  LA::SolverCG solver (solver_control);
#endif

  LA::MPI::PreconditionAMG preconditioner;
  LA::MPI::PreconditionAMG::AdditionalData data;
#ifdef USE_PETSC_LA
  data.symmetric_operator = true;
#else
  /* Trilinos defaults are good */
#endif
  preconditioner.initialize (system_matrix, data);

  solver.solve (system_matrix,
                completely_distributed_solution,
                system_rhs,
                preconditioner);

  pcout << "   Solved in " << solver_control.last_step()
        << " iterations." << std::endl;
  // as in step-40: distribute the constraints on the fully distributed
  // solution first, then copy it into the ghosted vector
  constraints.distribute (completely_distributed_solution);
  solution_T = completely_distributed_solution;

  pcout << "success...solve_T()..." << std::endl;
}
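As a side note on the AMG settings above: data.symmetric_operator = true is set in the PETSc branch because the system matrix assembled in run(), M + k*theta*A, should be symmetric, which is also what should make CG applicable here.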