Dear Community,
I have been trying to use parallel::distributed::SolutionTransfer to write
checkpoints and to read them back in order to restart the simulation. On its
own, each function works fine. However, when I enable both checkpoint writing
and checkpoint reading in the same run, I get the error "Not all
SolutionTransfer objects have been deserialized after the last call to
load()". I am using two SolutionTransfer objects because I have two
DoFHandlers. When debugging, gdb always points to the failing assertion in
distributed/tria.cc:
this->cell_attached_data.n_attached_deserialize == 0.
Thank you in advance for your time.
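For context, my understanding of the contract behind that assertion is that
every data set attached with prepare_for_serialization() before save() must
be consumed by exactly one deserialize() after the matching load();
otherwise the internal counter stays non-zero and the next save() aborts.
A minimal sketch of that pairing with two DoFHandlers (placeholder names,
not my actual code):

#include <deal.II/distributed/solution_transfer.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/trilinos_vector.h>

using namespace dealii;

// Saving: two data sets are attached to the triangulation, then one save().
template <int dim>
void
save_two_transfers(
  parallel::distributed::Triangulation<dim> &tria,
  DoFHandler<dim> &dof_handler_a,
  DoFHandler<dim> &dof_handler_b,
  const std::vector<const TrilinosWrappers::MPI::Vector *> &in_a,
  const std::vector<const TrilinosWrappers::MPI::Vector *> &in_b)
{
  parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
    transfer_a(dof_handler_a);
  parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
    transfer_b(dof_handler_b);
  transfer_a.prepare_for_serialization(in_a);
  transfer_b.prepare_for_serialization(in_b);
  tria.save("checkpoint.triangulation");
}

// Loading: one load(), then exactly one deserialize() per transfer that
// attached data before the save. If one of them is skipped, the *next*
// save() fails with the assertion quoted above.
template <int dim>
void
load_two_transfers(
  parallel::distributed::Triangulation<dim> &tria,
  DoFHandler<dim> &dof_handler_a,
  DoFHandler<dim> &dof_handler_b,
  std::vector<TrilinosWrappers::MPI::Vector *> &out_a,
  std::vector<TrilinosWrappers::MPI::Vector *> &out_b)
{
  tria.load("checkpoint.triangulation");
  // (in my real code, setup_dofs() re-distributes the DoFs at this point)
  parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
    transfer_a(dof_handler_a);
  parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
    transfer_b(dof_handler_b);
  transfer_a.deserialize(out_a);
  transfer_b.deserialize(out_b);
}

My actual write_checkpoint() and read_checkpoint() are given below.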
template <int dim>
void
CFDDEMSolver<dim>::write_checkpoint()
{
  std::string prefix =
    this->simulation_parameters.restart_parameters.filename;

  // Only rank 0 writes the bookkeeping files.
  if (Utilities::MPI::this_mpi_process(this->mpi_communicator) == 0)
    {
      this->simulation_control->save(prefix);
      this->pvdhandler.save(prefix);
    }

  // Gather the current and previous velocity solutions.
  std::vector<const TrilinosWrappers::MPI::Vector *> sol_set_transfer;
  sol_set_transfer.push_back(&this->present_solution);
  for (unsigned int i = 0; i < this->previous_solutions.size(); ++i)
    sol_set_transfer.push_back(&this->previous_solutions[i]);

  // Gather the current and previous void fraction solutions.
  std::vector<const TrilinosWrappers::MPI::Vector *> vf_set_transfer;
  vf_set_transfer.push_back(&this->nodal_void_fraction_relevant);
  for (unsigned int i = 0; i < this->previous_void_fraction.size(); ++i)
    vf_set_transfer.push_back(&this->previous_void_fraction[i]);

  // Prepare for serialization: one SolutionTransfer per DoFHandler.
  parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
    system_trans_vectors(this->dof_handler);
  system_trans_vectors.prepare_for_serialization(sol_set_transfer);

  parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
    vf_system_trans_vectors(this->void_fraction_dof_handler);
  vf_system_trans_vectors.prepare_for_serialization(vf_set_transfer);

  // Register the particle store callback, then save the triangulation.
  const auto parallel_triangulation =
    dynamic_cast<parallel::distributed::Triangulation<dim> *>(
      this->triangulation.get());
  parallel_triangulation->signals.pre_distributed_save.connect(std::bind(
    &Particles::ParticleHandler<dim>::register_store_callback_function,
    &this->particle_handler));

  const std::string triangulation_name = prefix + ".triangulation";
  parallel_triangulation->save(triangulation_name);
}
template <int dim>
void
CFDDEMSolver<dim>::read_checkpoint()
{
  std::string prefix =
    this->simulation_parameters.restart_parameters.filename;
  this->simulation_control->read(prefix);
  this->pvdhandler.read(prefix);

  // Register the particle load callback before loading the triangulation.
  const auto parallel_triangulation =
    dynamic_cast<parallel::distributed::Triangulation<dim> *>(
      this->triangulation.get());
  parallel_triangulation->signals.post_distributed_load.connect(
    std::bind(&Particles::ParticleHandler<dim>::register_load_callback_function,
              &this->particle_handler,
              true));

  const std::string filename = prefix + ".triangulation";
  std::ifstream in(filename.c_str());
  AssertThrow(in,
              ExcMessage(
                std::string("You are trying to restart a previous computation, "
                            "but the restart file <") +
                filename + "> does not appear to exist!"));

  try
    {
      parallel_triangulation->load(filename.c_str());
    }
  catch (...)
    {
      AssertThrow(false,
                  ExcMessage("Cannot open snapshot mesh file or read the "
                             "triangulation stored there."));
    }

  this->setup_dofs();

  // Velocity vectors: deserialize into fully distributed vectors first.
  std::vector<TrilinosWrappers::MPI::Vector *> x_system(
    1 + this->previous_solutions.size());
  TrilinosWrappers::MPI::Vector distributed_system(this->locally_owned_dofs,
                                                   this->mpi_communicator);
  x_system[0] = &distributed_system;

  std::vector<TrilinosWrappers::MPI::Vector> distributed_previous_solutions;
  distributed_previous_solutions.reserve(this->previous_solutions.size());
  for (unsigned int i = 0; i < this->previous_solutions.size(); ++i)
    {
      distributed_previous_solutions.emplace_back(
        TrilinosWrappers::MPI::Vector(this->locally_owned_dofs,
                                      this->mpi_communicator));
      x_system[i + 1] = &distributed_previous_solutions[i];
    }

  parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
    system_trans_vectors(this->dof_handler);
  system_trans_vectors.deserialize(x_system);

  this->present_solution = distributed_system;
  for (unsigned int i = 0; i < this->previous_solutions.size(); ++i)
    this->previous_solutions[i] = distributed_previous_solutions[i];

  // Void fraction vectors: same pattern on the second DoFHandler.
  std::vector<TrilinosWrappers::MPI::Vector *> vf_system(
    1 + this->previous_void_fraction.size());
  TrilinosWrappers::MPI::Vector vf_distributed_system(
    this->locally_owned_dofs_voidfraction, this->mpi_communicator);
  vf_system[0] = &vf_distributed_system;

  std::vector<TrilinosWrappers::MPI::Vector> vf_distributed_previous_solutions;
  vf_distributed_previous_solutions.reserve(
    this->previous_void_fraction.size());
  for (unsigned int i = 0; i < this->previous_void_fraction.size(); ++i)
    {
      vf_distributed_previous_solutions.emplace_back(
        TrilinosWrappers::MPI::Vector(this->locally_owned_dofs_voidfraction,
                                      this->mpi_communicator));
      vf_system[i + 1] = &vf_distributed_previous_solutions[i];
    }

  parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
    vf_system_trans_vectors(this->void_fraction_dof_handler);
  vf_system_trans_vectors.deserialize(vf_system);

  this->nodal_void_fraction_relevant = vf_distributed_system;
  for (unsigned int i = 0; i < this->previous_void_fraction.size(); ++i)
    this->previous_void_fraction[i] = vf_distributed_previous_solutions[i];
}
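One thing I am unsure about (an assumption on my part, not a confirmed
diagnosis): both functions above connect to the triangulation signals on
every call, so repeated checkpoint cycles within a single run would stack
duplicate connections, and the particle callbacks would then attach data
more than once per save()/load(). A sketch of connecting them only once,
e.g. in the solver constructor (same calls as above, just moved):

  if (auto tria = dynamic_cast<parallel::distributed::Triangulation<dim> *>(
        this->triangulation.get()))
    {
      // Register the particle store/load callbacks a single time so that
      // later write_checkpoint()/read_checkpoint() calls do not add
      // duplicate connections.
      tria->signals.pre_distributed_save.connect(std::bind(
        &Particles::ParticleHandler<dim>::register_store_callback_function,
        &this->particle_handler));
      tria->signals.post_distributed_load.connect(std::bind(
        &Particles::ParticleHandler<dim>::register_load_callback_function,
        &this->particle_handler,
        true));
    }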