Dear all, I am trying to solve a semi-Lagrangian advection problem. I have a triangulation with periodic faces. I want to use copy_triangulation and then trace the mesh back in time. The problem is that with *parallel distributed triangulation* copying a refined mesh is refused, as the documentation states. When I try to use a fully distributed triangulation instead, I get an error related to the periodicity. Is there a way to copy a triangulation with periodicity, or am I missing something? The code works if I create new_triangulation as a parallel distributed triangulation from scratch and work on it, but that is an expensive step. Kindly find the code in the attachment.
Regards, Heena -- The deal.II project is located at http://www.dealii.org/ For mailing list/forum options, see https://groups.google.com/d/forum/dealii?hl=en --- You received this message because you are subscribed to the Google Groups "deal.II User Group" group. To unsubscribe from this group and stop receiving emails from it, send an email to dealii+unsubscr...@googlegroups.com. To view this discussion on the web visit https://groups.google.com/d/msgid/dealii/d70b7825-d3aa-43ee-b14a-65e0b371a3dfo%40googlegroups.com.
/* * semilagrangian.hpp * * Created on: Jun 10, 2020 * Author: heena */ #ifndef INCLUDE_SEMILAGRANGIAN_HPP_ #define INCLUDE_SEMILAGRANGIAN_HPP_ #include <deal.II/base/conditional_ostream.h> #include <deal.II/base/function.h> #include <deal.II/base/index_set.h> #include <deal.II/base/logstream.h> #include <deal.II/base/mpi.h> #include <deal.II/base/multithread_info.h> #include <deal.II/base/quadrature_lib.h> #include <deal.II/base/quadrature_point_data.h> #include <deal.II/base/tensor_function.h> #include <deal.II/base/timer.h> #include <deal.II/base/utilities.h> #include <deal.II/lac/affine_constraints.h> #include <deal.II/lac/dynamic_sparsity_pattern.h> #include <deal.II/lac/full_matrix.h> #include <deal.II/lac/lapack_full_matrix.h> #include <deal.II/lac/precondition.h> #include <deal.II/lac/solver_cg.h> #include <deal.II/lac/sparse_matrix.h> #include <deal.II/lac/sparsity_tools.h> #include <deal.II/lac/trilinos_precondition.h> #include <deal.II/lac/trilinos_solver.h> #include <deal.II/lac/trilinos_sparse_matrix.h> #include <deal.II/lac/trilinos_sparsity_pattern.h> #include <deal.II/lac/trilinos_vector.h> #include <deal.II/lac/vector.h> // Distributed triangulation #include <deal.II/distributed/fully_distributed_tria.h> #include <deal.II/distributed/grid_refinement.h> #include <deal.II/distributed/tria.h> #include <deal.II/grid/grid_generator.h> #include <deal.II/grid/grid_out.h> #include <deal.II/grid/grid_tools.h> #include <deal.II/grid/tria.h> #include <deal.II/grid/tria_accessor.h> #include <deal.II/grid/tria_iterator.h> #include <deal.II/dofs/dof_accessor.h> #include <deal.II/dofs/dof_handler.h> #include <deal.II/dofs/dof_renumbering.h> #include <deal.II/dofs/dof_tools.h> #include <deal.II/fe/fe_q.h> #include <deal.II/fe/fe_values.h> #include <deal.II/numerics/data_out.h> #include <deal.II/numerics/matrix_tools.h> #include <deal.II/numerics/vector_tools.h> #include <boost/geometry/index/rtree.hpp> #include <boost/geometry/strategies/strategies.hpp> #include 
<deal.II/base/bounding_box.h> #include <deal.II/base/config.h> #include <deal.II/base/parameter_acceptor.h> #include <deal.II/base/parameter_handler.h> #include <deal.II/base/point.h> #include <deal.II/boost_adaptors/bounding_box.h> #include <deal.II/boost_adaptors/point.h> #include <deal.II/boost_adaptors/segment.h> #include <memory> // STL #include <cmath> #include <fstream> #include <iostream> #include <map> // My Headers #include "advection_field.hpp" #include "advectiondiffusion_basis.hpp" #include "advectiondiffusion_basis_reconstruction.hpp" #include "config.h" #include "dirichlet_bc.hpp" #include "initial_value.hpp" #include "matrix_coeff.hpp" #include "neumann_bc.hpp" #include "right_hand_side.hpp" namespace Timedependent_AdvectionDiffusionProblem { using namespace dealii; template <int dim> class Semilagrangian : public ParameterAcceptor { public: Semilagrangian() = delete; Semilagrangian(unsigned int n_refine, bool is_periodic); ~Semilagrangian(); void run(); private: void make_grid(); void copy_grid(); MPI_Comm mpi_communicator; parallel::distributed::Triangulation<dim> triangulation; parallel::distributed::Triangulation<dim> test_triangulation; /*! * Time-dependent vector coefficient (velocity). */ Coefficients::AdvectionField<dim> advection_field; /*! * Index Set */ IndexSet locally_owned_dofs; IndexSet locally_relevant_dofs; ConditionalOStream pcout; TimerOutput computing_timer; double time; double time_step; unsigned int timestep_number; /*! * parameter to determine the "implicitness" of the method. * Zero is fully implicit and one is (almost explicit). */ const double theta; /*! * Final simulation time. */ double T_max; /*! * Number of global refinements. */ const unsigned int n_refine; /*! * If this flag is true then periodic boundary conditions * are used. 
*/ bool is_periodic; }; template <int dim> Semilagrangian<dim>::Semilagrangian( unsigned int n_refine, bool is_periodic) : mpi_communicator(MPI_COMM_WORLD), triangulation(mpi_communicator, typename Triangulation<dim>::MeshSmoothing( Triangulation<dim>::smoothing_on_refinement | Triangulation<dim>::smoothing_on_coarsening)), test_triangulation(mpi_communicator, typename Triangulation<dim>::MeshSmoothing( Triangulation<dim>::smoothing_on_refinement | Triangulation<dim>::smoothing_on_coarsening)), fe(1), dof_handler(triangulation), pcout(std::cout, (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)), computing_timer(mpi_communicator, pcout, TimerOutput::summary, TimerOutput::wall_times), time(0.0), time_step(1. / 100), timestep_number(0), /* * theta=1 is implicit Euler, * theta=0 is explicit Euler, * theta=0.5 is Crank-Nicolson */ theta(0.0), T_max(1.0), n_refine(n_refine), is_periodic(is_periodic) {} template <int dim> void Semilagrangian<dim>::make_grid() { TimerOutput::Scope t(computing_timer, "coarse mesh generation"); GridGenerator::hyper_cube(triangulation, 0, 1, /* colorize */ true); if (is_periodic) { std::vector<GridTools::PeriodicFacePair< typename parallel::distributed::Triangulation<dim>::cell_iterator>> periodicity_vector; for (unsigned int d = 0; d < dim; ++d) { GridTools::collect_periodic_faces(triangulation, /*b_id1*/ 2 * (d + 1) - 2, /*b_id2*/ 2 * (d + 1) - 1, /*direction*/ d, periodicity_vector); } triangulation.add_periodicity(periodicity_vector); } // if triangulation.refine_global(n_refine); } template <int dim> void Semilagrangian<dim>::copy_grid() { TimerOutput::Scope t(computing_timer, "coarse mesh generation"); GridGenerator::hyper_cube(test_triangulation, 0, 1, /* colorize */ true); if (is_periodic) { std::vector<GridTools::PeriodicFacePair< typename parallel::distributed::Triangulation<dim>::cell_iterator>> periodicity_vector; for (unsigned int d = 0; d < dim; ++d) { GridTools::collect_periodic_faces(test_triangulation, /*b_id1*/ 2 * 
(d + 1) - 2, /*b_id2*/ 2 * (d + 1) - 1, /*direction*/ d, periodicity_vector); } test_triangulation.add_periodicity(periodicity_vector); } // if test_triangulation.refine_global(n_refine); std::vector<BoundingBox<2>> all_boxes(test_triangulation.n_locally_owned_active_cells()); unsigned int i = 0; for (const auto &cell : test_triangulation.active_cell_iterators()) if (cell->is_locally_owned()) all_boxes[i++] = cell->bounding_box(); const auto tree = pack_rtree(all_boxes); const auto local_boxes = extract_rtree_level(tree, 10); while (T_max >= 0.09) { for (const auto &cell : test_triangulation.active_cell_iterators()) { for (unsigned int i = 0; i < GeometryInfo<dim>::vertices_per_cell; i++) { Point<2> &v = cell->vertex(i); // initial vertex auto v0 = v; // advection.set_time(time-time_step); // v = v0 + time_step * advection_field.value(v); // std::cout << v << std::endl; std::cout << v0 << std::endl; std::cout << advection_field.value(v) << std::endl; std::cout << "----------------------------------" << std::endl; GridOut grid_out; std::ofstream output("mesh-" + std::to_string(T_max) + ".vtu"); grid_out.write_vtu(test_triangulation, output); T_max = T_max - time_step; } } } } template <int dim> void Semilagrangian<dim>::run() { pcout << std::endl << "===========================================" << std::endl; pcout << "Running (with Trilinos) on " << Utilities::MPI::n_mpi_processes(mpi_communicator) << " MPI rank(s)..." << std::endl; make_grid(); copy_grid(); } } // namespace Timedependent_AdvectionDiffusionProblem #endif /* INCLUDE_SEMILAGRANGIAN_HPP_ */