I was able to reproduce this behaviour with the following code (also attached as main.cc); the CMakeLists file is attached as well and reproduced at the end of this message. The code hangs after printing 'Scaled variable 0'.

Let me mention that I have used a different algorithm to obtain the locally relevant dofs, rather than directly using the function from DoFTools. My algorithm is as follows:

Loop over locally owned cells
    Loop over interior faces
        If the neighbor cell is a ghost:
            Add the neighbor's dofs on this face to the relevant dofs

With this algorithm, the relevant dofs are not all of the ghost cells' dofs, but only those lying on a subdomain interface. This is implemented in lines 49-69 of the file main.cc. I have verified that this algorithm works correctly on a small mesh, so I do not believe it is the source of the problem.
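
For reference, the DoFTools function I am referring to is, I believe, DoFTools::extract_locally_relevant_dofs(); that standard approach would mark all dofs of every ghost cell as relevant, not only those on a subdomain interface. A minimal sketch of what it would replace in my code:

        // standard alternative (not used in the attached main.cc):
        // this adds all dofs of all ghost cells to the relevant set
        IndexSet locally_relevant_dofs;
        DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);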

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_accessor.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/mapping_q1.h>
#include <deal.II/fe/fe_dgq.h>
#include <deal.II/fe/fe_face.h>
#include <deal.II/grid/grid_generator.h>

#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/lac/generic_linear_algebra.h>

#include <array>
#include <vector>

/**
 * See README file for details
 */
using namespace dealii;
namespace LA
{
        using namespace ::LinearAlgebraPETSc;
}

int main(int argc, char **argv)
{
        Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

        parallel::distributed::Triangulation<2> triang(MPI_COMM_WORLD);
        GridGenerator::hyper_cube(triang);
        triang.refine_global(5);
        const MappingQ1<2> mapping;
        const uint degree = 1;
        FE_DGQ<2> fe(degree);
        FE_FaceQ<2> fe_face(degree);
        const std::array<uint, GeometryInfo<2>::faces_per_cell> face_first_dof{0, degree, 0, (degree+1)*degree};
        const std::array<uint, GeometryInfo<2>::faces_per_cell> face_dof_increment{degree+1, degree+1, 1, 1};

        DoFHandler<2> dof_handler(triang);
        dof_handler.distribute_dofs(fe);
        IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs();
        IndexSet locally_relevant_dofs;

        locally_relevant_dofs = locally_owned_dofs; // initialise with owned dofs
        uint face_id, face_id_neighbor, i; // face ids wrt owner and neighbor
        std::vector<uint> dof_ids_neighbor(fe.dofs_per_cell);
        for(auto &cell: dof_handler.active_cell_iterators()){
                if(!(cell->is_locally_owned())) continue;
                for(face_id=0; face_id<GeometryInfo<2>::faces_per_cell; face_id++){
                        if(cell->face(face_id)->at_boundary()) continue;
                        if(cell->neighbor(face_id)->is_ghost()){
                                // current face lies at subdomain interface
                                // add dofs on this face (wrt neighbor) to locally relevant dofs
                                cell->neighbor(face_id)->get_dof_indices(dof_ids_neighbor);

                                face_id_neighbor = cell->neighbor_of_neighbor(face_id);
                                for(i=0; i<fe_face.dofs_per_face; i++){
                                        locally_relevant_dofs.add_index(
                                                dof_ids_neighbor[
                                                face_first_dof[face_id_neighbor] +
                                                i*face_dof_increment[face_id_neighbor]
                                                ]
                                        );
                                } // loop over face dofs
                        }
                } // loop over internal faces
        } // loop over locally owned cells

        ConditionalOStream pcout(std::cout, (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)==0));

        pcout << "Yosh\n";

        std::array<LA::MPI::Vector, 4> vecs;
        std::array<LA::MPI::Vector, 4> gh_vecs;
        LA::MPI::Vector scaler;
        for(uint var=0; var<4; var++){
                vecs[var].reinit(locally_owned_dofs, MPI_COMM_WORLD);
                gh_vecs[var].reinit(locally_owned_dofs, locally_relevant_dofs, MPI_COMM_WORLD);
        }
        scaler.reinit(locally_owned_dofs, MPI_COMM_WORLD);
        
        for(uint i: locally_owned_dofs){
                scaler[i] = 1.0*i;
        }
        std::vector<uint> dof_ids(fe.dofs_per_cell);
        
        // setting ops
        for(auto &cell: dof_handler.active_cell_iterators()){
                if(!(cell->is_locally_owned())) continue;
                
                cell->get_dof_indices(dof_ids);
                
                pcout << "\tCell " << cell->index() << "\n";
                pcout << "\t\tSetting\n";
                for(uint var=0; var<4; var++){
                        for(uint i: dof_ids){
                                vecs[var][i] = 1.0*i;
                        }
                        vecs[var].compress(VectorOperation::insert);
                }
                
                // addition ops
                pcout << "\t\tAdding\n";
                for(uint var=0; var<4; var++){
                        for(uint i: dof_ids){
                                vecs[var][i] += 1.0*i;
                        }
                        vecs[var].compress(VectorOperation::add);
                }
                
                // more ops
                pcout << "\t\tMore additions\n";
                for(uint var=0; var<4; var++){
                        for(uint i: dof_ids){
                                vecs[var][i] += -5.0*i;
                        }
                        vecs[var].compress(VectorOperation::add);
                }
        } // loop over owned cells
        // scaling and communicating
        pcout << "Scaling and communicating\n";
        for(uint var=0; var<4; var++){
                vecs[var].scale(scaler);
                pcout << "Scaled variable " << var << "\n";
                gh_vecs[var] = vecs[var];
                pcout << "Communicated variable " << var << "\n";
        }
        pcout << "Completed all\n";
        
        return 0;
}

CMAKE_MINIMUM_REQUIRED(VERSION 2.8.12)
FIND_PACKAGE(deal.II 9.1.0 QUIET
        HINTS ${deal.II_DIR} ${DEAL_II_DIR} ../ ../../ $ENV{DEAL_II_DIR}
)
IF(NOT ${deal.II_FOUND})
        MESSAGE(FATAL_ERROR "\n"
        "*** Could not locate a (sufficiently recent) version of deal.II. ***\n\n"
        "You may want to either pass a flag -DDEAL_II_DIR=/path/to/deal.II to cmake\n"
        "or set an environment variable \"DEAL_II_DIR\" that contains this path."
)
ENDIF()

DEAL_II_INITIALIZE_CACHED_VARIABLES()

project(issue0)

ADD_EXECUTABLE(issue0.out main.cc)

DEAL_II_SETUP_TARGET(issue0.out)

# for make debug
ADD_CUSTOM_TARGET(debug
        COMMAND ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Debug ${CMAKE_SOURCE_DIR}
        COMMAND ${CMAKE_COMMAND} --build ${CMAKE_BINARY_DIR} --target all
        COMMENT "Switch CMAKE_BUILD_TYPE to Debug"
)

# for make release
ADD_CUSTOM_TARGET(release
        COMMAND ${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE=Release ${CMAKE_SOURCE_DIR}
        COMMAND ${CMAKE_COMMAND} --build ${CMAKE_BINARY_DIR} --target all
        COMMENT "Switch CMAKE_BUILD_TYPE to Release"
)