#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/slepc_solver.h>

#include <random>

using namespace dealii;

class OneBodyHamiltonianOperator
{
public:
  /**
   * Declare type for container size.
   */
  using size_type = types::global_dof_index;

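  /**
   * Constructor. Assembles a dense test Hamiltonian over the locally
   * owned rows and computes a few of its eigenpairs with SLEPc. The rank
   * arguments are kept for the interface but are not used yet.
   */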
  OneBodyHamiltonianOperator(const dealii::IndexSet &a_local_row_set,
                             const unsigned int      a_my_proc,
                             const unsigned int      a_num_procs);

  /// Destructor
  ~OneBodyHamiltonianOperator();

private:
  PETScWrappers::MPI::SparseMatrix m_H1;
  DynamicSparsityPattern           m_dynamic_sparsity_pattern;
};

OneBodyHamiltonianOperator::OneBodyHamiltonianOperator(
  const IndexSet    &a_local_row_set,
  const unsigned int a_my_proc,
  const unsigned int a_num_procs)
{
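  // Build a completely dense sparsity pattern: every locally owned row
  // couples to every column of the global matrix.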
  m_dynamic_sparsity_pattern.reinit(a_local_row_set.size(),
                                    a_local_row_set.size(),
                                    a_local_row_set);

  for (const auto row : a_local_row_set)
    {
      for (size_type col = 0; col < a_local_row_set.size(); ++col)
        {
          m_dynamic_sparsity_pattern.add(row, col);
        }
    }

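  // Create the distributed PETSc matrix, using the same index set for the
  // row and column partition.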
  m_H1.reinit(a_local_row_set,
              a_local_row_set,
              m_dynamic_sparsity_pattern,
              MPI_COMM_WORLD);

  for (const auto row : a_local_row_set)
    {
      for (size_type j = 0; j < m_dynamic_sparsity_pattern.row_length(row);
           ++j)
        {
          m_H1.add(row,
                   m_dynamic_sparsity_pattern.column_number(row, j),
                   1.0); // constant test value
        }
    }

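  // Finish assembly so the matrix can be used by the solver.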
  m_H1.compress(VectorOperation::add);

  // Give the solver a realistic iteration budget; five outer iterations
  // are rarely enough for Krylov-Schur to converge.
  SolverControl                    solver_control(1000, 1e-11);
  SLEPcWrappers::SolverKrylovSchur eigensolver(solver_control);
  const unsigned int               n_eigenvalues = 5;

  // The vectors must have the *global* size of the matrix; each process
  // owns only its n_elements() entries.
  std::vector<PETScWrappers::MPI::Vector> eigenvectors(
    n_eigenvalues,
    PETScWrappers::MPI::Vector(MPI_COMM_WORLD,
                               a_local_row_set.size(),
                               a_local_row_set.n_elements()));

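  // Seed the initial space with uniform random values, writing only the
  // entries each process owns.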
  std::default_random_engine             generator;
  std::uniform_real_distribution<double> distribution(0.0, 1.0);

  for (auto &eigenvector : eigenvectors)
    for (const auto j : a_local_row_set)
      eigenvector[j] = distribution(generator);

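  // Finalize the vectors after the insertions above.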
  for (auto &vector : eigenvectors)
    vector.compress(VectorOperation::insert);

  eigensolver.set_initial_space(eigenvectors);
  // A single matrix defines a standard Hermitian eigenproblem, so the
  // problem type is EPS_HEP rather than the generalized EPS_GHEP.
  eigensolver.set_problem_type(EPS_HEP);

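  // Storage for the converged eigenvalues.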
  std::vector<PetscScalar> eigenvalues(n_eigenvalues);

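  // Solve the standard eigenproblem H1 x = lambda x.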
  eigensolver.solve(m_H1, eigenvalues, eigenvectors, eigenvectors.size());
}

OneBodyHamiltonianOperator::~OneBodyHamiltonianOperator()
{}

void
test(const unsigned int        n_proc,
     const unsigned int        my_proc,
     const ConditionalOStream &pcout)
{
  const unsigned int nbas      = 64;
  const unsigned int nchannels = 2;

  // Split the nbas * nchannels global rows evenly across all processes.
  const IndexSet local_row_set =
    Utilities::create_evenly_distributed_partitioning(my_proc,
                                                      n_proc,
                                                      nbas * nchannels);

  OneBodyHamiltonianOperator H1(local_row_set, my_proc, n_proc);

  pcout << "OK" << std::endl;
}

int
main(int argc, char **argv)
{
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  MPI_Comm   comm(MPI_COMM_WORLD);
  const auto my_proc = Utilities::MPI::this_mpi_process(comm);
  const auto n_proc  = Utilities::MPI::n_mpi_processes(comm);

  ConditionalOStream pcout(std::cout, (my_proc == 0));

  test(n_proc, my_proc, pcout);

  return 0;
}
