Hello,

I just installed PyFR 0.8.0.

The 2D Euler vortex example works perfectly with Intel MPI 15, but I am
having issues running it with OpenMPI (I tried OpenMPI 1.8.5 and 1.8.1).

This is the error I am getting:

bash-4.2$ pyfr import euler_vortex_2d.msh euler_vortex_2d.pyfrm
bash-4.2$ pyfr partition 2 euler_vortex_2d.pyfrm .
bash-4.2$ mpirun -n 2 pyfr run -p euler_vortex_2d.pyfrm euler_vortex_2d.ini
Traceback (most recent call last):
  File "/ssd/apps/python-3.4.3/bin/pyfr", line 9, in <module>
    load_entry_point('pyfr==0.8.0', 'console_scripts', 'pyfr')()
  File "/ssd/apps/python-3.4.3/lib/python3.4/site-packages/mpmath/ctx_mp.py", line 1301, in g
    return f(*args, **kwargs)
  File "/ssd/apps/python-3.4.3/lib/python3.4/site-packages/pyfr-0.8.0-py3.4.egg/pyfr/scripts/main.py", line 126, in main
  File "/ssd/apps/python-3.4.3/lib/python3.4/site-packages/pyfr-0.8.0-py3.4.egg/pyfr/scripts/main.py", line 247, in process_run
  File "/ssd/apps/python-3.4.3/lib/python3.4/site-packages/pyfr-0.8.0-py3.4.egg/pyfr/scripts/main.py", line 228, in _process_common
  File "/ssd/apps/python-3.4.3/lib/python3.4/site-packages/pyfr-0.8.0-py3.4.egg/pyfr/rank_allocator.py", line 14, in get_rank_allocation
  File "/ssd/apps/python-3.4.3/lib/python3.4/site-packages/pyfr-0.8.0-py3.4.egg/pyfr/rank_allocator.py", line 32, in __init__
RuntimeError: Mesh has 2 partitions but running with 1 MPI ranks
(The second process prints an identical traceback.)
-------------------------------------------------------
Primary job  terminated normally, but 1 process returned
a non-zero exit code.. Per user-direction, the job has been aborted.
-------------------------------------------------------
--------------------------------------------------------------------------
mpirun detected that one or more processes exited with non-zero status, thus
causing the job to be terminated. The first process to do so was:

  Process name: [[41831,1],0]
  Exit code:    1
--------------------------------------------------------------------------
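
In case it helps with diagnosis, a minimal mpi4py script along these lines
should show whether each launched process actually sees a world communicator
of size 2 (I am assuming PyFR takes its rank count from mpi4py;
check_world.py is just a name I made up):

# check_world.py -- made-up helper, not part of PyFR
# Run with: mpirun -n 2 python3 check_world.py
from mpi4py import MPI

comm = MPI.COMM_WORLD
print('rank', comm.Get_rank(), 'of', comm.Get_size())

If both processes print "of 1", then the size-1 communicator is coming from
mpi4py itself rather than from PyFR.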

It is strange that it says it is running with 1 MPI rank. These are the
environment variables defined in each process when mpirun launches the job:

declare -x MPIHOME="/ssd/apps/openmpi-1.8.1-intel15"
declare -x OMPI_APP_CTX_NUM_PROCS="2"
declare -x OMPI_COMMAND="test.sh"
declare -x OMPI_COMM_WORLD_LOCAL_RANK="0"
declare -x OMPI_COMM_WORLD_LOCAL_SIZE="2"
declare -x OMPI_COMM_WORLD_NODE_RANK="0"
declare -x OMPI_COMM_WORLD_RANK="0"
declare -x OMPI_COMM_WORLD_SIZE="2"
declare -x OMPI_FILE_LOCATION="/tmp/openmpi-sessions-prestrepo@goethe-0_0/46108/1/0"
declare -x OMPI_FIRST_RANKS="0"
declare -x OMPI_MCA_db="^pmi"
declare -x OMPI_MCA_ess="env"
declare -x OMPI_MCA_ess_base_jobid="3021733889"
declare -x OMPI_MCA_ess_base_vpid="0"
declare -x OMPI_MCA_grpcomm="^pmi"
declare -x OMPI_MCA_initial_wdir="/ssd/home/prestrepo/PyFR-0.8.0/examples/my-euler_vortex_2d"
declare -x OMPI_MCA_mpi_yield_when_idle="0"
declare -x OMPI_MCA_orte_app_num="0"
declare -x OMPI_MCA_orte_bound_at_launch="1"
declare -x OMPI_MCA_orte_ess_node_rank="0"
declare -x OMPI_MCA_orte_ess_num_procs="2"
declare -x OMPI_MCA_orte_hnp_uri="3021733888.0;tcp://128.210.9.6,192.168.2.2:40127"
declare -x OMPI_MCA_orte_local_daemon_uri="3021733888.0;tcp://128.210.9.6,192.168.2.2:40127"
declare -x OMPI_MCA_orte_num_nodes="1"
declare -x OMPI_MCA_orte_num_restarts="0"
declare -x OMPI_MCA_orte_peer_fini_barrier_id="2"
declare -x OMPI_MCA_orte_peer_init_barrier_id="1"
declare -x OMPI_MCA_orte_peer_modex_id="0"
declare -x OMPI_MCA_orte_precondition_transports="6dc9667b72b7564c-f67f87f2f974c41a"
declare -x OMPI_MCA_orte_tmpdir_base="/tmp"
declare -x OMPI_MCA_pubsub="^pmi"
declare -x OMPI_MCA_shmem_RUNTIME_QUERY_hint="mmap"
declare -x OMPI_NUM_APP_CTX="1"
declare -x OMPI_UNIVERSE_SIZE="1"

declare -x MPIHOME="/ssd/apps/openmpi-1.8.1-intel15"
declare -x OMPI_APP_CTX_NUM_PROCS="2"
declare -x OMPI_COMMAND="test.sh"
declare -x OMPI_COMM_WORLD_LOCAL_RANK="1"
declare -x OMPI_COMM_WORLD_LOCAL_SIZE="2"
declare -x OMPI_COMM_WORLD_NODE_RANK="1"
declare -x OMPI_COMM_WORLD_RANK="1"
declare -x OMPI_COMM_WORLD_SIZE="2"
declare -x OMPI_FILE_LOCATION="/tmp/openmpi-sessions-prestrepo@goethe-0_0/46108/1/1"
declare -x OMPI_FIRST_RANKS="0"
declare -x OMPI_MCA_db="^pmi"
declare -x OMPI_MCA_ess="env"
declare -x OMPI_MCA_ess_base_jobid="3021733889"
declare -x OMPI_MCA_ess_base_vpid="1"
declare -x OMPI_MCA_grpcomm="^pmi"
declare -x OMPI_MCA_initial_wdir="/ssd/home/prestrepo/PyFR-0.8.0/examples/my-euler_vortex_2d"
declare -x OMPI_MCA_mpi_yield_when_idle="0"
declare -x OMPI_MCA_orte_app_num="0"
declare -x OMPI_MCA_orte_bound_at_launch="1"
declare -x OMPI_MCA_orte_ess_node_rank="1"
declare -x OMPI_MCA_orte_ess_num_procs="2"
declare -x OMPI_MCA_orte_hnp_uri="3021733888.0;tcp://128.210.9.6,192.168.2.2:40127"
declare -x OMPI_MCA_orte_local_daemon_uri="3021733888.0;tcp://128.210.9.6,192.168.2.2:40127"
declare -x OMPI_MCA_orte_num_nodes="1"
declare -x OMPI_MCA_orte_num_restarts="0"
declare -x OMPI_MCA_orte_peer_fini_barrier_id="2"
declare -x OMPI_MCA_orte_peer_init_barrier_id="1"
declare -x OMPI_MCA_orte_peer_modex_id="0"
declare -x OMPI_MCA_orte_precondition_transports="6dc9667b72b7564c-f67f87f2f974c41a"
declare -x OMPI_MCA_orte_tmpdir_base="/tmp"
declare -x OMPI_MCA_pubsub="^pmi"
declare -x OMPI_MCA_shmem_RUNTIME_QUERY_hint="mmap"
declare -x OMPI_NUM_APP_CTX="1"
declare -x OMPI_UNIVERSE_SIZE="1"
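
In case the problem is on the Python side, my understanding is that something
like the following should report which MPI library mpi4py was built against
and which one it picks up at runtime (using mpi4py.get_config() and
MPI.get_vendor(); please correct me if there is a better way to check):

# check_mpi4py.py -- made-up helper for checking the mpi4py/MPI pairing
import mpi4py
from mpi4py import MPI

print(mpi4py.get_config())   # compiler/MPI settings recorded when mpi4py was built
print(MPI.get_vendor())      # e.g. ('Open MPI', (1, 8, 1))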

Thank you in advance for your help.

Pablo Restrepo.
