Below are all of the places that could contain MCA-related settings.

grep -i mca /usr/mpi/gcc/openmpi-1.4-qlc/etc/openmpi-mca-params.conf
# This is the default system-wide MCA parameters defaults file.
# Specifically, the MCA parameter "mca_param_files" defaults to a
# "$HOME/.openmpi/mca-params.conf:$sysconf/openmpi-mca-params.conf"
# mca_param_files is not changed, this file is used to set system-wide
# MCA parameters.  This file can therefore be used to set system-wide
# default MCA parameters for all users.  Of course, users can override
# for setting system-specific MCA parameters for those users who don't
# during their startup to determine what default values for MCA
# The format is straightforward: one per line, mca_param_name =
# See "ompi_info --param all all" for a full listing of Open MPI MCA

ls -l $HOME/.openmpi/mca-params.conf
ls: /home/dave/.openmpi/mca-params.conf: No such file or directory
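
So there is no user-level file. One caveat on the grep above: a real parameter line follows the "mca_param_name = value" format quoted in the comments, and the parameter name itself need not contain "mca", so a setting such as the purely illustrative line below (name taken from Ralph's reply further down) would not show up in "grep -i mca". It may be worth eyeballing the whole system-wide file as well.

mpi_paffinity_alone = 1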

/usr/mpi/gcc/openmpi-1.4-qlc/bin/mpirun -host c005 -np 1 env|grep -i mca
OMPI_MCA_orte_precondition_transports=db26e173df59af40-3609f2dcfea5746b
OMPI_MCA_orte_local_daemon_uri=999882752.0;tcp://10.1.5.5:60361
OMPI_MCA_orte_hnp_uri=999882752.0;tcp://10.1.5.5:60361
OMPI_MCA_mpi_yield_when_idle=0
OMPI_MCA_orte_app_num=0
OMPI_MCA_ess=env
OMPI_MCA_orte_ess_num_procs=1
OMPI_MCA_orte_ess_jobid=999882753
OMPI_MCA_orte_ess_vpid=0
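
For completeness, the hint in the conf file comments ("ompi_info --param all all") gives one more place to look. Assuming ompi_info sits in the same bin directory as the mpirun used above, something like this lists every affinity-related MCA parameter and its current value:

/usr/mpi/gcc/openmpi-1.4-qlc/bin/ompi_info --param all all | grep -i affinity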

On Wed, Jul 28, 2010 at 10:50 PM, Ralph Castain <r...@open-mpi.org> wrote:
> Something doesn't add up - the default for ompi is to -not- bind. Check your 
> default mca param file and your environment. Do you have any mca params set 
> in them?
>
>
> On Jul 28, 2010, at 9:40 PM, David Akin wrote:
>
>> Here's the exact command I'm running when all threads *are* pinned to
>> a single core:
>>
>> /usr/mpi/gcc/openmpi-1.4-qlc/bin/mpirun -host c005,c006 -np 2 -x OMP_NUM_THREADS=4 hybrid4.gcc
>>
>> Can anyone verify they have the same issue?
>>
>> On Wed, Jul 28, 2010 at 7:52 PM, Ralph Castain <r...@open-mpi.org> wrote:
>>> How are you running it when the threads are all on one core?
>>>
>>> If you are specifying --bind-to-core, then of course all the threads will 
>>> be on one core since we bind the process (not the thread). If you are 
>>> specifying -mca mpi_paffinity_alone 1, then the same behavior results.
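
For concreteness (the command is pieced together from the runs elsewhere in this thread), either of the following would give exactly that one-core behaviour, because the whole process, and with it every OpenMP thread it spawns, ends up bound to a single core:

/usr/mpi/gcc/openmpi-1.4-qlc/bin/mpirun -host c005,c006 -np 2 --bind-to-core -x OMP_NUM_THREADS=4 hybrid4.gcc
/usr/mpi/gcc/openmpi-1.4-qlc/bin/mpirun -host c005,c006 -np 2 -mca mpi_paffinity_alone 1 -x OMP_NUM_THREADS=4 hybrid4.gcc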
>>>
>>> Generally, if you want to bind threads, the only way to do it is with a 
>>> rank file. We -might- figure out a way to provide an interface for 
>>> thread-level binding, but I'm not sure about that right now. As things 
>>> stand, OMPI has no visibility into the fact that your app spawned threads.
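
For reference, a rank file along these lines (hostnames from this thread; the slot ranges are only an example of the 1.4 rankfile syntax described in the mpirun man page) binds each rank to a set of four cores that its OpenMP threads can then spread across:

rank 0=c005 slot=0-3
rank 1=c006 slot=0-3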
>>>
>>>
>>> On Jul 28, 2010, at 5:47 PM, David Akin wrote:
>>>
>>>> All,
>>>> I'm trying to get the OpenMP portion of the code below to run
>>>> multicore on a couple of 8 core nodes.
>>>>
>>>> Good news: multiple threads are being spawned on each node in the run.
>>>> Bad news: each of the threads only runs on a single core, leaving 7
>>>> cores basically idle.
>>>> Sorta good news: if I provide a rank file, I get the threads running on
>>>> different cores within each node (PITA).
>>>>
>>>> Here are the first lines of output:
>>>>
>>>> /usr/mpi/gcc/openmpi-1.4-qlc/bin/mpirun -host c005,c006 -np 2 -rf rank.file -x OMP_NUM_THREADS=4 hybrid4.gcc
>>>>
>>>> Hello from thread 2 out of 4 from process 1 out of 2 on c006.local
>>>> another parallel region:       name:c006.local MPI_RANK_ID=1 OMP_THREAD_ID=2
>>>> Hello from thread 3 out of 4 from process 1 out of 2 on c006.local
>>>> another parallel region:       name:c006.local MPI_RANK_ID=1 OMP_THREAD_ID=3
>>>> Hello from thread 1 out of 4 from process 1 out of 2 on c006.local
>>>> another parallel region:       name:c006.local MPI_RANK_ID=1 OMP_THREAD_ID=1
>>>> Hello from thread 1 out of 4 from process 0 out of 2 on c005.local
>>>> another parallel region:       name:c005.local MPI_RANK_ID=0 OMP_THREAD_ID=1
>>>> Hello from thread 3 out of 4 from process 0 out of 2 on c005.local
>>>> Hello from thread 2 out of 4 from process 0 out of 2 on c005.local
>>>> another parallel region:       name:c005.local MPI_RANK_ID=0 OMP_THREAD_ID=3
>>>> another parallel region:       name:c005.local MPI_RANK_ID=0 OMP_THREAD_ID=2
>>>> Hello from thread 0 out of 4 from process 0 out of 2 on c005.local
>>>> another parallel region:       name:c005.local MPI_RANK_ID=0 OMP_THREAD_ID=0
>>>> Hello from thread 0 out of 4 from process 1 out of 2 on c006.local
>>>> another parallel region:       name:c006.local MPI_RANK_ID=1 OMP_THREAD_ID=0
>>>> another parallel region:       name:c005.local MPI_RANK_ID=0 OMP_THREAD_ID=3
>>>> another parallel region:       name:c005.local MPI_RANK_ID=0 OMP_THREAD_ID=2
>>>> another parallel region:       name:c005.local MPI_RANK_ID=0 OMP_THREAD_ID=0
>>>> another parallel region:       name:c006.local MPI_RANK_ID=1 OMP_THREAD_ID=3
>>>> another parallel region:       name:c005.local MPI_RANK_ID=0 OMP_THREAD_ID=3
>>>> another parallel region:       name:c005.local MPI_RANK_ID=0 OMP_THREAD_ID=2
>>>> another parallel region:       name:c006.local MPI_RANK_ID=1 OMP_THREAD_ID=0
>>>> another parallel region:       name:c006.local MPI_RANK_ID=1 OMP_THREAD_ID=1
>>>> .
>>>> .
>>>> .
>>>>
>>>> Here's the simple code:
>>>> #include <stdio.h>
>>>> #include "mpi.h"
>>>> #include <omp.h>
>>>>
>>>> int main(int argc, char *argv[]) {
>>>>   int numprocs, rank, namelen;
>>>>   char processor_name[MPI_MAX_PROCESSOR_NAME];
>>>>   int iam = 0, np = 1;
>>>>   char name[MPI_MAX_PROCESSOR_NAME];   /* MPI_MAX_PROCESSOR_NAME == 128 */
>>>>   int O_ID;                            /* OpenMP thread ID              */
>>>>   int M_ID;                            /* MPI rank ID                   */
>>>>   int rtn_val;
>>>>
>>>>   MPI_Init(&argc, &argv);
>>>>   MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
>>>>   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
>>>>   MPI_Get_processor_name(processor_name, &namelen);
>>>>
>>>>   #pragma omp parallel default(shared) private(iam, np, O_ID)
>>>>   {
>>>>     np = omp_get_num_threads();
>>>>     iam = omp_get_thread_num();
>>>>     printf("Hello from thread %d out of %d from process %d out of %d on %s\n",
>>>>            iam, np, rank, numprocs, processor_name);
>>>>
>>>>     int i = 0;
>>>>     int j = 0;
>>>>     double counter = 0;
>>>>     for (i = 0; i < 99999999; i++) {
>>>>       O_ID = omp_get_thread_num();          /* get OpenMP thread ID */
>>>>       MPI_Get_processor_name(name, &namelen);
>>>>       rtn_val = MPI_Comm_rank(MPI_COMM_WORLD, &M_ID);
>>>>       printf("another parallel region:       name:%s MPI_RANK_ID=%d OMP_THREAD_ID=%d\n",
>>>>              name, M_ID, O_ID);
>>>>       for (j = 0; j < 999999999; j++) {
>>>>         counter = counter + i;
>>>>       }
>>>>     }
>>>>   }
>>>>
>>>>   MPI_Finalize();
>>>>
>>>> }
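
For anyone wanting to reproduce this, a hybrid binary like hybrid4.gcc would typically be built with the wrapper compiler from the same install plus GCC's OpenMP flag; the mpicc path and the source file name below are assumptions on my part:

/usr/mpi/gcc/openmpi-1.4-qlc/bin/mpicc -fopenmp hybrid4.c -o hybrid4.gcc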