Hello Everyone,
We recently upgraded from slurm-14.11.2 to slurm-14.11.7, and
now it looks like the default is to schedule one job per node, but we would
like one job per core. Below is a copy of our config.
Thanks!
-scz
ControlMachine=xxxxxx
ControlAddr=xxx.xxx.xxx.xxx
AuthType=auth/munge
CacheGroups=0
CryptoType=crypto/munge
DisableRootJobs=NO
MailProg=/bin/mail
MaxJobCount=100000
MpiDefault=none
ProctrackType=proctrack/pgid
ReturnToService=1
SlurmctldPidFile=/var/run/slurmctld.pid
SlurmctldPort=6817
SlurmdPidFile=/var/run/slurmd.pid
SlurmdPort=6818
SlurmdSpoolDir=/var/spool/slurmd
SlurmUser=slurm
StateSaveLocation=/site/slurm/save_state/slurmctld
SwitchType=switch/none
TaskPlugin=task/none
TopologyPlugin=topology/none
UsePAM=0
CompleteWait=0
InactiveLimit=0
KillWait=30
MinJobAge=300
SlurmctldTimeout=120
SlurmdTimeout=300
Waittime=0
DefMemPerCPU=6
FastSchedule=1
SchedulerType=sched/builtin
SchedulerPort=7321
SelectType=select/linear
PreemptMode=SUSPEND,GANG
PreemptType=preempt/partition_prio
SchedulerParameters=max_job_bf=15,interval=20
PriorityType=priority/basic
AccountingStorageType=accounting_storage/none
AccountingStoreJobComment=YES
ClusterName=xxxxxxxx
JobCompType=jobcomp/none
JobAcctGatherFrequency=30
JobAcctGatherType=jobacct_gather/linux
SlurmctldDebug=3
SlurmctldLogFile=/site/slurm/log/slurmctld.log
SlurmdDebug=3
SlurmdLogFile=/site/slurm/log/slurmd.log