#======================================================================
# Generic OMPI core performance testing template configuration
#======================================================================

[MTT]
description = [testbake]
trial = 1
min_disk_free = 5000
# Put other values here as relevant to your environment.
hostfile = /root/hostfile
max_np = 8
textwrap = 76
#drain_timeout = 5
scratch = /usr/local/hamster/mtt-build
logfile = /usr/local/hamster/m/log.file

#======================================================================
# MPI details
#======================================================================

[MPI Details: OMPI]
exec = &test_prefix_pretty()/bin/mpirun @hosts@ -np &test_np() @mca@ &test_executable_abspath() &test_argv()
hosts = &if(&have_hostfile(), "--hostfile " . &hostfile(), &if(&have_hostlist(), "--host " . &hostlist(), ""))

#======================================================================
# MPI get phase
#======================================================================

[MPI get: OMPI]
mpi_details = OMPI
module = Download
download_url = http://hdsh020.lss.emc.com/hamster/mtt/openmpi-1.7.2.tar.gz
download_version = 1.7.2

#======================================================================
# Install MPI phase
#======================================================================

[MPI install: OMPI/GNU-standard]
mpi_get = OMPI
mpi_details = OMPI
save_stdout_on_success = 1
merge_stdout_stderr = 1
module = OMPI
ompi_make_all_arguments = -j 2
ompi_compiler_name = gnu
ompi_compiler_version = &get_gcc_version()
ompi_configure_arguments = <<EOT
--with-devel-headers --disable-heterogeneous
EOT
ompi_autogen = 0

#======================================================================
# Test get phase
#======================================================================

[Test get: testsuite]
module = Download
download_url = http://hdsh020.lss.emc.com/hamster/mtt/osu-micro-benchmarks-4.2.tar.gz

#======================================================================
# Test build phase
#======================================================================

[Test build: testsuite]
test_get = testsuite
save_stdout_on_success = 1
merge_stdout_stderr = 1
stderr_save_lines = 1000
module = Shell
shell_build_command = <<EOT
make CC=&test_prefix_pretty()/bin/mpicc
EOT

#======================================================================
# Test Run phase
#======================================================================

[Test run]
pass = &and(&cmd_wifexited(), &eq(&cmd_wexitstatus(), 0))
#timeout = 5:00
timeout = &max(10, &multiply(50, &test_np()))
save_stdout_on_pass = 1
merge_stdout_stderr = 1
stdout_save_lines = 100
stderr_save_lines = 100

[Test run: testsuite]
include_section = Test run
test_build = testsuite
specify_module = Simple
#simple_pass:tests = osu-micro-benchmarks-4.2/mpi/pt2pt/osu_bw
#simple_pass:tests = osu_bw osu_latency osu_bibw
simple_pass:tests = &find_executables("mpi/pt2pt")
#np = &env_max_hosts()
np = 2

#======================================================================
# Reporter
#======================================================================

[Reporter: text file backup]
module = TextFile
textfile_filename = report-$phase-$section-$mpi_name-$mpi_version.txt

# User-defined report headers/footers
textfile_summary_header = <<EOT
hostname: &shell("hostname")
uname: &shell("uname -a")
who am i: &shell("who am i")
EOT
textfile_summary_footer =
textfile_detail_header = Debug Report
textfile_detail_footer =
textfile_textwrap = 78

# Send a digested summary of the MTT run by email
email_to = j...@gopivotal.com
email_subject = OMPI test has completed, status: $overall_mtt_status
email_detailed_report = 1
email_footer = <<EOT
Test Scratch Directory is &scratch_root()
EOT
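An INI file like the one above is fed to the Perl MTT client. As a minimal sketch (the client path and the INI filename below are placeholders, not the exact commands used here), the invocation would look roughly like:

# Sketch only: adjust the client path and INI filename to your checkout
cd /path/to/mtt/client
./mtt --file ompi-osu.ini --scratch /usr/local/hamster/mtt-build --verbose

Here --scratch points at the same scratch directory named in the [MTT] section, and the TextFile reporter generates one report-$phase-$section-... file per phase and section as named by textfile_filename.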
On Wed, Dec 18, 2013 at 9:58 PM, Jeff Squyres (jsquyres) <jsquy...@cisco.com> wrote:
> (adding the mtt-users mailing list; see
> http://www.open-mpi.org/community/lists/mtt.php)
>
> To use MTT, you do not need to be a core member of Open MPI. The wiki
> page you listed simply describes how core Open MPI members use MTT.
>
> If you want to use MTT, it is easiest to first set up the client to do some
> simple test and output to a text file (vs. submitting results to a server).
> Get that working first. Then expand the client to run more and more tests
> (while still outputting results to text files). You can run any tests you
> want -- there's nothing special about the MPI tests that we use internally
> to the Open MPI project. You just need to be able to describe them in the
> MTT client INI file, and specify conditions for passing/failing/skipping.
>
> Once you have all the tests working in the way that you want, you can
> choose to install an MTT database server if you wish. This will give you
> web pages like we have at http://mtt.open-mpi.org/.
>
>
> On Dec 16, 2013, at 11:07 PM, jimmy cao <j...@gopivotal.com> wrote:
>
> > I am trying to deploy MTT in my cluster.
> > The webpage (https://svn.open-mpi.org/trac/mtt/wiki/OMPITesting) seems to
> > cover only how to install the MTT client, and it appears to require Open
> > MPI core membership.
> >
> > Is there some guide on how to install MTT on my cluster (both server and
> > client)?
> >
> > Best,
> > Jimmy
>
> --
> Jeff Squyres
> jsquy...@cisco.com

--
Best Regards!
Haijun Cao (曹海军)
-----------------------------------------------------------
Tel: +86-188 1787 9959
Email: amao....@gmail.com
-----------------------------------------------------------