Hi!  On Fedora/RHEL we default to ulimit -Su 1024 (just the soft limit, apparently to better protect non-root users against fork bombs).  The goroutines.go test by default attempts to spawn 10000 threads, which means not only that the goroutines.go test itself fails (no big deal), but also that random other tests that happen to run at the same time (I'm testing with make -j48) sometimes fail too.
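For context, goroutines.go takes the number of goroutines to spawn as its first command-line argument (defaulting to 10000), and with gccgo each goroutine is backed by an OS thread, so that count goes straight against the ulimit -u limit.  The snippet below is not the actual goroutines.go source, just a minimal sketch of a test with that shape, to show why passing a smaller first argument keeps the run under the limit:

package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	// Hypothetical illustration, not the real test: take the goroutine
	// count from the first command-line argument, defaulting to 10000
	// as goroutines.go does.
	n := 10000
	if len(os.Args) > 1 {
		if v, err := strconv.Atoi(os.Args[1]); err == nil {
			n = v
		}
	}
	done := make(chan bool)
	// Under gccgo each of these goroutines occupies an OS thread until
	// it completes, so n of them exist concurrently.
	for i := 0; i < n; i++ {
		go func() { done <- true }()
	}
	for i := 0; i < n; i++ {
		<-done
	}
	fmt.Println("finished", n, "goroutines")
}

With an argument of 256 (a quarter of the 1024 soft limit) such a program stays comfortably inside the limit, while the 10000 default does not.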
The following patch makes sure the test spawns at most $[`ulimit -u`/4] threads when testing natively on Linux, and just limits the number of threads to 64 for cross-testing and other OSes.  Tested on x86_64-linux with various values of ulimit -Su.  Ok for trunk?

2011-03-08  Jakub Jelinek  <ja...@redhat.com>

	* go.test/go-test.exp: For the goroutines.go test pass
	min($[`ulimit -u`/4], 10000) as the first argument where
	possible, or 64 as a safe default.

--- gcc/testsuite/go.test/go-test.exp.jj	2011-01-15 11:26:32.000000000 +0100
+++ gcc/testsuite/go.test/go-test.exp	2011-03-08 13:23:36.078402148 +0100
@@ -265,6 +265,23 @@ proc go-gc-tests { } {
 	    verbose -log "$test: go_execute_args is $go_execute_args"
 	    set index [string last " $progargs" $test_line]
 	    set test_line [string replace $test_line $index end]
+	} elseif { [string match "*go.test/test/chan/goroutines.go" $test] } {
+	    # goroutines.go spawns by default 10000 threads, which is too much
+	    # for many OSes.
+	    set go_execute_args 64
+	    if { [ishost "*-linux*" ] && ![is_remote host] && ![is_remote target] } {
+		# On Linux, when a low ulimit -u limit is in effect, use the
+		# smaller of a quarter of that limit and 10000.
+		set go_execute_args [lindex [remote_exec host {sh -c ulimit\ -u}] 1]
+		if { [string is integer -strict $go_execute_args] } {
+		    set go_execute_args [expr $go_execute_args / 4]
+		    if { $go_execute_args > 10000 } { set go_execute_args 10000 }
+		    if { $go_execute_args < 16 } { set go_execute_args 16 }
+		} else {
+		    set go_execute_args 64
+		}
+	    }
+	    verbose -log "$test: go_execute_args is $go_execute_args"
 	}
 	if { $test_line == "// \$G \$D/\$F\.go && \$L \$F\.\$A && \./\$A\.out >tmp.go &&" \

	Jakub