In perl.git, the branch blead has been updated

<http://perl5.git.perl.org/perl.git/commitdiff/90d141c4a9322a9b132806feee6144ea36c9c7fb?hp=07308ed1589cc2f7837b5d3a1303d200a49b9338>

- Log -----------------------------------------------------------------
commit 90d141c4a9322a9b132806feee6144ea36c9c7fb
Merge: 07308ed 7e71d4a
Author: David Mitchell <[email protected]>
Date:   Sun Oct 26 16:54:08 2014 +0000

    [MERGE] add t/perf/ hierarchy
    
    Add some new files that help with testing and measuring
    performance-related issues.

commit 7e71d4a496397f730c66926f409f9fde1bd11448
Author: David Mitchell <[email protected]>
Date:   Tue Oct 21 15:44:44 2014 +0100

    fix 't/TEST -benchmark'
    
    This has never worked, as it would look for t/benchmark/*.t files in
    the wrong place.

M       t/TEST

commit c7f294b6d22e1201c9d1f826a73160fe467274a0
Author: David Mitchell <[email protected]>
Date:   Tue Oct 21 15:43:01 2014 +0100

    add note about t/perf/ to t/README

M       t/README

commit 24fb648de5857b382fd223b97cbad437c5b724b1
Author: David Mitchell <[email protected]>
Date:   Tue Oct 21 15:26:08 2014 +0100

    add t/perf/benchmarks, t/perf/benchmarks.t
    
    t/perf/benchmarks is a file intended to contain snippets of code
    that can be usefully benchmarked or otherwise profiled.
    
    The basic idea is that any time you add an optimisation that is intended
    to make a particular construct faster, then you should add that construct
    to this file.
    
    Under the normal test suite, the test file benchmarks.t does a basic
    compile and run of each of these snippets; not to test performance,
    but just to ensure that the code doesn't have errors.
    
    Over time, it is intended that various measurement and profiling tools
    will be written that can run selected (or all) snippets in various
    environments. These will not be run as part of a normal test suite run.

M       MANIFEST
A       t/perf/benchmarks
A       t/perf/benchmarks.t

commit b5cbe44b7ce9b6ee60eaca7d1784c0e36070154f
Author: David Mitchell <[email protected]>
Date:   Tue Oct 21 14:03:21 2014 +0100

    add t/perf/speed.t
    
    This test file is similar to /re/speed.t, but to test general-purpose
    optimisations.
    
    The idea is to run snippets of code that are 100s or 1000s of times slower
    if a particular optimisation is broken. We are not so much interested
    in the individual tests passing, as in the whole file failing with a
    watchdog timeout (or just observing that it is running more slowly).

M       MANIFEST
A       t/perf/speed.t

commit 009e0f196385e89101898c54b16e208857612bfc
Author: David Mitchell <[email protected]>
Date:   Tue Oct 21 13:49:10 2014 +0100

    t/perf/optree.t: expand blurb
    
    explain (kind of) why this file is called optree.t

M       t/perf/optree.t

commit 8b405cba157a912a7bf5dcc8b16a5f63c220b328
Author: David Mitchell <[email protected]>
Date:   Tue Oct 21 13:41:16 2014 +0100

    rename t/op/opt.t -> t/perf/optree.t
    
    Now that we have a directory, t/perf/, for performance / optimisation
    tests, move this test file there, and rename it to something slightly
    clearer.

M       MANIFEST
D       t/op/opt.t
A       t/perf/optree.t

commit 560a595899e8c166737114d0d0b77920d9e26dc5
Author: David Mitchell <[email protected]>
Date:   Tue Oct 21 13:25:25 2014 +0100

    add t/perf/, t/perf/opcount.t
    
    Add a new directory designed to hold performance / optimising tests
    and infrastructure, and add the first test file, opcount.t, that
    checks that a sub has the right numbers of particular op types

M       MANIFEST
M       Makefile.SH
M       t/TEST
M       t/harness
A       t/perf/opcount.t
-----------------------------------------------------------------------

Summary of changes:
 MANIFEST                      |  6 +++-
 Makefile.SH                   |  4 ++-
 t/README                      |  4 +++
 t/TEST                        |  4 +--
 t/harness                     |  2 +-
 t/perf/benchmarks             | 45 ++++++++++++++++++++++++++
 t/perf/benchmarks.t           | 47 +++++++++++++++++++++++++++
 t/perf/opcount.t              | 74 +++++++++++++++++++++++++++++++++++++++++++
 t/{op/opt.t => perf/optree.t} |  3 +-
 t/perf/speed.t                | 51 +++++++++++++++++++++++++++++
 10 files changed, 234 insertions(+), 6 deletions(-)
 create mode 100644 t/perf/benchmarks
 create mode 100644 t/perf/benchmarks.t
 create mode 100644 t/perf/opcount.t
 rename t/{op/opt.t => perf/optree.t} (98%)
 create mode 100644 t/perf/speed.t

diff --git a/MANIFEST b/MANIFEST
index ce5f1a4..5a26bdb 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -5250,7 +5250,6 @@ t/op/negate.t                     See if unary minus works
 t/op/not.t                     See if not works
 t/op/numconvert.t              See if accessing fields does not change numeric 
values
 t/op/oct.t                     See if oct and hex work
-t/op/opt.t                     Test presence of some op optimisations
 t/op/ord.t                     See if ord works
 t/op/or.t                      See if || works in weird situations
 t/op/overload_integer.t                See if overload::constant for integer 
works after "use".
@@ -5340,6 +5339,11 @@ t/op/warn.t                      See if warn works
 t/op/while.t                   See if while loops work
 t/op/write.t                   See if write works (formats work)
 t/op/yadayada.t                        See if ... works
+t/perf/benchmarks              snippets of benchmarking/profiling code
+t/perf/benchmarks.t            test t/perf/benchmarks syntax
+t/perf/opcount.t               See if optimised subs have the right op counts
+t/perf/optree.t                        Test presence of some op optimisations
+t/perf/speed.t                 See if optimisations are keeping things fast
 t/perl.supp                    Perl valgrind suppressions
 t/porting/args_assert.t                Check that all PERL_ARGS_ASSERT* macros 
are used
 t/porting/authors.t            Check that all authors have been acknowledged
diff --git a/Makefile.SH b/Makefile.SH
index 3c3efec..7043f3d 100755
--- a/Makefile.SH
+++ b/Makefile.SH
@@ -1513,7 +1513,9 @@ minitest: $(MINIPERL_EXE)
        @echo "to build lib/Config.pm, or the Unicode data files."
        @echo " "
        - cd t && (rm -f $(PERL_EXE); $(LNS) ../$(MINIPERL_EXE) $(PERL_EXE)) \
-               && $(RUN_PERL) TEST base/*.t comp/*.t cmd/*.t run/*.t io/*.t 
re/*.t opbasic/*.t op/*.t uni/*.t </dev/tty
+               && $(RUN_PERL) TEST base/*.t comp/*.t cmd/*.t run/*.t \
+               io/*.t re/*.t opbasic/*.t op/*.t uni/*.t perf/*.t \
+               </dev/tty
 
 # Test via harness
 
diff --git a/t/README b/t/README
index e35af99..97ba6fd 100644
--- a/t/README
+++ b/t/README
@@ -29,3 +29,7 @@ t/base/ directory fail.
 Tests in the t/comp/, t/cmd/, t/run/, t/io/, t/op/ and t/uni/ directories
 should also be runnable by miniperl and not require Config.pm, but
 failures to comply will not cause TEST to abort like for t/base/.
+
+Tests in t/perf/ are designed to test performance and optimisations,
+and also contain additional tools and files designed to run outside
+of the test suite
diff --git a/t/TEST b/t/TEST
index 5d25af6..9772490 100755
--- a/t/TEST
+++ b/t/TEST
@@ -428,7 +428,7 @@ unless (@ARGV) {
     # then comp, to validate that require works
     # then run, to validate that -M works
     # then we know we can -MTestInit for everything else, making life simpler
-    foreach my $dir (qw(base comp run cmd io re opbasic op uni mro)) {
+    foreach my $dir (qw(base comp run cmd io re opbasic op uni mro perf)) {
        _find_tests($dir);
     }
     unless ($::core) {
@@ -464,7 +464,7 @@ unless (@ARGV) {
     push @ARGV, _tests_from_manifest($extensions, $known_extensions);
     unless ($::core) {
        _find_tests('japh') if $::torture;
-       _find_tests('t/benchmark') if $::benchmark or $ENV{PERL_BENCHMARK};
+       _find_tests('benchmark') if $::benchmark or $ENV{PERL_BENCHMARK};
        _find_tests('bigmem') if $ENV{PERL_TEST_MEMORY};
     }
 }
diff --git a/t/harness b/t/harness
index cb3d8d7..30f4b1a 100644
--- a/t/harness
+++ b/t/harness
@@ -134,7 +134,7 @@ if (@ARGV) {
     unless (@tests) {
        my @seq = <base/*.t>;
 
-       my @next = qw(comp run cmd io re opbasic op uni mro lib porting);
+       my @next = qw(comp run cmd io re opbasic op uni mro lib porting perf);
        push @next, 'japh' if $torture;
        push @next, 'win32' if $^O eq 'MSWin32';
        push @next, 'benchmark' if $ENV{PERL_BENCHMARK};
diff --git a/t/perf/benchmarks b/t/perf/benchmarks
new file mode 100644
index 0000000..6424934
--- /dev/null
+++ b/t/perf/benchmarks
@@ -0,0 +1,45 @@
+#!perl
+
+# This file specifies a hash-of-hashes that define snippets of code that
+# can be run by various measurement and profiling tools.
+#
+# The basic idea is that any time you add an optimisation that is intended
+# to make a particular construct faster, then you should add that construct
+# to this file.
+#
+# Under the normal test suite, the test file benchmarks.t does a basic
+# compile and run of each of these snippets; not to test performance,
+# but just to ensure that the code doesn't have errors.
+#
+# Over time, it is intended that various measurement and profiling tools
+# will be written that can run selected (or all) snippets in various
+# environments. These will not be run as part of a normal test suite run.
+#
+# This file is designed to be read in by 'do' (and in such a way that
+# multiple versions of this file from different releases can be read in
+# by a single process).
+#
+# Each key of the top-level hash is a token that describes a particular
+# test. Code will be compiled in the package named after the token, so it
+# should match /^\w+$/a. It is intended that this can be used on the
+# command line of tools to select particular tests, .
+#
+# Each value is also a hash, with three fields:
+#
+#   desc is a description of the test
+#   setup is a string containing setup code
+#   code  is a string containing the code to run in a loop
+#
+# So typically a benchmark tool might do something like
+#
+#   eval "package $token; $setup; for (1..1000000) { $code }"
+
+
+{
+    arg_assignment => {
+        desc    => 'assignment to local vars from @_',
+        setup   => 'sub arg_assignment { my ($a, $b, $c) = @_ }',
+        code    => 'arg_assignment(1,2,3)',
+    },
+};
+
diff --git a/t/perf/benchmarks.t b/t/perf/benchmarks.t
new file mode 100644
index 0000000..c5ec3df
--- /dev/null
+++ b/t/perf/benchmarks.t
@@ -0,0 +1,47 @@
+#!./perl
+#
+# Execute the various code snippets in t/perf/benchmarks
+# to ensure that they are all syntactically correct
+
+BEGIN {
+    chdir 't';
+    require './test.pl';
+    @INC = ('.', '../lib');
+}
+
+use warnings;
+use strict;
+
+
+my $file = 'perf/benchmarks';
+my $benchmarks = do $file;
+die $@ if $@;
+die "$! while trying to read '$file'" if $!;
+die "'$file' did not return a hash ref\n" unless ref $benchmarks eq 'HASH';
+
+plan keys(%$benchmarks) * 3;
+
+
+# check the hash of hashes is minimally consistent in format
+
+for my $token (sort keys %$benchmarks) {
+    like($token, qr/^[a-zA-z]\w*$/a, "legal token: $token");
+    my $keys = join('-', sort keys %{$benchmarks->{$token}});
+    is($keys, 'code-desc-setup', "legal keys:  $token");
+}
+
+# check that each bit of code compiles and runs
+
+for my $token (sort keys %$benchmarks) {
+    my $b = $benchmarks->{$token};
+    my $code = "package $token; $b->{setup}; for (1..1) { $b->{code} } 1;";
+    ok(eval $code, "running $token")
+        or do {
+            diag("code:");
+            diag($code);
+            diag("gave:");
+            diag($@);
+        }
+}
+
+
diff --git a/t/perf/opcount.t b/t/perf/opcount.t
new file mode 100644
index 0000000..8897604
--- /dev/null
+++ b/t/perf/opcount.t
@@ -0,0 +1,74 @@
+#!./perl
+#
+# opcount.t
+#
+# Test whether various constructs have the right numbers of particular op
+# types. This is chiefly to test that various optimisations are not
+# inadvertently removed.
+#
+# For example the array access in sub { $a[0] } should get optimised from
+# aelem into aelemfast. So we want to test that there are 1 aelemfast, 0
+# aelem and 1 ex-aelem ops in the optree for that sub.
+
+BEGIN {
+    chdir 't';
+    require './test.pl';
+    skip_all_if_miniperl("No B under miniperl");
+    @INC = '../lib';
+}
+
+plan 3;
+
+use B ();
+
+
+{
+    my %counts;
+
+    # for a given op, increment $count{opname}. Treat null ops
+    # as "ex-foo" where possible
+
+    sub B::OP::test_opcount_callback {
+        my ($op) = @_;
+        my $name = $op->name;
+        if ($name eq 'null') {
+            my $targ = $op->targ;
+            if ($targ) {
+                $name = "ex-" . substr(B::ppname($targ), 3);
+            }
+        }
+        $counts{$name}++;
+    }
+
+    # Given a code ref and a hash ref of expected op counts, check that
+    # for each opname => count pair, whether that op appears that many
+    # times in the op tree for that sub. If $debug is 1, display all the
+    # op counts for the sub.
+
+    sub test_opcount {
+        my ($debug, $desc, $coderef, $expected_counts) = @_;
+
+        %counts = ();
+        B::walkoptree(B::svref_2object($coderef)->ROOT,
+                        'test_opcount_callback');
+
+        if ($debug) {
+            note(sprintf "%3d %s", $counts{$_}, $_) for sort keys %counts;
+        }
+
+        for (sort keys %$expected_counts) {
+            is ($counts{$_}//0, $expected_counts->{$_}, "$desc: $_");
+        }
+    }    
+}
+
+# aelem => aelemfast: a basic test that this test file works
+
+test_opcount(0, "basic aelemfast",
+                sub { $a[0] = 1 }, 
+                {
+                    aelem      => 0,
+                    aelemfast  => 1,
+                    'ex-aelem' => 1,
+                }
+            );
diff --git a/t/op/opt.t b/t/perf/optree.t
similarity index 98%
rename from t/op/opt.t
rename to t/perf/optree.t
index ef8649f..dac0a25 100644
--- a/t/op/opt.t
+++ b/t/perf/optree.t
@@ -1,6 +1,7 @@
 #!./perl
 
-# Use B to test that optimisations are not inadvertently removed.
+# Use B to test that optimisations are not inadvertently removed,
+# by examining particular nodes in the optree.
 
 BEGIN {
     chdir 't';
diff --git a/t/perf/speed.t b/t/perf/speed.t
new file mode 100644
index 0000000..43d09bb
--- /dev/null
+++ b/t/perf/speed.t
@@ -0,0 +1,51 @@
+#!./perl
+#
+# All the tests in this file are ones that run exceptionally slowly
+# (each test taking seconds or even minutes) in the absence of particular
+# optimisations. Thus it is a sort of canary for optimisations being
+# broken.
+#
+# Although it includes a watchdog timeout, this is set to a generous limit
+# to allow for running on slow systems; therefore a broken optimisation
+# might be indicated merely by this test file taking unusually long to
+# run, rather than actually timing out.
+#
+
+use strict;
+use warnings;
+use 5.010;
+
+sub run_tests;
+
+$| = 1;
+
+
+BEGIN {
+    chdir 't' if -d 't';
+    @INC = ('../lib');
+    require Config; import Config;
+    require './test.pl';
+}
+
+plan tests => 1;
+
+use warnings;
+use strict;
+
+watchdog(60);
+
+SKIP: {
+    # RT #121975 COW speedup lost after e8c6a474
+
+    # without COW, this test takes minutes; with COW, its less than a
+    # second
+    #
+    skip  "PERL_NO_COW", 1 if $Config{ccflags} =~ /PERL_NO_COW/;
+
+    my ($x, $y);
+    $x = "x" x 1_000_000;
+    $y = $x for 1..1_000_000;
+    pass("COW 1Mb strings");
+}
+
+1;

--
Perl5 Master Repository

Reply via email to