In perl.git, the branch blead has been updated

<http://perl5.git.perl.org/perl.git/commitdiff/b2a691af8242035dab97bd4c05b77ec3ad88955b?hp=69929f1d944a854768112310eafb7cca2020bd6c>

- Log -----------------------------------------------------------------
commit b2a691af8242035dab97bd4c05b77ec3ad88955b
Merge: 69929f1 ec7e59f
Author: Karl Williamson <[email protected]>
Date:   Thu Mar 5 21:49:17 2015 -0700

    Add fixes for testing EBCDIC to blead
    
    This branch contains fixes to test files so that they pass on both ASCII
    and EBCDIC platforms.  Some of these are a couple of years old, and have
    been lingering in the EBCDIC branch until now.
    
    These changes are all to core code that is not dual-lifed, and that
    doesn't have any other complications.  Future commits will deal with
    the ones left out of this commit.
    
    Some commits generalize the test to work on any platform, including one
    that is neither ASCII nor EBCDIC, should such a platform ever exist,
    unlikely as that may be.  The subjects of these commits say something
    like "generalize to non-ASCII platforms."
    
    Some commits rely on EBCDIC and ASCII properties, and would have to be
    revised if Perl were ever to work on a platform with some other
    character set.  These typically mention EBCDIC in their subject lines.
    
    Some commits merely skip some tests on EBCDIC platforms.  This was done
    for one of several reasons.  One is that the test is specific to a
    particular EBCDIC code page, and I did not wish to expend my time in
    handling all of those.  A similar one is that converting the test would
    take a significant amount of work, too much for me to undertake, and I
    don't think the value added would be worth it.  And some are skipped
    simply because the inherent qualities of EBCDIC don't fit.  For example,
    UTF-EBCDIC doesn't have the capacity to handle the very large code points
    that can be handled on 64-bit ASCII platforms.  Or the tests exist only
    for porting work done when we generate a Perl to ship, which we will
    never do on an EBCDIC platform.
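    
    For reference, the platform check these patches rely on is the native
    code point of "A" (65 on ASCII platforms, 193 on EBCDIC ones), as can be
    seen in the Porting/pod_rules.pl and fetch_pad_names.t hunks in the diff
    below.  A minimal sketch of the whole-file skip form:
    
        # Skip an entire test file when running on an EBCDIC platform.
        if (ord("A") == 193) {
            print "1..0 # Skip: test is ASCII-specific\n";
            exit 0;
        }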

commit ec7e59fb750d2ce33b999be5facb60d03b39a429
Author: Karl Williamson <[email protected]>
Date:   Wed Feb 4 13:30:48 2015 -0700

    t/lib/croak/op: Fix to work on EBCDIC platforms

M       t/lib/croak/op

commit 666b3df019f7f993b705b99cc47d5c4f3b754e0f
Author: Karl Williamson <[email protected]>
Date:   Wed Nov 26 19:55:23 2014 -0700

    t/op/sprintf.t: Generalize for EBCDIC
    
    This adds a mechanism for tests to be ASCII or EBCDIC specific

M       t/op/sprintf.t

commit dd5aa7ebc1596d7d5f6940325eed090866489d98
Author: Karl Williamson <[email protected]>
Date:   Wed Nov 26 10:54:54 2014 -0700

    porting/checkcfgvar.t: Skip on EBCDIC platforms
    
    This is failing on EBCDIC, probably because of sort order differences.
    But we won't be packaging a release from one of these platforms; easiest
    to just skip.
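    
    For context, one such sort-order difference is that digits and letters
    order differently in the two character sets (the numbers_first() helper
    added for the Hash-Util tests in the diff below works around the same
    thing); a tiny illustration, not taken from the commit:
    
        my @sorted = sort 'a', 'B', '1';
        # ASCII/Latin-1 platforms: ('1', 'B', 'a')   digits sort lowest
        # EBCDIC 1047 platforms:   ('a', 'B', '1')   digits sort highest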

M       t/porting/checkcfgvar.t

commit b35338b63dfa5471402eab32a90decb92220b74c
Author: Karl Williamson <[email protected]>
Date:   Wed Nov 26 10:47:17 2014 -0700

    t/op/bop.t: Generalize for EBCDIC platforms
    
    This also indents some lines that either should have been indented all
    along or that come from a newly formed block.

M       t/op/bop.t

commit 61900f46d20c06eb5ce7ef3901c4907472310e06
Author: Karl Williamson <[email protected]>
Date:   Mon Nov 24 11:19:22 2014 -0700

    t/uni/case.pl: Fix to work on non-ASCII platforms
    
    This involves changes to the three test files that call it

M       t/uni/case.pl
M       t/uni/lower.t
M       t/uni/title.t
M       t/uni/upper.t

commit d6fdb726ca09b2535606ec9b195d4983cbcf80e0
Author: Karl Williamson <[email protected]>
Date:   Sat Oct 25 21:42:34 2014 -0600

    t/op/tie.t: Skip a couple tests on EBCDIC
    
    These have different results on that platform.

M       t/op/tie.t

commit 67d0123320d7d6168d0280fe413571d48aba0d13
Author: Karl Williamson <[email protected]>
Date:   Sat Oct 25 21:27:54 2014 -0600

    t/op/inc.t: Generalize for EBCDIC

M       t/op/inc.t

commit 81452d217627ce0989f666ac599acdc39ae9929d
Author: Karl Williamson <[email protected]>
Date:   Sat Oct 25 21:27:20 2014 -0600

    t/op/ver.t: Generalize for EBCDIC

M       t/op/ver.t

commit 20062bee5a773333758349c845a963a4dc41cb80
Author: Karl Williamson <[email protected]>
Date:   Sat Oct 25 21:26:20 2014 -0600

    t/lib/warnings/regcomp: SKIP some EBCDIC tests
    
    The result varies depending on EBCDIC code page

M       t/lib/warnings/regcomp

commit 9b513004090114f9a637a1c8d04bdc1d2c91ca5d
Author: Karl Williamson <[email protected]>
Date:   Wed Jan 14 12:42:41 2015 -0700

    lib/overload.t: Generalize for EBCDIC

M       lib/overload.t

commit aa0bd1ca4055207ac3a3da601a4c05d1adef6971
Author: Karl Williamson <[email protected]>
Date:   Wed Oct 22 10:17:40 2014 -0600

    PerlIO-encoding/t/encoding.t: Generalize for non-ASCII platform

M       ext/PerlIO-encoding/t/encoding.t

commit bdf3ec5efeabede3c441b149686008dba865417b
Author: Karl Williamson <[email protected]>
Date:   Tue Oct 21 18:55:27 2014 -0600

    t/lib/warnings/op: Skip some tests on EBCDIC
    
    This splits a longer test into two smaller ones; the first is skipped on
    EBCDIC because its result varies depending on the code page.

M       t/lib/warnings/op

commit 46b78aa8ea94ffeb9e204b96e2bd525424868f9b
Author: Karl Williamson <[email protected]>
Date:   Thu Oct 16 21:16:24 2014 -0600

    t/lib/warnings/toke: Skip some tests on EBCDIC
    
    The results depend on the particular EBCDIC platform being used, so I
    don't think it is worth the effort to generalize them.

M       t/lib/warnings/toke

commit 2da5b9bef2ef557a6978ec45042e29fa38e9bade
Author: Karl Williamson <[email protected]>
Date:   Thu Oct 16 21:07:11 2014 -0600

    POSIX/t/export.t: Generalize for non-ASCII platforms

M       ext/POSIX/t/export.t

commit fd91387e10ab7d50a5c0909b00e5d25a9cbfa9bc
Author: Karl Williamson <[email protected]>
Date:   Thu Oct 16 21:03:50 2014 -0600

    Pod-Functions/Functions_pm.PL: Generalize for non-ASCII platforms

M       ext/Pod-Functions/Functions_pm.PL

commit 9b7ae71ac101a3cc31f1bd240a7a3dd426ada2fc
Author: Karl Williamson <[email protected]>
Date:   Thu Oct 16 21:00:56 2014 -0600

    Hash-Util tests: Generalize for non-ASCII platforms

M       ext/Hash-Util-FieldHash/t/05_perlhook.t
M       ext/Hash-Util/t/Util.t

commit 26b21b99df35931fee21504247fd1ac98edc0493
Author: Karl Williamson <[email protected]>
Date:   Thu Oct 16 20:50:29 2014 -0600

    t/op/override.t: Generalize for non-ASCII platforms

M       t/op/override.t

commit 6c0c4c6562c00d0afb4ace8208466866b85eaad7
Author: Karl Williamson <[email protected]>
Date:   Sun May 11 19:34:08 2014 -0600

    t/op/chars.t: Generalize to not use code page-specific values
    
    In EBCDIC, \c? resolves to the APC character.  We can use table lookup
    to find out what that is on the current platform, without having to know
    what code page we are using.
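    
    A rough sketch of that kind of lookup, borrowing the
    utf8::unicode_to_native() call this series uses elsewhere (for example in
    the lib/dumpvar.pl hunk below):
    
        # APC is U+009F in Unicode; ask for whatever native code point that
        # maps to on the current platform's code page.
        my $apc = chr utf8::unicode_to_native(0x9F);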

M       t/op/chars.t

commit 425224e1aa3143d7b00f920815709497b1ad8120
Author: Karl Williamson <[email protected]>
Date:   Wed Jun 26 18:23:27 2013 -0600

    regexp_unicode_prop.t: Generalize for non-ASCII platforms

M       t/re/regexp_unicode_prop.t

commit b806f728a908c99fe4964c174a7be5f5808efa9c
Author: Karl Williamson <[email protected]>
Date:   Wed Jun 26 15:25:37 2013 -0600

    t/lib/feature/implicit: Generalize for EBCDIC platforms

M       t/lib/feature/implicit

commit c266f7b3af0c491c48c5cf9e914e953734571ce6
Author: Karl Williamson <[email protected]>
Date:   Mon May 20 22:01:02 2013 -0600

    ext/XS-APItest/t/hash.t: Generalize to run on non-ASCII platforms

M       ext/XS-APItest/t/hash.t

commit 740b32c3da0d3adbb96e6d0088ca25c4ede1b242
Author: Karl Williamson <[email protected]>
Date:   Mon May 20 21:54:01 2013 -0600

    t/lib/feature/implicit: Generalize for non-ASCII platforms

M       t/lib/feature/implicit

commit 33eaa0ecb97c73a5c6445aecb6d2a2f96b37a76d
Author: Karl Williamson <[email protected]>
Date:   Fri Apr 19 13:58:12 2013 -0600

    t/op/coreamp.t: Generalize for non-ASCII platforms

M       t/op/coreamp.t

commit b41acb1e5af84f866819f86552533d33e53683fb
Author: Karl Williamson <[email protected]>
Date:   Wed Apr 17 21:49:10 2013 -0600

    t/re/re_tests: Some tests are platform-specific

M       t/re/re_tests

commit ecb8e4d9998a70b806df79a61422f1ef50f60185
Author: Karl Williamson <[email protected]>
Date:   Wed Apr 17 08:22:36 2013 -0600

    t/io/crlf.t: Generalize for non-ASCII platforms

M       t/io/crlf.t

commit 4215ab170c785ff0ab6d5ce600321cab9119037a
Author: Karl Williamson <[email protected]>
Date:   Tue Apr 16 12:13:07 2013 -0600

    ext/B/t/b.t: Generalize for non-ASCII platforms

M       ext/B/t/b.t

commit 8012741c6fc4e36174032a1f8bcf1a35c3bcf320
Author: Karl Williamson <[email protected]>
Date:   Tue Apr 16 11:50:04 2013 -0600

    t/op/warn.t: Generalize for non-ASCII platforms

M       t/op/warn.t

commit d23552d7483588e2d6832104125db89f103d960f
Author: Karl Williamson <[email protected]>
Date:   Tue Apr 16 10:18:02 2013 -0600

    re/reg_email.t: Generalize for non-ASCII platforms
    
    This replaces all the hard-coded hex character values.  It uses the new
    (?[ ]) notation.  I checked that the compiled regex matches the exact
    same code points as before these changes.
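    
    For readers unfamiliar with it, (?[ ]) is the extended bracketed
    character class ("regex sets") syntax, which builds a class from set
    operations on properties instead of hard-coded code points.  An
    illustrative fragment (the variable name is invented, not taken from the
    test):
    
        no warnings 'experimental::regex_sets';  # experimental at this time
    
        # Defined by properties, so it means the same thing on any code page.
        my $ascii_alpha = qr/(?[ \p{Alpha} & \p{ASCII} ])/;
        print "matched\n" if "x" =~ $ascii_alpha;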

M       t/re/reg_email.t

commit d494534a3eb27afdca325ad344fcd9ac0fa53c88
Author: Karl Williamson <[email protected]>
Date:   Sat Apr 13 16:19:20 2013 -0600

    ext/XS-APItest/t/svpeek.t: Extend to EBCDIC platforms

M       ext/XS-APItest/t/svpeek.t

commit ca92e4c33b1733da3b118f2f7eebb56afe9bd242
Author: Karl Williamson <[email protected]>
Date:   Sat Apr 13 16:14:35 2013 -0600

    ext/XS-APItest/t/svpv_magic.t: Generalize for non-ASCII platforms

M       ext/XS-APItest/t/svpv_magic.t

commit d591e493a33e88c91fa96535ee572f21b4575e8c
Author: Karl Williamson <[email protected]>
Date:   Sat Apr 13 15:35:52 2013 -0600

    lib/utf8.t: Generalize for non-ASCII platforms
    
    This includes choosing a different code point that has 3 bytes in both
    UTF-8 and UTF-EBCDIC, so that the pos numbers work for both.
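    
    As a concrete illustration (assuming the standard UTF-EBCDIC byte ranges;
    U+3FFF is the code point the updated test uses, per the hunk below):
    
        my $s = chr 0x3FFF;
        utf8::encode($s);                # to UTF-8, or UTF-EBCDIC on EBCDIC
        printf "%d bytes\n", length $s;  # prints "3 bytes" on either family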

M       lib/utf8.t

commit 28642c96cf5096bd5790d99277a8ec36578dbb15
Author: Karl Williamson <[email protected]>
Date:   Sat Apr 13 15:16:44 2013 -0600

    t/uni/parser.t: Extend to EBCDIC platforms

M       t/uni/parser.t

commit 1157e7160df3349dcc06ff44f1ec37daf8be7a9c
Author: Karl Williamson <[email protected]>
Date:   Sat Apr 13 14:41:46 2013 -0600

    t/uni/method.t: Extend to EBCDIC platforms
    
    I couldn't figure out a way to not use the hard-coded values

M       t/uni/method.t

commit 73676ccb48c0c7233e12ac8f20195e9e4926bf2c
Author: Karl Williamson <[email protected]>
Date:   Sat Apr 13 14:26:09 2013 -0600

    t/op/magic.t: Generalize for non-ASCII platforms

M       t/op/magic.t

commit c9e22b5f66fdf72097a7cf45fac8ab2dff9eb11e
Author: Karl Williamson <[email protected]>
Date:   Sat Apr 13 13:36:41 2013 -0600

    t/io/through.t: Generalize for EBCDIC platforms
    
    This uses hard-coded values for EBCDIC because of the shell issues

M       t/io/through.t

commit 9a375827f28249ed0c6a6c477df0ab4f89df5250
Author: Karl Williamson <[email protected]>
Date:   Sun Apr 7 10:45:14 2013 -0600

    t/op/goto.t: Generalize for non-ASCII platforms

M       t/op/goto.t

commit 6d937e95f0584cd22f688adc0df3b4cf1a392fc8
Author: Karl Williamson <[email protected]>
Date:   Wed Apr 3 20:15:17 2013 -0600

    t/re/pat.t: Generalize for EBCDIC

M       t/re/pat.t

commit f6fca3198e7995dd27df5b55111cb9c7142dd86b
Author: Karl Williamson <[email protected]>
Date:   Fri Apr 5 23:34:50 2013 -0600

    t/uni/overload.t: Generalize for non-ASCII platforms

M       t/uni/overload.t

commit c326159a49c4bf2470dc9a29b30caa296cbbd41f
Author: Karl Williamson <[email protected]>
Date:   Fri Apr 5 23:34:13 2013 -0600

    t/uni/method.t: Generalize for non-ASCII platforms

M       t/uni/method.t

commit 7abb7a1816c9420728edc90522e3b35c382daf19
Author: Karl Williamson <[email protected]>
Date:   Fri Apr 5 23:33:28 2013 -0600

    t/op/utf8magic.t: Generalize for non-ASCII platforms

M       t/op/utf8magic.t

commit ee3893662647148cb9cea55151d7a70831c148ff
Author: Karl Williamson <[email protected]>
Date:   Fri Apr 5 23:32:57 2013 -0600

    t/op/evalbytes.t: Generalize for non-ASCII platforms

M       t/op/evalbytes.t

commit 0dc941edef7722e711d8dec55166524a21ef13d2
Author: Karl Williamson <[email protected]>
Date:   Sat Apr 6 13:01:54 2013 -0600

    t/op/utfhash.t: Fix to work on EBCDIC 1047
    
    This .t assumed it could have an __END__ in the middle of its DATA input,
    with the first read reading everything up to that point and a second read
    reading the rest.  In bisecting, I couldn't find a time when this ever
    worked.  Now the file uses a marker instead, splits the DATA on that
    marker, and chooses the first or second half depending on the platform.
    
    Note that this only works on ASCII and EBCDIC 1047 platforms.  It could
    be extended for the other code pages Perl purportedly supports.
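    
    A rough sketch of that approach (the marker and variable names here are
    invented, not taken from the test):
    
        # Slurp all of DATA once, split on a marker line, and keep whichever
        # half matches the running platform's character set.
        my $data = do { local $/; <DATA> };
        my ($ascii_half, $ebcdic_1047_half) = split /^__SPLIT__\n/m, $data, 2;
        my $input = (ord("A") == 65) ? $ascii_half : $ebcdic_1047_half;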

M       t/op/utfhash.t

commit 6aec1cbce591f72d7d2a84ddf67770686b04ae11
Author: Karl Williamson <[email protected]>
Date:   Mon Apr 1 22:29:16 2013 -0600

    t/re/pat_re_eval.t: Generalize for non-ASCII platforms

M       t/re/pat_re_eval.t

commit efb52d862c609093c9def6f4bbf6215b73bb3d7e
Author: Karl Williamson <[email protected]>
Date:   Mon Apr 1 21:08:20 2013 -0600

    t/op/split.t: EBCDIC fixes

M       t/op/split.t

commit f36cc39db15434394acba04693c251356b7764f1
Author: Karl Williamson <[email protected]>
Date:   Mon Apr 1 20:43:03 2013 -0600

    re/pat_advanced.t: EBCDIC fixes
    
    This includes no longer skipping some tests on EBCDIC that formerly were
    skipped, since we now have testing infrastructure that makes this easy.

M       t/re/pat_advanced.t

commit aa7000826199676690f6ab15ffeae05867f08b7f
Author: Karl Williamson <[email protected]>
Date:   Mon Apr 1 20:01:04 2013 -0600

    t/io/utf8.t: EBCDIC fixes

M       t/io/utf8.t

commit 93d84a32a3b6373ca71cdd5549e314902c7c538c
Author: Karl Williamson <[email protected]>
Date:   Sat Mar 30 12:32:09 2013 -0600

    t/uni/fold.t: Generalize for non-ASCII platforms

M       t/uni/fold.t

commit b98878208a3c9cdebe078ff7dee3bdb16a89846e
Author: Karl Williamson <[email protected]>
Date:   Wed Mar 27 16:55:55 2013 -0600

    lib/feature/bundle: Generalize for non-ASCII platforms

M       t/lib/feature/bundle

commit efc41636f5f4ce822cb89fa427605b486166a7f3
Author: Karl Williamson <[email protected]>
Date:   Wed Mar 27 16:08:04 2013 -0600

    XS-APItest/t/fetch_pad_names.t: Skip if EBCDIC
    
    This could be ported, but there's a lot of stuff to convert

M       ext/XS-APItest/t/fetch_pad_names.t

commit 4deba822d105ded9bee2a0c8e50f46af3ca1aa93
Author: Karl Williamson <[email protected]>
Date:   Wed Mar 27 12:05:53 2013 -0600

    ext/XS-APItest/t/utf8.t: Fix so passes EBCDIC
    
    This involves skipping many of the tests, because it would be painful to
    generalize them for the different EBCDIC code pages.

M       ext/XS-APItest/t/utf8.t

commit 0516404baa9906c79101b3a98ce5c43791c354c2
Author: Karl Williamson <[email protected]>
Date:   Wed Mar 27 11:27:06 2013 -0600

    ext/re/t/re_funcs_u.t: Generalize for non-ASCII platforms

M       ext/re/t/re_funcs_u.t

commit 5e8d268216881d9191f0637dd6cb32393e3fa689
Author: Karl Williamson <[email protected]>
Date:   Wed Mar 27 10:33:44 2013 -0600

    t/op/print.t: Skip an EBCDIC test
    
    This could be fixed (the values would probably change depending on the
    code page), but the code that would get exercised is unlikely to vary
    depending on character set.

M       t/op/print.t

commit e53ec980866226a523fafbace0c8f264d74d4356
Author: Karl Williamson <[email protected]>
Date:   Mon Mar 25 20:43:38 2013 -0600

    op/chop.t: Fix for EBCDIC
    
    One test is skipped because the code point is not representable on
    EBCDIC platforms.  Other tests are modified to work on EBCDIC.

M       t/op/chop.t

commit 8451602e68fc7c1db0ce6513366724e57dd945df
Author: Karl Williamson <[email protected]>
Date:   Mon Mar 25 19:56:50 2013 -0600

    t/op/lc.t: Generalize for non-ASCII platforms
    
    This had code that attempted this, but it was wrong: the conversion to
    EBCDIC must be done before the \U, or similar, is applied.

M       t/op/lc.t

commit ff23b6f8f3337d6033cab71eddbdca1900bd3cb9
Author: Karl Williamson <[email protected]>
Date:   Mon Mar 25 15:04:14 2013 -0600

    Skip some porting tests under EBCDIC
    
    These won't work on EBCDIC because of inherent differences from ASCII,
    including a different sort order.
    
    For porting/regen.t, we use the shipped parsing rules on EBCDIC platforms

M       Porting/pod_rules.pl
M       t/porting/bincompat.t
M       t/porting/customized.t
M       t/porting/manifest.t
M       t/porting/perlfunc.t
M       t/porting/regen.t

commit e9f74f49e30186b02e69532faa3c91e33531eb75
Author: Karl Williamson <[email protected]>
Date:   Mon Mar 25 14:59:50 2013 -0600

    t/re/regex_sets.t: Generalize for non-ASCII platforms

M       t/re/regex_sets.t

commit e2ebe0533216aa5dc6ad255d0627be6798c77ab6
Author: Karl Williamson <[email protected]>
Date:   Tue Mar 19 15:27:31 2013 -0600

    t/op/quotemeta.t: EBCDIC fixes

M       t/op/quotemeta.t

commit 26cbd00eb4f8d475f9f9460fd586a28f2bad01f1
Author: Karl Williamson <[email protected]>
Date:   Tue Mar 19 11:32:55 2013 -0600

    t/re/fold_grind.t: Generalize for non-ASCII platforms

M       t/re/fold_grind.t

commit df6c5b1fa1a6d28edbd3fecc8d63b469f35e32b4
Author: Karl Williamson <[email protected]>
Date:   Tue Mar 19 11:20:24 2013 -0600

    t/uni/class.t: Generalize to work on non-ASCII platforms

M       t/uni/class.t

commit ece2b3f8831561e207746d9e0090998e0381d80b
Author: Karl Williamson <[email protected]>
Date:   Tue Mar 19 11:01:57 2013 -0600

    feature/unicode_strings.t: Generalize to work on non-ASCII platforms

M       lib/feature/unicode_strings.t

commit c9674c0fbc59e7957ef30e6695ed3270f75f44f9
Author: Karl Williamson <[email protected]>
Date:   Sat Nov 1 21:10:48 2014 -0600

    lib/dumpvar.pl: Generalize for non-ASCII platforms

M       lib/dumpvar.pl

commit f99a3fe18a1f68c449ec1b0c9560287b25f5297d
Author: Karl Williamson <[email protected]>
Date:   Fri Mar 15 11:57:24 2013 -0600

    re/charset.t: Generalize for non-ASCII platforms
    
    This converts the hard-coded character numbers to native ones, so it will
    work on any platform.  It also adds some tests and improves some test
    names.

M       t/re/charset.t

commit b5c3a14a67dbbfeb052b414c85188c08cb6c0b51
Author: Karl Williamson <[email protected]>
Date:   Wed Mar 13 16:17:39 2013 -0600

    APItest/t/handy.t: Generalize for non-ASCII platforms

M       ext/XS-APItest/t/handy.t

commit 6345b1dc4ca7689d528e854f361443fce332ee9b
Author: Karl Williamson <[email protected]>
Date:   Sun Mar 10 22:17:31 2013 -0600

    t/base/lex.t: Generalize for EBCDIC platforms
    
    \xE2 is 'S' in EBCDIC, and hence is an ASCII-range character there.  We
    need something that is an alpha on both platforms but isn't ASCII; \xDF
    works.
    
    Also, 65 may not be "A" on a non-ASCII platform, so use chr(65) instead
    of 'A' where code point 65 is what is wanted.

M       t/base/lex.t
-----------------------------------------------------------------------

Summary of changes:
 Porting/pod_rules.pl                    |   5 ++
 ext/B/t/b.t                             |   2 +-
 ext/Hash-Util-FieldHash/t/05_perlhook.t |  15 +++-
 ext/Hash-Util/t/Util.t                  |  44 ++++++-----
 ext/POSIX/t/export.t                    |   8 +-
 ext/PerlIO-encoding/t/encoding.t        |  11 +--
 ext/Pod-Functions/Functions_pm.PL       |  16 +++-
 ext/XS-APItest/t/fetch_pad_names.t      |   8 +-
 ext/XS-APItest/t/handy.t                |  10 +--
 ext/XS-APItest/t/hash.t                 |   4 +-
 ext/XS-APItest/t/svpeek.t               |   6 +-
 ext/XS-APItest/t/svpv_magic.t           |   7 +-
 ext/XS-APItest/t/utf8.t                 |  11 ++-
 ext/re/t/re_funcs_u.t                   |   2 +-
 lib/dumpvar.pl                          |  56 ++++++--------
 lib/feature/unicode_strings.t           |   4 +-
 lib/overload.t                          |   6 +-
 lib/utf8.t                              |  35 ++++-----
 t/base/lex.t                            |   7 +-
 t/io/crlf.t                             |  14 ++--
 t/io/through.t                          |  33 +++++++-
 t/io/utf8.t                             |  35 ++++-----
 t/lib/croak/op                          |   7 +-
 t/lib/feature/bundle                    |   6 +-
 t/lib/feature/implicit                  |  11 ++-
 t/lib/warnings/op                       |  26 ++++++-
 t/lib/warnings/regcomp                  |  10 ++-
 t/lib/warnings/toke                     |  10 ++-
 t/op/bop.t                              | 131 ++++++++++++++++----------------
 t/op/chars.t                            |  10 +--
 t/op/chop.t                             |  31 +++++---
 t/op/coreamp.t                          |  26 +++++--
 t/op/evalbytes.t                        |   7 +-
 t/op/goto.t                             |   4 +-
 t/op/inc.t                              |  13 +++-
 t/op/lc.t                               |  19 +++--
 t/op/magic.t                            |   2 +-
 t/op/override.t                         |   2 +-
 t/op/print.t                            |   1 +
 t/op/quotemeta.t                        |  45 ++++++-----
 t/op/split.t                            |   8 +-
 t/op/sprintf.t                          |  33 ++++++--
 t/op/tie.t                              |   7 ++
 t/op/utf8magic.t                        |   4 +-
 t/op/utfhash.t                          |  15 ++--
 t/op/ver.t                              |  10 +--
 t/op/warn.t                             |  21 ++---
 t/porting/bincompat.t                   |   1 +
 t/porting/checkcfgvar.t                 |   1 +
 t/porting/customized.t                  |   1 +
 t/porting/manifest.t                    |   1 +
 t/porting/perlfunc.t                    |   5 ++
 t/porting/regen.t                       |   4 +
 t/re/charset.t                          |  29 ++++---
 t/re/fold_grind.t                       |  37 +++++----
 t/re/pat.t                              |  46 ++++++-----
 t/re/pat_advanced.t                     | 128 ++++++++++++++++++-------------
 t/re/pat_re_eval.t                      |  17 +++--
 t/re/re_tests                           |  50 +++++++-----
 t/re/reg_email.t                        |  15 ++--
 t/re/regex_sets.t                       |   9 ++-
 t/re/regexp_unicode_prop.t              |  46 ++++++-----
 t/uni/case.pl                           |  15 ++--
 t/uni/class.t                           |  16 ++--
 t/uni/fold.t                            |  48 +++++++-----
 t/uni/lower.t                           |   2 +
 t/uni/method.t                          |  26 +++++--
 t/uni/overload.t                        |  88 ++++++++++-----------
 t/uni/parser.t                          |   4 +-
 t/uni/title.t                           |   2 +
 t/uni/upper.t                           |   2 +
 71 files changed, 821 insertions(+), 570 deletions(-)

diff --git a/Porting/pod_rules.pl b/Porting/pod_rules.pl
index 18abe87..1de43a9 100644
--- a/Porting/pod_rules.pl
+++ b/Porting/pod_rules.pl
@@ -6,6 +6,11 @@ use Text::Tabs;
 use Text::Wrap;
 use Getopt::Long;
 
+if (ord("A") == 193) {
+    print "1..0 # EBCDIC sort order is different\n";
+    exit;
+}
+
 # Generate the sections of files listed in %Targets from pod/perl.pod
 # Mostly these are rules in Makefiles
 #
diff --git a/ext/B/t/b.t b/ext/B/t/b.t
index 93d8867..d9d9e7b 100644
--- a/ext/B/t/b.t
+++ b/ext/B/t/b.t
@@ -280,7 +280,7 @@ is(B::opnumber("pp_null"), 0, "Testing opnumber with opname (pp_null)");
     while (my ($test, $expect) = splice @tests, 0, 2) {
        is(B::perlstring($test), $expect, "B::perlstring($expect)");
        utf8::upgrade $test;
-       $expect =~ s/\\b/\\x\{8\}/g;
+       $expect =~ s/\\b/sprintf("\\x{%x}", utf8::unicode_to_native(8))/eg;
        $expect =~ s/\\([0-7]{3})/sprintf "\\x\{%x\}", oct $1/eg;
        is(B::perlstring($test), $expect, "B::perlstring($expect) (Unicode)");
     }
diff --git a/ext/Hash-Util-FieldHash/t/05_perlhook.t b/ext/Hash-Util-FieldHash/t/05_perlhook.t
index 92c6b7a..61d02ec 100644
--- a/ext/Hash-Util-FieldHash/t/05_perlhook.t
+++ b/ext/Hash-Util-FieldHash/t/05_perlhook.t
@@ -6,6 +6,13 @@ my $n_tests;
 use Hash::Util::FieldHash;
 use Scalar::Util qw( weaken);
 
+sub numbers_first { # Sort helper: All digit entries sort in front of others
+                    # Makes sorting portable across ASCII/EBCDIC
+    return $a cmp $b if ($a =~ /^\d+$/) == ($b =~ /^\d+$/);
+    return -1 if $a =~ /^\d+$/;
+    return 1;
+}
+
 # The functions in Hash::Util::FieldHash
 # _test_uvar_get, _test_uvar_get and _test_uvar_both
 
@@ -108,7 +115,7 @@ use Scalar::Util qw( weaken);
     $h{ def} = 456;
     is( $counter, 2, "lvalue assign triggers");
 
-    (@x) = sort %h;
+    (@x) = sort numbers_first %h;
     is( $counter, 2, "hash in list context doesn't trigger");
     is( "@x", "123 456 abc def", "correct result");
 
@@ -121,14 +128,14 @@ use Scalar::Util qw( weaken);
     delete $h{ def};
     is( $counter, 5, "good delete triggers");
 
-    (@x) = sort %h;
+    (@x) = sort numbers_first %h;
     is( $counter, 5, "hash in list context doesn't trigger");
     is( "@x", "123 abc", "correct result");
 
     delete $h{ xyz};
     is( $counter, 6, "bad delete triggers");
 
-    (@x) = sort %h;
+    (@x) = sort numbers_first %h;
     is( $counter, 6, "hash in list context doesn't trigger");
     is( "@x", "123 abc", "correct result");
 
@@ -138,7 +145,7 @@ use Scalar::Util qw( weaken);
     $x = $h{ xyz};
     is( $counter, 8, "bad read triggers");
 
-    (@x) = sort %h;
+    (@x) = sort numbers_first %h;
     is( $counter, 8, "hash in list context doesn't trigger");
     is( "@x", "123 abc", "correct result");
 
diff --git a/ext/Hash-Util/t/Util.t b/ext/Hash-Util/t/Util.t
index 2e9e333..031d074 100644
--- a/ext/Hash-Util/t/Util.t
+++ b/ext/Hash-Util/t/Util.t
@@ -13,6 +13,14 @@ BEGIN {
 
 use strict;
 use Test::More;
+
+sub numbers_first { # Sort helper: All digit entries sort in front of others
+                    # Makes sorting portable across ASCII/EBCDIC
+    return $a cmp $b if ($a =~ /^\d+$/) == ($b =~ /^\d+$/);
+    return -1 if $a =~ /^\d+$/;
+    return 1;
+}
+
 my @Exported_Funcs;
 BEGIN {
     @Exported_Funcs = qw(
@@ -427,9 +435,9 @@ ok(defined($hash_seed) && $hash_seed ne '', "hash_seed $hash_seed");
     my %hash=(0..9);
     lock_keys(%hash,keys(%hash),'a'..'f');
     ok(Internals::SvREADONLY(%hash),'lock_keys args DDS/t');
-    my @hidden=sort(hidden_keys(%hash));
-    my @legal=sort(legal_keys(%hash));
-    my @keys=sort(keys(%hash));
+    my @hidden=sort numbers_first hidden_keys(%hash);
+    my @legal=sort numbers_first legal_keys(%hash);
+    my @keys=sort numbers_first keys(%hash);
     is("@hidden","a b c d e f",'lock_keys() @hidden DDS/t 3');
     is("@legal","0 2 4 6 8 a b c d e f",'lock_keys() @legal DDS/t 3');
     is("@keys","0 2 4 6 8",'lock_keys() @keys');
@@ -452,9 +460,9 @@ ok(defined($hash_seed) && $hash_seed ne '', "hash_seed $hash_seed");
     my %hash=(0..9);
     lock_ref_keys(\%hash,keys %hash,'a'..'f');
     ok(Internals::SvREADONLY(%hash),'lock_ref_keys args DDS/t');
-    my @hidden=sort(hidden_keys(%hash));
-    my @legal=sort(legal_keys(%hash));
-    my @keys=sort(keys(%hash));
+    my @hidden=sort numbers_first hidden_keys(%hash);
+    my @legal=sort numbers_first legal_keys(%hash);
+    my @keys=sort numbers_first keys(%hash);
     is("@hidden","a b c d e f",'lock_ref_keys() @hidden DDS/t 2');
     is("@legal","0 2 4 6 8 a b c d e f",'lock_ref_keys() @legal DDS/t 2');
     is("@keys","0 2 4 6 8",'lock_ref_keys() @keys DDS/t 2');
@@ -463,9 +471,9 @@ ok(defined($hash_seed) && $hash_seed ne '', "hash_seed $hash_seed");
     my %hash=(0..9);
     lock_ref_keys_plus(\%hash,'a'..'f');
     ok(Internals::SvREADONLY(%hash),'lock_ref_keys_plus args DDS/t');
-    my @hidden=sort(hidden_keys(%hash));
-    my @legal=sort(legal_keys(%hash));
-    my @keys=sort(keys(%hash));
+    my @hidden=sort numbers_first hidden_keys(%hash);
+    my @legal=sort numbers_first legal_keys(%hash);
+    my @keys=sort numbers_first keys(%hash);
     is("@hidden","a b c d e f",'lock_ref_keys_plus() @hidden DDS/t');
     is("@legal","0 2 4 6 8 a b c d e f",'lock_ref_keys_plus() @legal DDS/t');
     is("@keys","0 2 4 6 8",'lock_ref_keys_plus() @keys DDS/t');
@@ -474,9 +482,9 @@ ok(defined($hash_seed) && $hash_seed ne '', "hash_seed $hash_seed");
     my %hash=(0..9, 'a' => 'alpha');
     lock_ref_keys_plus(\%hash,'a'..'f');
     ok(Internals::SvREADONLY(%hash),'lock_ref_keys_plus args overlap');
-    my @hidden=sort(hidden_keys(%hash));
-    my @legal=sort(legal_keys(%hash));
-    my @keys=sort(keys(%hash));
+    my @hidden=sort numbers_first hidden_keys(%hash);
+    my @legal=sort numbers_first legal_keys(%hash);
+    my @keys=sort numbers_first keys(%hash);
     is("@hidden","b c d e f",'lock_ref_keys_plus() @hidden overlap');
     is("@legal","0 2 4 6 8 a b c d e f",'lock_ref_keys_plus() @legal overlap');
     is("@keys","0 2 4 6 8 a",'lock_ref_keys_plus() @keys overlap');
@@ -485,9 +493,9 @@ ok(defined($hash_seed) && $hash_seed ne '', "hash_seed $hash_seed");
     my %hash=(0..9);
     lock_keys_plus(%hash,'a'..'f');
     ok(Internals::SvREADONLY(%hash),'lock_keys_plus args DDS/t');
-    my @hidden=sort(hidden_keys(%hash));
-    my @legal=sort(legal_keys(%hash));
-    my @keys=sort(keys(%hash));
+    my @hidden=sort numbers_first hidden_keys(%hash);
+    my @legal=sort numbers_first legal_keys(%hash);
+    my @keys=sort numbers_first keys(%hash);
     is("@hidden","a b c d e f",'lock_keys_plus() @hidden DDS/t 3');
     is("@legal","0 2 4 6 8 a b c d e f",'lock_keys_plus() @legal DDS/t 3');
     is("@keys","0 2 4 6 8",'lock_keys_plus() @keys DDS/t 3');
@@ -496,9 +504,9 @@ ok(defined($hash_seed) && $hash_seed ne '', "hash_seed $hash_seed");
     my %hash=(0..9, 'a' => 'alpha');
     lock_keys_plus(%hash,'a'..'f');
     ok(Internals::SvREADONLY(%hash),'lock_keys_plus args overlap non-ref');
-    my @hidden=sort(hidden_keys(%hash));
-    my @legal=sort(legal_keys(%hash));
-    my @keys=sort(keys(%hash));
+    my @hidden=sort numbers_first hidden_keys(%hash);
+    my @legal=sort numbers_first legal_keys(%hash);
+    my @keys=sort numbers_first keys(%hash);
     is("@hidden","b c d e f",'lock_keys_plus() @hidden overlap non-ref');
     is("@legal","0 2 4 6 8 a b c d e f",'lock_keys_plus() @legal overlap 
non-ref');
     is("@keys","0 2 4 6 8 a",'lock_keys_plus() @keys overlap non-ref');
diff --git a/ext/POSIX/t/export.t b/ext/POSIX/t/export.t
index caa7f2b..7e18da4 100644
--- a/ext/POSIX/t/export.t
+++ b/ext/POSIX/t/export.t
@@ -10,6 +10,12 @@ plan(skip_all => "POSIX is unavailable")
 require POSIX;
 POSIX->import();
 
+require($ENV{PERL_CORE} ? "../../t/charset_tools.pl" : "../t/charset_tools.pl");
+
+sub ascii_order {   # Sort helper
+    return native_to_uni($a) cmp native_to_uni($b);
+}
+
 # @POSIX::EXPORT and @POSIX::EXPORT_OK are generated. The intent of this test is
 # to catch *unintended* changes to them introduced by bugs in refactoring.
 
@@ -139,5 +145,5 @@ while (my ($var, $expect) = each %expect) {
     my $have = *{$POSIX::{$var}}{ARRAY};
     cmp_ok(@$have, '==', @$expect,
           "Correct number of entries for \@POSIX::$var");
-    is_deeply([sort @$have], $expect, "Correct entries for \@POSIX::$var");
+    is_deeply([sort ascii_order @$have], $expect, "Correct entries for \@POSIX::$var");
 }
diff --git a/ext/PerlIO-encoding/t/encoding.t b/ext/PerlIO-encoding/t/encoding.t
index 6b4d3d0..fdd1f24 100644
--- a/ext/PerlIO-encoding/t/encoding.t
+++ b/ext/PerlIO-encoding/t/encoding.t
@@ -9,6 +9,7 @@ BEGIN {
        print "1..0 # Skip: not Encode\n";
        exit 0;
     }
+    require "../../t/charset_tools.pl";
 }
 
 use Test::More tests => 24;
@@ -37,13 +38,9 @@ if (open(GRK, ">$grk")) {
 
 if (open(UTF, "<$utf")) {
     binmode(UTF, ":bytes");
-    if (ord('A') == 193) { # EBCDIC
-       # alpha beta gamma in UTF-EBCDIC Unicode (0x3b1 0x3b2 0x3b3)
-       is(scalar <UTF>, "\xb4\x58\xb4\x59\xb4\x62");
-    } else {
-       # alpha beta gamma in UTF-8 Unicode (0x3b1 0x3b2 0x3b3)
-       is(scalar <UTF>, "\xce\xb1\xce\xb2\xce\xb3");
-    }
+
+    # alpha beta gamma in UTF-8 Unicode (0x3b1 0x3b2 0x3b3)
+    is(scalar <UTF>, byte_utf8a_to_utf8n("\xce\xb1\xce\xb2\xce\xb3"));
     close UTF;
 }
 
diff --git a/ext/Pod-Functions/Functions_pm.PL b/ext/Pod-Functions/Functions_pm.PL
index 578ec89..04bf5bc 100644
--- a/ext/Pod-Functions/Functions_pm.PL
+++ b/ext/Pod-Functions/Functions_pm.PL
@@ -91,11 +91,19 @@ while (my ($type, $funcs) = each %Kinds) {
 }
 
 # We sort __SUB__ after sub, but before substr, but __PACKAGE__ after package,
-# and __END__ after END.
+# and __END__ after END.  (We create a temporary array of two elements, where
+# the second has the underscores squeezed out, and sort on that element
+# first.)
 sub sort_funcs {
     map { $_->[0] }
-        sort { uc $a->[1] cmp uc $b->[1] || $b->[1] cmp $a->[1] || $a->[0] cmp $b->[0] }
-            map  { my $f = tr/_//dr; [ $_, $f ] }
+        sort { uc $a->[1] cmp uc $b->[1]
+               || $b->[1] cmp $a->[1]
+               || $a->[0] =~ /^_/   # here $a and $b are identical when
+                                    # underscores squeezed out; so if $a
+                                    # begins with an underscore, it should
+                                    # sort after $b
+               || $a->[0] cmp $b->[0]
+             } map  { my $f = tr/_//dr; [ $_, $f ] }
                 @_;
 }
 
@@ -196,7 +204,7 @@ L<perlfunc/"Perl Functions by Category"> section.
 
 =cut
 
-our $VERSION = '1.08';
+our $VERSION = '1.09';
 
 require Exporter;
 
diff --git a/ext/XS-APItest/t/fetch_pad_names.t b/ext/XS-APItest/t/fetch_pad_names.t
index 9e95d1b..1c71a2d 100644
--- a/ext/XS-APItest/t/fetch_pad_names.t
+++ b/ext/XS-APItest/t/fetch_pad_names.t
@@ -2,7 +2,13 @@ use strict;
 use warnings;
 use Encode ();
 
-use Test::More tests => 77;
+use Test::More;
+if (ord("A") != 65) {
+    plan skip_all => "Painful to port to non-ASCII";
+}
+else {
+    plan tests => 77;
+}
 
 use XS::APItest qw( fetch_pad_names pad_scalar );
 
diff --git a/ext/XS-APItest/t/handy.t b/ext/XS-APItest/t/handy.t
index a434e2d..9ebe0d3 100644
--- a/ext/XS-APItest/t/handy.t
+++ b/ext/XS-APItest/t/handy.t
@@ -27,7 +27,7 @@ if($Config{d_setlocale}) {
         # Some locale implementations don't have the 128-255 characters all
         # mean nothing.  Skip the locale tests in that situation
         for my $i (128 .. 255) {
-            if (chr($i) =~ /[[:print:]]/) {
+            if (chr(utf8::unicode_to_native($i)) =~ /[[:print:]]/) {
                 undef $locale;
                 last;
             }
@@ -136,7 +136,7 @@ foreach my $name (sort keys %properties) {
                     fail($@);
                 }
                 else {
-                    my $truth = truth($matches && $i < 128);
+                    my $truth = truth($matches && utf8::native_to_unicode($i) < 128);
                     is ($ret, $truth, "is${function}_A( $display_name ) == $truth");
                 }
                 $ret = truth eval "test_is${function}_L1($i)";
@@ -157,7 +157,7 @@ foreach my $name (sort keys %properties) {
                     fail($@);
                 }
                 else {
-                    my $truth = truth($matches && $i < 128);
+                    my $truth = truth($matches && utf8::native_to_unicode($i) < 128);
                     is ($ret, $truth, "is${function}_LC( $display_name ) == $truth (C locale)");
                 }
             }
@@ -195,7 +195,7 @@ foreach my $name (sort keys %properties) {
                 fail($@);
             }
             else {
-                my $truth = truth($matches && ($i < 128 || $i > 255));
+                my $truth = truth($matches && (utf8::native_to_unicode($i) < 128 || $i > 255));
                is ($ret, $truth, "is${function}_LC_uvchr( $display_name ) == $truth (C locale)");
             }
         }
@@ -233,7 +233,7 @@ foreach my $name (sort keys %properties) {
                 fail($@);
             }
             else {
-                my $truth = truth($matches && ($i < 128 || $i > 255));
+                my $truth = truth($matches && (utf8::native_to_unicode($i) < 128 || $i > 255));
                is ($ret, $truth, "is${function}_LC_utf8( $display_name ) == $truth (C locale)");
             }
         }
diff --git a/ext/XS-APItest/t/hash.t b/ext/XS-APItest/t/hash.t
index 8a8c607..ac8eebe 100644
--- a/ext/XS-APItest/t/hash.t
+++ b/ext/XS-APItest/t/hash.t
@@ -20,7 +20,7 @@ sub test_fetch_absent;
 my $utf8_for_258 = chr 258;
 utf8::encode $utf8_for_258;
 
-my @testkeys = ('N', chr 198, chr 256);
+my @testkeys = ('N', chr utf8::unicode_to_native(198), chr 256);
 my @keys = (@testkeys, $utf8_for_258);
 
 foreach (@keys) {
@@ -396,7 +396,7 @@ sub test_U_hash {
 sub main_tests {
   my ($keys, $testkeys, $description) = @_;
   foreach my $key (@$testkeys) {
-    my $lckey = ($key eq chr 198) ? chr 230 : lc $key;
+    my $lckey = ($key eq chr utf8::unicode_to_native(198)) ? chr utf8::unicode_to_native(230) : lc $key;
     my $unikey = $key;
     utf8::encode $unikey;
 
diff --git a/ext/XS-APItest/t/svpeek.t b/ext/XS-APItest/t/svpeek.t
index df914fc..6c83005 100644
--- a/ext/XS-APItest/t/svpeek.t
+++ b/ext/XS-APItest/t/svpeek.t
@@ -66,9 +66,9 @@ if ($^O eq 'vos') {
   $VAR = "";
   is (DPeek ($VAR),    'PVIV(""\0)',           ' $VAR ""');
   is (DPeek (\$VAR),   '\PVIV(""\0)',          '\$VAR ""');
-  $VAR = "\xa8";
-  is (DPeek ($VAR),    'PVIV("\250"\0)',       ' $VAR "\xa8"');
-  is (DPeek (\$VAR),   '\PVIV("\250"\0)',      '\$VAR "\xa8"');
+  $VAR = "\xdf";    # \xdf works for both ASCII and EBCDIC
+  is (DPeek ($VAR),    'PVIV("\337"\0)',       ' $VAR "\xdf"');
+  is (DPeek (\$VAR),   '\PVIV("\337"\0)',      '\$VAR "\xdf"');
   $VAR = "a\x0a\x{20ac}";
   is (DPeek ($VAR), 'PVIV("a\n\342\202\254"\0) [UTF8 "a\n\x{20ac}"]',
                                        ' $VAR "a\x0a\x{20ac}"');
diff --git a/ext/XS-APItest/t/svpv_magic.t b/ext/XS-APItest/t/svpv_magic.t
index c57257e..9cb52fc 100644
--- a/ext/XS-APItest/t/svpv_magic.t
+++ b/ext/XS-APItest/t/svpv_magic.t
@@ -3,7 +3,8 @@
 use Test::More tests => 10;
 
 BEGIN {
-    use_ok('XS::APItest')
+    use_ok('XS::APItest');
+    require 'charset_tools.pl';
 };
 
 $b = "\303\244"; # or encode_utf8("\x{e4}");
@@ -32,7 +33,7 @@ is(eval { XS::APItest::first_byte($1) } || $@, 0303,
 sub TIESCALAR { bless [], shift }
 sub FETCH { ++$f; *{chr 255} }
 tie $t, "main";
-is SvPVutf8($t), "*main::\xc3\xbf",
+is SvPVutf8($t), "*main::" . byte_utf8a_to_utf8n("\xc3\xbf"),
   'SvPVutf8 works with get-magic changing the SV type';
 is $f, 1, 'SvPVutf8 calls get-magic once';
 
@@ -43,7 +44,7 @@ package t {
 }
 tie $t, "t";
 undef $f;
-is SvPVutf8($t), "\xc3\xbf",
+is SvPVutf8($t), byte_utf8a_to_utf8n("\xc3\xbf"),
   'SvPVutf8 works with get-magic downgrading the SV';
 is $f, 1, 'SvPVutf8 calls get-magic once';
 ()="$t";
diff --git a/ext/XS-APItest/t/utf8.t b/ext/XS-APItest/t/utf8.t
index bc5a7ed..8322cb9 100644
--- a/ext/XS-APItest/t/utf8.t
+++ b/ext/XS-APItest/t/utf8.t
@@ -5,13 +5,15 @@ use Test::More;
 
 use XS::APItest;
 
+my $pound_sign = chr utf8::unicode_to_native(163);
+
 foreach ([0, '', '', 'empty'],
         [0, 'N', 'N', '1 char'],
         [1, 'NN', 'N', '1 char substring'],
         [-2, 'Perl', 'Rules', 'different'],
-        [0, chr 163, chr 163, 'pound sign'],
-        [1, chr (163) . 10, chr (163) . 1, '10 pounds is more than 1 pound'],
-        [1, chr(163) . chr(163), chr 163, '2 pound signs are more than 1'],
+        [0, $pound_sign, $pound_sign, 'pound sign'],
+        [1, $pound_sign . 10, $pound_sign . 1, '10 pounds is more than 1 pound'],
+        [1, $pound_sign . $pound_sign, $pound_sign, '2 pound signs are more than 1'],
         [-2, ' $!', " \x{1F42B}!", 'Camels are worth more than 1 dollar'],
         [-1, '!', "!\x{1F42A}", 'Initial substrings match'],
        ) {
@@ -24,6 +26,8 @@ foreach ([0, '', '', 'empty'],
     is(bytes_cmp_utf8($right, $left), -$expect, "$desc reversed");
 }
 
+if (ord("A") == 65) { # EBCDIC is too hard to test for malformations
+
 # Test uft8n_to_uvchr().  These provide essentially complete code coverage.
 
 # Copied from utf8.h
@@ -319,5 +323,6 @@ foreach my $test (@tests) {
         }
     }
 }
+}
 
 done_testing;
diff --git a/ext/re/t/re_funcs_u.t b/ext/re/t/re_funcs_u.t
index 770b456..781ffc9 100644
--- a/ext/re/t/re_funcs_u.t
+++ b/ext/re/t/re_funcs_u.t
@@ -80,7 +80,7 @@ if ('1234'=~/(?:(?<A>\d)|(?<C>!))(?<B>\d)(?<A>\d)(?<B>\d)/){
 
 {
     # tests for new regexp flags
-    my $text = "\xE4";
+    my $text = chr utf8::unicode_to_native(0xE4);
     my $check;
 
     {
diff --git a/lib/dumpvar.pl b/lib/dumpvar.pl
index 91153ea..b2f3798 100644
--- a/lib/dumpvar.pl
+++ b/lib/dumpvar.pl
@@ -14,6 +14,8 @@ package dumpvar;
 
 $winsize = 80 unless defined $winsize;
 
+sub ASCII { return ord('A') == 65; }
+
 
 # Defaults
 
@@ -25,6 +27,9 @@ $subdump = 1;
 $dumpReused = 0 unless defined $dumpReused;
 $bareStringify = 1 unless defined $bareStringify;
 
+my $APC = chr utf8::unicode_to_native(0x9F);
+my $backslash_c_question = (ASCII) ? '\177' : $APC;
+
 sub main::dumpValue {
   local %address;
   local $^W=0;
@@ -41,12 +46,8 @@ sub unctrl {
        local($v) ; 
 
        return \$_ if ref \$_ eq "GLOB";
-        if (ord('A') == 193) { # EBCDIC.
-           # EBCDIC has no concept of "\cA" or "A" being related
-           # to each other by a linear/boolean mapping.
-       } else {
-           s/([\001-\037\177])/'^'.pack('c',ord($1)^64)/eg;
-       }
+        s/([\000-\037])/ '^' . chr(utf8::unicode_to_native(ord($1)^64))/eg;
+        s/ $backslash_c_question /^?/xg;
        return $_;
     }
 }
@@ -54,7 +55,7 @@ sub unctrl {
 sub uniescape {
     join("",
         map { $_ > 255 ? sprintf("\\x{%04X}", $_) : chr($_) }
-            unpack("U*", $_[0]));
+            unpack("W*", $_[0]));
 }
 
 sub stringify {
@@ -79,39 +80,27 @@ sub _stringify {
            and %overload:: and defined &{'overload::StrVal'};
        
        if ($tick eq 'auto') {
-           if (ord('A') == 193) {
-               if (/[\000-\011]/ or /[\013-\024\31-\037\177]/) {
-                   $tick = '"';
-               } else {
-                   $tick = "'";
-               }
-            }  else {
-               if (/[\000-\011\013-\037\177]/) {
-                   $tick = '"';
-               } else {
-                   $tick = "'";
-               }
-           }
+            if (/[^[:^cntrl:]\n]/u) {   # All controls but \n get '"'
+                $tick = '"';
+            } else {
+                $tick = "'";
+            }
        }
        if ($tick eq "'") {
          s/([\'\\])/\\$1/g;
        } elsif ($unctrl eq 'unctrl') {
          s/([\"\\])/\\$1/g ;
-         s/([\000-\037\177])/'^'.pack('c',ord($1)^64)/eg;
+          $_ = &unctrl($_);
          # uniescape?
-         s/([\200-\377])/'\\0x'.sprintf('%2X',ord($1))/eg 
+         s/([[:^ascii:]])/'\\0x'.sprintf('%2X',ord($1))/eg
            if $quoteHighBit;
        } elsif ($unctrl eq 'quote') {
          s/([\"\\\$\@])/\\$1/g if $tick eq '"';
-         s/\033/\\e/g;
-         if (ord('A') == 193) { # EBCDIC.
-             s/([\000-\037\177])/'\\c'.chr(193)/eg; # Unfinished.
-         } else {
-             s/([\000-\037\177])/'\\c'._escaped_ord($1)/eg;
-         }
+         s/\e/\\e/g;
+          s/([\000-\037$backslash_c_question])/'\\c'._escaped_ord($1)/eg;
        }
        $_ = uniescape($_);
-       s/([\200-\377])/'\\'.sprintf('%3o',ord($1))/eg if $quoteHighBit;
+       s/([[:^ascii:]])/'\\'.sprintf('%3o',ord($1))/eg if $quoteHighBit;
        return ($noticks || /^\d+(\.\d*)?\Z/) 
          ? $_ 
          : $tick . $_ . $tick;
@@ -121,8 +110,13 @@ sub _stringify {
 # Ensure a resulting \ is escaped to be \\
 sub _escaped_ord {
     my $chr = shift;
-    $chr = chr(ord($chr)^64);
-    $chr =~ s{\\}{\\\\}g;
+    if ($chr eq $backslash_c_question) {
+        $chr = '?';
+    }
+    else {
+        $chr = chr(utf8::unicode_to_native(ord($chr)^64));
+        $chr =~ s{\\}{\\\\}g;
+    }
     return $chr;
 }
 
diff --git a/lib/feature/unicode_strings.t b/lib/feature/unicode_strings.t
index ce3f225..186dcf2 100644
--- a/lib/feature/unicode_strings.t
+++ b/lib/feature/unicode_strings.t
@@ -225,7 +225,7 @@ for my $i (0 .. 255) {
 
                     # With the legacy, nothing above 128 should be in the
                     # class
-                    if ($i >= 128) {
+                    if (utf8::native_to_unicode($i) >= 128) {
                         $expect_success = 0;
                         $expect_success = ! $expect_success if $complement;
                        $expect_success = ! $expect_success if $complement_class;
@@ -259,7 +259,7 @@ for my $i (0 .. 255) {
 
         no feature 'unicode_strings';
         $prefix = "no uni8bit; Verify $string";
-        if ($i >= 128) {
+        if (utf8::native_to_unicode($i) >= 128) {
             $expect_success = 1;
             $expect_success = ! $expect_success if $complement;
         }
diff --git a/lib/overload.t b/lib/overload.t
index 6bbbb0b..e6f4062 100644
--- a/lib/overload.t
+++ b/lib/overload.t
@@ -47,7 +47,7 @@ sub numify { 0 + "${$_[0]}" } # Not needed, additional overhead
 package main;
 
 $| = 1;
-BEGIN { require './test.pl' }
+BEGIN { require './test.pl'; require './charset_tools.pl' }
 plan tests => 5215;
 
 use Scalar::Util qw(tainted);
@@ -2261,9 +2261,9 @@ fresh_perl_is
 
     $o->[0] = 1;
     $c = 0;
-    ::ok("\xc4\x80" =~ "^\x{100}\$",
+    ::ok(main::byte_utf8a_to_utf8n("\xc4\x80") =~ "^\x{100}\$",
                                "regex stringify utf8=1 ol=0 bytes=1");
-    ::ok("\xc4\x80" =~ $o,     "regex stringify utf8=1 ol=1 bytes=1");
+    ::ok(main::byte_utf8a_to_utf8n("\xc4\x80") =~ $o,  "regex stringify utf8=1 ol=1 bytes=1");
     ::is($c, 1,                        "regex stringify utf8=1 ol=1 bytes=1 count");
 
 
diff --git a/lib/utf8.t b/lib/utf8.t
index 5c03b31..8578444 100644
--- a/lib/utf8.t
+++ b/lib/utf8.t
@@ -5,7 +5,7 @@ my $has_perlio;
 BEGIN {
     chdir 't' if -d 't';
     @INC = '../lib';
-    require './test.pl';
+    require './test.pl'; require './charset_tools.pl';
     unless ($has_perlio = find PerlIO::Layer 'perlio') {
        print <<EOF;
 # Since you don't have perlio you might get failures with UTF-8 locales.
@@ -44,8 +44,8 @@ no utf8; # Ironic, no?
 
     my ($a, $b);
 
-    { use bytes; $a = "\xc3\xa4" }
-    { use utf8;  $b = "\xe4"     }
+    { use bytes; $a = byte_utf8a_to_utf8n("\xc3\xa4") }
+    { use utf8;  $b = uni_to_native("\xe4")     }
 
     my $test = 68;
 
@@ -429,7 +429,7 @@ SKIP: {
 {
     # Make sure utf8::decode respects copy-on-write [perl #91834].
     # Hash keys are the easiest way to test this.
-    my $name = "\x{c3}\x{b3}";
+    my $name = byte_utf8a_to_utf8n("\x{c3}\x{b3}");
     my ($k1) = keys %{ { $name=>undef } };
     my $k2 = $name;
     utf8::decode($k1);
@@ -442,7 +442,7 @@ SKIP: {
     # Make sure utf8::decode does not modify read-only scalars
     # [perl #91850].
     
-    my $name = "\x{c3}\x{b3}";
+    my $name = byte_utf8a_to_utf8n("\x{c3}\x{b3}");
     Internals::SvREADONLY($name, 1);
     eval { utf8::decode($name) };
     like $@, qr/^Modification of a read-only/,
@@ -452,12 +452,12 @@ SKIP: {
 {
     # utf8::decode should stringify refs [perl #91852].
 
-    package eieifg { use overload '""'      => sub { "\x{c3}\x{b3}" },
+    package eieifg { use overload '""'      => sub { main::byte_utf8a_to_utf8n("\x{c3}\x{b3}") },
                                    fallback => 1 }
 
     my $name = bless[], eieifg::;
     utf8::decode($name);
-    is $name, "\xf3", 'utf8::decode flattens references';
+    is $name, uni_to_native("\xf3"), 'utf8::decode flattens references';
 }
 
 {
@@ -500,10 +500,10 @@ SKIP: {
 }
 
 {
-    my $a = "456\xb6";
+    my $a = "456" . uni_to_native("\xb6");
     utf8::upgrade($a);
 
-    my $b = "123456\xb6";
+    my $b = "123456" . uni_to_native("\xb6");
     $b =~ s/^...//;
     utf8::upgrade($b);
     is($b, $a, "utf8::upgrade OffsetOK");
@@ -563,7 +563,8 @@ SKIP: {
 for my $pos (0..5) {
 
     my $p;
-    my $s = "A\xc8\x81\xe8\xab\x86\x{100}";
+    my $utf8_bytes = byte_utf8a_to_utf8n("\xc8\x81\xe3\xbf\xbf");
+    my $s = "A$utf8_bytes\x{100}";
     chop($s);
 
     pos($s) = $pos;
@@ -573,17 +574,17 @@ for my $pos (0..5) {
     utf8::downgrade($s);
     is(length($s), 6,             "(pos $pos) len after     utf8::downgrade");
     is(pos($s),    $pos,          "(pos $pos) pos after     utf8::downgrade");
-    is($s, "A\xc8\x81\xe8\xab\x86","(pos $pos) str after     utf8::downgrade");
+    is($s, "A$utf8_bytes","(pos $pos) str after     utf8::downgrade");
     utf8::decode($s);
     is(length($s), 3,             "(pos $pos) len after  D; utf8::decode");
     is(pos($s),    undef,         "(pos $pos) pos after  D; utf8::decode");
-    is($s, "A\x{201}\x{8ac6}",    "(pos $pos) str after  D; utf8::decode");
+    is($s, "A\x{201}\x{3fff}",    "(pos $pos) str after  D; utf8::decode");
     utf8::encode($s);
     is(length($s), 6,             "(pos $pos) len after  D; utf8::encode");
     is(pos($s),    undef,         "(pos $pos) pos after  D; utf8::encode");
-    is($s, "A\xc8\x81\xe8\xab\x86","(pos $pos) str after  D; utf8::encode");
+    is($s, "A$utf8_bytes","(pos $pos) str after  D; utf8::encode");
 
-    $s = "A\xc8\x81\xe8\xab\x86";
+    $s = "A$utf8_bytes";
 
     pos($s) = $pos;
     is(length($s), 6,             "(pos $pos) len before    utf8::upgrade");
@@ -591,15 +592,15 @@ for my $pos (0..5) {
     utf8::upgrade($s);
     is(length($s), 6,             "(pos $pos) len after     utf8::upgrade");
     is(pos($s),    $pos,          "(pos $pos) pos after     utf8::upgrade");
-    is($s, "A\xc8\x81\xe8\xab\x86","(pos $pos) str after     utf8::upgrade");
+    is($s, "A$utf8_bytes","(pos $pos) str after     utf8::upgrade");
     utf8::decode($s);
     is(length($s), 3,             "(pos $pos) len after  U; utf8::decode");
     is(pos($s),    undef,         "(pos $pos) pos after  U; utf8::decode");
-    is($s, "A\x{201}\x{8ac6}",    "(pos $pos) str after  U; utf8::decode");
+    is($s, "A\x{201}\x{3fff}",    "(pos $pos) str after  U; utf8::decode");
     utf8::encode($s);
     is(length($s), 6,             "(pos $pos) len after  U; utf8::encode");
     is(pos($s),    undef,         "(pos $pos) pos after  U; utf8::encode");
-    is($s, "A\xc8\x81\xe8\xab\x86","(pos $pos) str after  U; utf8::encode");
+    is($s, "A$utf8_bytes","(pos $pos) str after  U; utf8::encode");
 }
 
 # [perl #119043] utf8::upgrade should not croak on read-only COWs
diff --git a/t/base/lex.t b/t/base/lex.t
index 6a8ac61..0a07ab7 100644
--- a/t/base/lex.t
+++ b/t/base/lex.t
@@ -251,8 +251,9 @@ print ((exists $str{xyz::bar} ? "" : "not ")."ok $test\n"); ++$test;
 sub foo::::::bar { print "ok $test\n"; $test++ }
 foo::::::bar;
 
-eval "\$x =\xE2foo";
-if ($@ =~ /Unrecognized character \\xE2; marked by <-- HERE after \$x =<-- HERE near column 5/) { print "ok $test\n"; } else { print "not ok $test\n"; }
+# \xDF is a non-ASCII alpha on both ASCII and EBCDIC.
+eval "\$x =\xDFfoo";
+if ($@ =~ /Unrecognized character \\xDF; marked by <-- HERE after \$x =<-- HERE near column 5/) { print "ok $test\n"; } else { print "not ok $test\n"; }
 $test++;
 
 # Is "[~" scanned correctly?
@@ -383,7 +384,7 @@ eval "package v10::foo; sub test2 { return 'v10::foo' }
 print "not " unless $output eq 'v10::foo';
 print "ok $test - call a function in package v10::foo\n"; $test++;
 
-print "not " unless (1?v65:"bar") eq 'A';
+print "not " unless (1?v65:"bar") eq chr(65);
 print "ok $test - colon detection after vstring does not break ? vstring :\n"; 
$test++;
 if (ord("\t") == 9) {
     print v35;
diff --git a/t/io/crlf.t b/t/io/crlf.t
index ea138fa..9331068 100644
--- a/t/io/crlf.t
+++ b/t/io/crlf.t
@@ -3,7 +3,7 @@
 BEGIN {
     chdir 't' if -d 't';
     @INC = qw(. ../lib);
-    require "./test.pl";
+    require "./test.pl"; require "charset_tools.pl";
     skip_all_without_perlio();
 }
 
@@ -11,6 +11,8 @@ use Config;
 
 
 my $file = tempfile();
+my $crlf = uni_to_native("\015\012");
+my $crcr = uni_to_native("\x0d\x0d");
 
 my $ungetc_count = 8200;    # Somewhat over the likely buffer size
 
@@ -22,13 +24,13 @@ my $ungetc_count = 8200;    # Somewhat over the likely buffer size
 
     my $text;
     { local $/; $text = <FOO> }
-    is(count_chars($text, "\015\012"), 0);
+    is(count_chars($text, $crlf), 0);
     is(count_chars($text, "\n"), 2000);
 
     binmode(FOO);
     seek(FOO,0,0);
     { local $/; $text = <FOO> }
-    is(count_chars($text, "\015\012"), 2000);
+    is(count_chars($text, $crlf), 2000);
 
     SKIP:
     {
@@ -37,7 +39,7 @@ my $ungetc_count = 8200;    # Somewhat over the likely buffer size
        skip("no PerlIO::scalar", 2 * $ungetc_count + 1)
            unless $Config{extensions} =~ m!\bPerlIO/scalar\b!;
        require PerlIO::scalar;
-       my $fcontents = join "", map {"$_\015\012"} "a".."zzz";
+       my $fcontents = join "", map {"$_$crlf"} "a".."zzz";
        open my $fh, "<:crlf", \$fcontents;
        local $/ = "xxx";
        local $_ = <$fh>;
@@ -80,8 +82,8 @@ my $ungetc_count = 8200;    # Somewhat over the likely buffer size
            close FOO;
            print join(" ", "#", map { sprintf("%02x", $_) } unpack("C*", 
$foo)),
            "\n";
-           like($foo, qr/\x0d\x0a$/);
-           unlike($foo, qr/\x0d\x0d/);
+           like($foo, qr/$crlf$/);
+           unlike($foo, qr/$crcr/);
        }
     }
 }
diff --git a/t/io/through.t b/t/io/through.t
index 315de90..eaf9bc0 100644
--- a/t/io/through.t
+++ b/t/io/through.t
@@ -72,10 +72,20 @@ sub testpipe ($$$$$$) {
     open $fh, '-|', qq[$Perl -we "$set_out;print for grep length, split 
/(.{1,$write_c})/s, qq($quoted)"] or die "open: $!";
   } elsif ($how_w eq 'print/flush') {
     # shell-neutral and miniperl-enabled autoflush? qq(\x24\x7c) eq '$|'
-    open $fh, '-|', qq[$Perl -we "$set_out;eval qq(\\x24\\x7c = 1) or die;print for grep length, split /(.{1,$write_c})/s, qq($quoted)"] or die "open: $!";
+    if ($::IS_ASCII) {
+        open $fh, '-|', qq[$Perl -we "$set_out;eval qq(\\x24\\x7c = 1) or die;print for grep length, split /(.{1,$write_c})/s, qq($quoted)"] or die "open: $!";
+    }
+    else {
+        open $fh, '-|', qq[$Perl -we "$set_out;eval qq(\\x5b\\x4f = 1) or die;print for grep length, split /(.{1,$write_c})/s, qq($quoted)"] or die "open: $!";
+    }
   } elsif ($how_w eq 'syswrite') {
     ### How to protect \$_
-    open $fh, '-|', qq[$Perl -we "$set_out;eval qq(sub w {syswrite STDOUT, \\x24_} 1) or die; w() for grep length, split /(.{1,$write_c})/s, qq($quoted)"] or die "open: $!";
+    if ($::IS_ASCII) {
+        open $fh, '-|', qq[$Perl -we "$set_out;eval qq(sub w {syswrite STDOUT, \\x24_} 1) or die; w() for grep length, split /(.{1,$write_c})/s, qq($quoted)"] or die "open: $!";
+    }
+    else {
+        open $fh, '-|', qq[$Perl -we "$set_out;eval qq(sub w {syswrite STDOUT, \\x5B_} 1) or die; w() for grep length, split /(.{1,$write_c})/s, qq($quoted)"] or die "open: $!";
+    }
   } else {
     die "Unrecognized write: '$how_w'";
   }
@@ -112,7 +122,13 @@ sub testfile ($$$$$$) {
 }
 
 # shell-neutral and miniperl-enabled autoflush? qq(\x24\x7c) eq '$|'
-open my $fh, '-|', qq[$Perl -we "eval qq(\\x24\\x7c = 1) or die; binmode 
STDOUT; sleep 1, print for split //, qq(a\nb\n\nc\n\n\n)"] or die "open: $!";
+my $fh;
+if ($::IS_ASCII) {
+    open $fh, '-|', qq[$Perl -we "eval qq(\\x24\\x7c = 1) or die; binmode 
STDOUT; sleep 1, print for split //, qq(a\nb\n\nc\n\n\n)"] or die "open: $!";
+}
+else {
+    open $fh, '-|', qq[$Perl -we "eval qq(\\x5B\\x4f = 1) or die; binmode 
STDOUT; sleep 1, print for split //, qq(a\nb\n\nc\n\n\n)"] or die "open: $!";
+}
 ok(1, 'open pipe');
 binmode $fh, q(:crlf);
 ok(1, 'binmode');
@@ -121,7 +137,16 @@ my @c;
 push @c, ord $c while $c = getc $fh;
 ok(1, 'got chars');
 is(scalar @c, 9, 'got 9 chars');
-is("@c", '97 10 98 10 10 99 10 10 10', 'got expected chars');
+is("@c", join(" ", utf8::unicode_to_native(97),
+                   utf8::unicode_to_native(10),
+                   utf8::unicode_to_native(98),
+                   utf8::unicode_to_native(10),
+                   utf8::unicode_to_native(10),
+                   utf8::unicode_to_native(99),
+                   utf8::unicode_to_native(10),
+                   utf8::unicode_to_native(10),
+                   utf8::unicode_to_native(10)),
+         'got expected chars');
 ok(close($fh), 'close');
 
 for my $s (1..2) {
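
Aside (illustrative, not part of the patch): the t/io/through.t hunks above hard-code
the hex escapes for the two characters of '$|'.  A minimal sketch of where those
numbers come from; the printf and comments are mine, not from the test:

    use strict;
    use warnings;
    # Print the native code points of '$' and '|'; these are the bytes that
    # the qq(\x..\x..) escapes in the spawned one-liners must spell out.
    printf "ord('\$') = 0x%02X, ord('|') = 0x%02X\n", ord('$'), ord('|');
    # ASCII/Latin-1 platforms print 0x24 and 0x7C; EBCDIC 1047/037 print
    # 0x5B and 0x4F, which is why the test now branches on $::IS_ASCII.
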
diff --git a/t/io/utf8.t b/t/io/utf8.t
index 4f96dcc..756f307 100644
--- a/t/io/utf8.t
+++ b/t/io/utf8.t
@@ -3,7 +3,7 @@
 BEGIN {
     chdir 't' if -d 't';
     @INC = '../lib';
-    require './test.pl';
+    require './test.pl'; require './charset_tools.pl';
     skip_all_without_perlio();
 }
 
@@ -27,17 +27,18 @@ is( getc(F), "
 is( getc(F), "\n" );
 seek(F,0,0);
 binmode(F,":bytes");
-my $chr = chr(0xc4);
-if (ord($a_file) == 193) { $chr = chr(0x8c); } # EBCDIC
+
+# Byte representation of these characters
+my $U_100 = byte_utf8a_to_utf8n("\xc4\x80");
+my $POUND_SIGN = byte_utf8a_to_utf8n("\xc2\xa3");
+
+my $chr = substr($U_100, 0, 1);
 is( getc(F), $chr );
-$chr = chr(0x80);
-if (ord($a_file) == 193) { $chr = chr(0x41); } # EBCDIC
+$chr = substr($U_100, 1, 1);
 is( getc(F), $chr );
-$chr = chr(0xc2);
-if (ord($a_file) == 193) { $chr = chr(0x80); } # EBCDIC
+$chr = substr($POUND_SIGN, 0, 1);
 is( getc(F), $chr );
-$chr = chr(0xa3);
-if (ord($a_file) == 193) { $chr = chr(0x44); } # EBCDIC
+$chr = substr($POUND_SIGN, 1, 1);
 is( getc(F), $chr );
 is( getc(F), "\n" );
 seek(F,0,0);
@@ -68,7 +69,7 @@ close(F);
     $x = <F>;
     chomp($x);
     $chr = chr(196).chr(172);
-    if (ord($a_file) == 193) { $chr = chr(141).chr(83); } # EBCDIC
+    if ($::IS_EBCDIC) { $chr = chr(141).chr(83); } # EBCDIC
     is( $x, $chr );
     close F;
 
@@ -96,7 +97,7 @@ close(F);
 
     {
        my $x = tell(F);
-       { use bytes; if (ord('A')==193){$y += 2;}else{$y += 3;}} # EBCDIC ASCII
+       { use bytes; if ($::IS_EBCDIC){$y += 2;}else{$y += 3;}} # EBCDIC ASCII
        cmp_ok( $x, '==', $y );
     }
 
@@ -107,7 +108,7 @@ close(F);
     $x = <F>;
     chomp($x);
     $chr = v196.172.194.130;
-    if (ord('A') == 193) { $chr = v141.83.130; } # EBCDIC
+    if ($::IS_EBCDIC) { $chr = v141.83.130; } # EBCDIC
     is( $x, $chr, sprintf('(%vd)', $x) );
 
     open F, "<:utf8", $a_file or die $!;
@@ -141,7 +142,7 @@ open F, "<", $a_file or die $!;
 binmode(F, ":bytes");
 $x = <F>; chomp $x;
 $chr = v196.172.130;
-if (ord('A') == 193) { $chr = v141.83.130; } # EBCDIC
+if ($::IS_EBCDIC) { $chr = v141.83.130; } # EBCDIC
 is( $x, $chr );
 
 # Right.
@@ -165,7 +166,7 @@ SKIP: {
 # Now we have a deformed file.
 
 SKIP: {
-    if (ord('A') == 193) {
+    if ($::IS_EBCDIC) {
        skip("EBCDIC doesn't complain", 2);
     } else {
        my @warnings;
@@ -330,7 +331,7 @@ is($failed, undef);
     open F, ">$a_file";
     binmode F;
     my ($chrE4, $chrF6) = (chr(0xE4), chr(0xF6));
-    if (ord('A') == 193)       # EBCDIC
+    if ($::IS_EBCDIC)  # EBCDIC
     { ($chrE4, $chrF6) = (chr(0x43), chr(0xEC)); }
     print F "foo", $chrE4, "\n";
     print F "foo", $chrF6, "\n";
@@ -339,7 +340,7 @@ is($failed, undef);
     undef $@;
     my $line = <F>;
     my ($chrE4, $chrF6) = ("E4", "F6");
-    if (ord('A') == 193) { ($chrE4, $chrF6) = ("43", "EC"); } # EBCDIC
+    if ($::IS_EBCDIC) { ($chrE4, $chrF6) = ("43", "EC"); } # EBCDIC
     like( $@, qr/utf8 "\\x$chrE4" does not map to Unicode .+ <F> line 1/,
          "<:utf8 readline must warn about bad utf8");
     undef $@;
@@ -393,7 +394,7 @@ is($failed, undef);
 SKIP: {
     skip "no PerlIO::scalar on miniperl", 2, if is_miniperl();
     open my $fh, "<:raw",  \($buf = chr 255);
-    open my $uh, "<:utf8", \($uuf = "\xc4\x80");
+    open my $uh, "<:utf8", \($uuf = $U_100);
     for([$uh,chr 256], [$fh,chr 255]) {
        is getc $$_[0], $$_[1],
          'getc returning non-utf8 after utf8';
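
For reference (illustrative, not from the patch): several of the rewritten tests
above use the core pair utf8::unicode_to_native() and utf8::native_to_unicode()
in place of hard-coded "ord('A') == 193" checks.  A tiny self-contained
demonstration:

    use strict;
    use warnings;
    # Both functions are identity maps on ASCII platforms.
    printf "Unicode U+0041 is native 0x%02X\n", utf8::unicode_to_native(0x41);  # 0xC1 on EBCDIC
    printf "native 'A' is Unicode U+%04X\n", utf8::native_to_unicode(ord 'A');  # 0x0041 everywhere
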
diff --git a/t/lib/croak/op b/t/lib/croak/op
index 5652beb..2d2887d 100644
--- a/t/lib/croak/op
+++ b/t/lib/croak/op
@@ -11,13 +11,14 @@ EXPECT
 Can't use global $! in "my" at - line 1, near "my $!"
 Execution of - aborted due to compilation errors.
 ########
-# NAME my $<latin1>
+# NAME my $<non-ASCII> doesn't output garbage
+# \xB6 is same character in all three EBCDIC pages and Latin1
 use open ":std", ":utf8";
-eval qq|my \$\xe9;|; # é in Latin-1
+eval qq|my \$\xb6;|; # ¶ in Latin-1, and EBCDIC 1047, 037, POSIX-BC
 print $@;
 exit 1;
 EXPECT
-Can't use global $é in "my" at (eval 1) line 1, near "my $é"
+Can't use global $¶ in "my" at (eval 1) line 1, near "my $¶"
 ########
 # NAME OP_HELEM fields
 package Foo;
diff --git a/t/lib/feature/bundle b/t/lib/feature/bundle
index 429e68e..a5a6784 100644
--- a/t/lib/feature/bundle
+++ b/t/lib/feature/bundle
@@ -9,20 +9,20 @@ Helloworld
 ########
 # Standard feature bundle, no 5.11
 use feature ":5.10";
-say ord uc chr 233;
+say utf8::native_to_unicode(ord uc chr utf8::unicode_to_native(233));
 EXPECT
 233
 ########
 # Standard feature bundle, 5.11
 use feature ":5.11";
-say ord uc chr 233;
+say utf8::native_to_unicode(ord uc chr utf8::unicode_to_native(233));
 EXPECT
 201
 ########
 # Standard feature bundle, 5.11
 use feature ":5.11";
 use utf8;
-say ord "\ué"; # this is utf8
+say utf8::native_to_unicode(ord "\ué"); # this is utf8
 EXPECT
 201
 ########
diff --git a/t/lib/feature/implicit b/t/lib/feature/implicit
index a741421..79f1bf8 100644
--- a/t/lib/feature/implicit
+++ b/t/lib/feature/implicit
@@ -107,18 +107,21 @@ b
 ########
 # Implicit unicode_string feature
 use v5.14;
-print 'ss' =~ /\xdf/i ? "ok\n" : "nok\n";
+my $sharp_s = chr utf8::unicode_to_native(0xdf);
+print 'ss' =~ /$sharp_s/i ? "ok\n" : "nok\n";
 use v5.8.8;
-print 'ss' =~ /\xdf/i ? "ok\n" : "nok\n";
+print 'ss' =~ /$sharp_s/i ? "ok\n" : "nok\n";
 EXPECT
 ok
 nok
 ########
 # Implicit unicode_eval feature
 use v5.15;
-print eval "use utf8; q|\xc5\xbf|" eq "\xc5\xbf" ? "ok\n" : "nok\n";
+require '../../t/charset_tools.pl';
+my $long_s = byte_utf8a_to_utf8n("\xc5\xbf");
+print eval "use utf8; q|$long_s|" eq $long_s ? "ok\n" : "nok\n";
 use v5.8.8;
-print eval "use utf8; q|\xc5\xbf|" eq "\x{17f}" ? "ok\n" : "nok\n";
+print eval "use utf8; q|$long_s|" eq "\x{17f}" ? "ok\n" : "nok\n";
 EXPECT
 ok
 ok
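
For reference (a sketch under assumptions, not the real helper):
byte_utf8a_to_utf8n() from t/charset_tools.pl takes the bytes a string would
have as UTF-8 on an ASCII platform and returns the native byte sequence
(UTF-EBCDIC on EBCDIC boxes, unchanged elsewhere).  Conceptually, for the
U+017F (LATIN SMALL LETTER LONG S) used above:

    use strict;
    use warnings;
    my $code_point   = 0x17F;            # what the ASCII-platform bytes \xC5\xBF encode
    my $native_bytes = chr $code_point;  # one-character string
    utf8::encode($native_bytes);         # in place: now the native UTF-8/UTF-EBCDIC bytes
    printf "native encoding of U+017F: %vX\n", $native_bytes;  # C5.BF on ASCII platforms
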
diff --git a/t/lib/warnings/op b/t/lib/warnings/op
index bb0eb9e..477fdad 100644
--- a/t/lib/warnings/op
+++ b/t/lib/warnings/op
@@ -764,10 +764,23 @@ Useless use of a constant (undef) in void context at - line 8.
 Useless use of a constant ("\"\t\n") in void context at - line 9.
 ########
 # op.c
+BEGIN {
+    if (ord('A') == 193) {
+        print "SKIPPED\n# Result varies depending on EBCDIC code page";
+        exit 0;
+    }
+}
 use utf8;
 use open qw( :utf8 :std );
 use warnings 'void' ;
 "àḆc"; # OP_CONST
+EXPECT
+Useless use of a constant ("\340\x{1e06}c") in void context at - line 11.
+########
+# op.c
+use utf8;
+use open qw( :utf8 :std );
+use warnings 'void' ;
 "Ẋ" . "ƴ"; # optimized to OP_CONST
 FOO;     # Bareword optimized to OP_CONST
 use constant ů => undef;
@@ -778,10 +791,9 @@ no warnings 'void' ;
 "àḆc"; # OP_CONST
 "Ẋ" . "ƴ"; # optimized to OP_CONST
 EXPECT
-Useless use of a constant ("\340\x{1e06}c") in void context at - line 5.
-Useless use of a constant ("\x{1e8a}\x{1b4}") in void context at - line 6.
-Useless use of a constant ("\x{ff26}\x{ff2f}\x{ff2f}") in void context at - 
line 7.
-Useless use of a constant (undef) in void context at - line 9.
+Useless use of a constant ("\x{1e8a}\x{1b4}") in void context at - line 5.
+Useless use of a constant ("\x{ff26}\x{ff2f}\x{ff2f}") in void context at - 
line 6.
+Useless use of a constant (undef) in void context at - line 8.
 ########
 # op.c
 #
@@ -1148,6 +1160,12 @@ Prototype after '@' for main::foo : @\0 at (eval 1) line 1.
 Illegal character in prototype for main::foo : @\0 at (eval 1) line 1.
 ########
 # op.c
+BEGIN {
+    if (ord('A') == 193) {
+        print "SKIPPED\n# Different results on EBCDIC";
+        exit 0;
+    }
+}
 use utf8;
 use open qw( :utf8 :std );
 use warnings;
diff --git a/t/lib/warnings/regcomp b/t/lib/warnings/regcomp
index f62f5f1..b9943a0 100644
--- a/t/lib/warnings/regcomp
+++ b/t/lib/warnings/regcomp
@@ -21,6 +21,12 @@ Non-octal character '8'.  Resolved as "\o{123}" at - line 3.
 Non-octal character '8'.  Resolved as "\o{654}" at - line 4.
 ########
 # regcomp.c.c
+BEGIN {
+    if (ord('A') == 193) {
+        print "SKIPPED\n# Different results on EBCDIC";
+        exit 0;
+    }
+}
 use warnings;
 $a = qr/\c,/;
 $a = qr/[\c,]/;
@@ -28,5 +34,5 @@ no warnings 'syntax';
 $a = qr/\c,/;
 $a = qr/[\c,]/;
 EXPECT
-"\c," is more clearly written simply as "l" at - line 3.
-"\c," is more clearly written simply as "l" at - line 4.
+"\c," is more clearly written simply as "l" at - line 9.
+"\c," is more clearly written simply as "l" at - line 10.
diff --git a/t/lib/warnings/toke b/t/lib/warnings/toke
index 4e15f75..5d31104 100644
--- a/t/lib/warnings/toke
+++ b/t/lib/warnings/toke
@@ -1355,6 +1355,12 @@ syntax error at - line 3, near "=~ ?"
 Execution of - aborted due to compilation errors.
 ########
 # toke.c
+BEGIN {
+    if (ord('A') == 193) {
+        print "SKIPPED\n# result varies depending on which ebcdic platform";
+        exit 0;
+    }
+}
 use warnings;
 $a = "\c,";
 $a = "\c`";
@@ -1362,8 +1368,8 @@ no warnings 'syntax';
 $a = "\c,";
 $a = "\c`";
 EXPECT
-"\c," is more clearly written simply as "l" at - line 3.
-"\c`" is more clearly written simply as "\ " at - line 4.
+"\c," is more clearly written simply as "l" at - line 9.
+"\c`" is more clearly written simply as "\ " at - line 10.
 ########
 # toke.c
 BEGIN {
diff --git a/t/op/bop.t b/t/op/bop.t
index abcfc26..09f2be9 100644
--- a/t/op/bop.t
+++ b/t/op/bop.t
@@ -7,7 +7,7 @@
 BEGIN {
     chdir 't' if -d 't';
     @INC = '../lib';
-    require "./test.pl";
+    require "./test.pl"; require "./charset_tools.pl";
     require Config;
 }
 
@@ -63,19 +63,25 @@ is (($foo | $bar), ($Aoz x 75 . $zap));
 # ^ does not truncate
 is (($foo ^ $bar), ($Axz x 75 . $zap));
 
-# string constants
-sub _and($) { $_[0] & "+0" }
-sub _oar($) { $_[0] | "+0" }
-sub _xor($) { $_[0] ^ "+0" }
-is _and "waf", '# ',  'str var & const str'; # These three
-is _and  0,    '0',   'num var & const str';    # are from
-is _and "waf", '# ',  'str var & const str again'; # [perl #20661]
-is _oar "yit", '{yt', 'str var | const str';
-is _oar  0,    '0',   'num var | const str';
-is _oar "yit", '{yt', 'str var | const str again';
-is _xor "yit", 'RYt', 'str var ^ const str';
-is _xor  0,    '0',   'num var ^ const str';
-is _xor "yit", 'RYt', 'str var ^ const str again';
+# string constants.  These tests expect the bit patterns of these strings in
+# ASCII, so convert to that.
+sub _and($) { $_[0] & native_to_uni("+0") }
+sub _oar($) { $_[0] | native_to_uni("+0") }
+sub _xor($) { $_[0] ^ native_to_uni("+0") }
+is _and native_to_uni("waf"), native_to_uni('# '),  'str var & const str'; # 
[perl #20661]
+is _and native_to_uni("waf"), native_to_uni('# '),  'str var & const str 
again'; # [perl #20661]
+is _oar native_to_uni("yit"), native_to_uni('{yt'), 'str var | const str';
+is _oar native_to_uni("yit"), native_to_uni('{yt'), 'str var | const str 
again';
+is _xor native_to_uni("yit"), native_to_uni('RYt'), 'str var ^ const str';
+is _xor native_to_uni("yit"), native_to_uni('RYt'), 'str var ^ const str 
again';
+
+SKIP: {
+    skip "Converting a numeric doesn't work with EBCDIC unlike the above 
tests",
+         3 if $::IS_EBCDIC;
+    is _and  0, '0',   'num var & const str';     # [perl #20661]
+    is _oar  0, '0',   'num var | const str';
+    is _xor  0, '0',   'num var ^ const str';
+}
 
 # But don’t mistake a COW for a constant when assigning to it
 %h=(150=>1);
@@ -126,68 +132,54 @@ is (sprintf("%vd", $a), '248.444');
 # UTF8 ~ behaviour
 #
 
-my $Is_EBCDIC = (ord('A') == 193) ? 1 : 0;
+SKIP: {
+    skip "Complements exceed maximum representable on EBCDIC ", 5 if 
$::IS_EBCDIC;
 
-my @not36;
+    my @not36;
 
-for (0x100...0xFFF) {
-  $a = ~(chr $_);
-  if ($Is_EBCDIC) {
-      push @not36, sprintf("%#03X", $_)
-          if $a ne chr(~$_) or length($a) != 1;
-  }
-  else {
-      push @not36, sprintf("%#03X", $_)
-          if $a ne chr(~$_) or length($a) != 1 or ~$a ne chr($_);
-  }
-}
-is (join (', ', @not36), '');
+    for (0x100...0xFFF) {
+    $a = ~(chr $_);
+        push @not36, sprintf("%#03X", $_)
+            if $a ne chr(~$_) or length($a) != 1 or ~$a ne chr($_);
+    }
+    is (join (', ', @not36), '');
 
-my @not37;
+    my @not37;
 
-for my $i (0xEEE...0xF00) {
-  for my $j (0x0..0x120) {
-    $a = ~(chr ($i) . chr $j);
-    if ($Is_EBCDIC) {
-        push @not37, sprintf("%#03X %#03X", $i, $j)
-           if $a ne chr(~$i).chr(~$j) or
-              length($a) != 2;
-    }
-    else {
-        push @not37, sprintf("%#03X %#03X", $i, $j)
-           if $a ne chr(~$i).chr(~$j) or
-              length($a) != 2 or 
-               ~$a ne chr($i).chr($j);
+    for my $i (0xEEE...0xF00) {
+        for my $j (0x0..0x120) {
+            $a = ~(chr ($i) . chr $j);
+                push @not37, sprintf("%#03X %#03X", $i, $j)
+                    if $a ne chr(~$i).chr(~$j) or
+                    length($a) != 2 or
+                    ~$a ne chr($i).chr($j);
+        }
     }
-  }
-}
-is (join (', ', @not37), '');
+    is (join (', ', @not37), '');
 
-SKIP: {
-  skip "EBCDIC" if $Is_EBCDIC;
-  is (~chr(~0), "\0");
-}
+    is (~chr(~0), "\0");
 
 
-my @not39;
+    my @not39;
 
-for my $i (0x100..0x120) {
-    for my $j (0x100...0x120) {
-       push @not39, sprintf("%#03X %#03X", $i, $j)
-           if ~(chr($i)|chr($j)) ne (~chr($i)&~chr($j));
+    for my $i (0x100..0x120) {
+        for my $j (0x100...0x120) {
+            push @not39, sprintf("%#03X %#03X", $i, $j)
+                if ~(chr($i)|chr($j)) ne (~chr($i)&~chr($j));
+        }
     }
-}
-is (join (', ', @not39), '');
+    is (join (', ', @not39), '');
 
-my @not40;
+    my @not40;
 
-for my $i (0x100..0x120) {
-    for my $j (0x100...0x120) {
-       push @not40, sprintf("%#03X %#03X", $i, $j)
-           if ~(chr($i)&chr($j)) ne (~chr($i)|~chr($j));
+    for my $i (0x100..0x120) {
+        for my $j (0x100...0x120) {
+            push @not40, sprintf("%#03X %#03X", $i, $j)
+                if ~(chr($i)&chr($j)) ne (~chr($i)|~chr($j));
+        }
     }
+    is (join (', ', @not40), '');
 }
-is (join (', ', @not40), '');
 
 
 # More variations on 19 and 22.
@@ -441,7 +433,7 @@ SKIP: {
 # update to pp_complement() via Coverity
 SKIP: {
   # UTF-EBCDIC is limited to 0x7fffffff and can't encode ~0.
-  skip "EBCDIC" if $Is_EBCDIC;
+  skip "Complements exceed maximum representable on EBCDIC ", 2 if 
$::IS_EBCDIC;
 
   my $str = "\x{10000}\x{800}";
   # U+10000 is four bytes in UTF-8/UTF-EBCDIC.
@@ -484,7 +476,14 @@ SKIP: {
   is 22 &. 66, 22,     '&. with numbers';
   is 22 |. 66, 66,     '|. with numbers';
   is 22 ^. 66, "\4\4", '^. with numbers';
-  is ~.22, "\xcd\xcd", '~. with number';
+  if ($::IS_EBCDIC) {
+    # ord('2') is 0xF2 on EBCDIC
+    is ~.22, "\x0d\x0d", '~. with number';
+  }
+  else {
+    # ord('2') is 0x32 on ASCII
+    is ~.22, "\xcd\xcd", '~. with number';
+  }
   $_ = "22";
   is $_ &= "66", 2,  'numeric &= with strings';
   $_ = "22";
@@ -617,5 +616,7 @@ $strval = "z";
 is("$obj", "z", "|= doesn't break string overload");
 
 # [perl #29070]
-$^A .= new version ~$_ for "\xce", v205, "\xcc";
+$^A .= new version ~$_ for eval sprintf('"\\x%02x"', 0xff - ord("1")),
+                           $::IS_EBCDIC ? v13 : v205, # 255 - ord('2')
+                           eval sprintf('"\\x%02x"', 0xff - ord("3"));
 is $^A, "123", '~v0 clears vstring magic on retval';
diff --git a/t/op/chars.t b/t/op/chars.t
index 3fa9b8f..5eef677 100644
--- a/t/op/chars.t
+++ b/t/op/chars.t
@@ -76,15 +76,13 @@ $c = "\c^";
 is (ord($c), 30, '\c^');
 $c = "\c_";
 is (ord($c), 31, '\c_');
-$c = "\c?";
 
 # '\c?' is an outlier, and is treated differently on each platform.
 # It's DEL on ASCII, and APC on EBCDIC
-is (ord($c), ((ord('^') == 95 || ord('^') == 175) # 1047 or 0037
-               ? 255
-               : ord('^') == 106    # Posix-BC
-                 ? 95
-                 : 127),
+$c = "\c?";
+is (ord($c), ($::IS_ASCII)
+             ? 127
+             : utf8::unicode_to_native(0x9F),
               '\c?');
 $c = '';
 is (ord($c), 0, 'ord("") is 0');
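
For reference (not from the patch): "\c?" is DEL on ASCII platforms and the
APC control on EBCDIC, i.e. Unicode U+009F translated to the native code
point, which is exactly what the rewritten expectation above encodes.

    printf '"\c?" has ordinal 0x%02X' . "\n", ord "\c?";  # 0x7F on an ASCII platform
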
diff --git a/t/op/chop.t b/t/op/chop.t
index 91c4fbe..bdeaf0d 100644
--- a/t/op/chop.t
+++ b/t/op/chop.t
@@ -6,7 +6,9 @@ BEGIN {
     require './test.pl'; require './charset_tools.pl';
 }
 
-plan tests => 148;
+my $tests_count = 148;
+$tests_count -= 2 if $::IS_EBCDIC;
+plan tests => $tests_count;
 
 $_ = 'abc';
 $c = foo();
@@ -183,7 +185,10 @@ ok($@ =~ /Can\'t modify.*chop.*in.*assignment/);
 eval 'chomp($x, $y) = (1, 2);';
 ok($@ =~ /Can\'t modify.*chom?p.*in.*assignment/);
 
-my @chars = ("N", uni_to_native("\xd3"), substr ("\xd4\x{100}", 0, 1), chr 
1296);
+my @chars = ("N",
+             uni_to_native("\xd3"),
+             substr (uni_to_native("\xd4") . "\x{100}", 0, 1),
+             chr 1296);
 foreach my $start (@chars) {
   foreach my $end (@chars) {
     local $/ = $end;
@@ -244,23 +249,31 @@ foreach my $start (@chars) {
     ok(1, "extend sp in pp_chomp");
 }
 
-{
+SKIP: {
     # [perl #73246] chop doesn't support utf8
     # the problem was UTF8_IS_START() didn't handle perl's extended UTF8
-    my $utf = "\x{80000001}\x{80000000}";
+    skip("Not representable in EBCDIC", 2) if $::IS_EBCDIC;
+
+    # We use hex constants instead of literal chars to avoid compilation
+    # errors in EBCDIC.
+    my $first_char =  0x80000001;
+    my $second_char = 0x80000000;
+    my $utf = chr($first_char) . chr($second_char);
     my $result = chop($utf);
-    is($utf, "\x{80000001}", "chopping high 'unicode'- remnant");
-    is($result, "\x{80000000}", "chopping high 'unicode' - result");
+    is($utf, chr $first_char, "chopping high 'unicode'- remnant");
+    is($result, chr $second_char, "chopping high 'unicode' - result");
 
     SKIP: {
        no warnings 'overflow'; # avoid compile-time warnings below on 32-bit architectures
         use Config;
         $Config{ivsize} >= 8
          or skip("this build can't handle very large characters", 2);
-        my $utf = "\x{ffffffffffffffff}\x{fffffffffffffffe}";
+        my $first_char =  0xffffffffffffffff;
+        my $second_char = 0xfffffffffffffffe;
+        my $utf = chr($first_char) . chr($second_char);
         my $result = chop $utf;
-        is($utf, "\x{ffffffffffffffff}", "chop even higher 'unicode' - remnant");
-        is($result, "\x{fffffffffffffffe}", "chop even higher 'unicode' - result");
+        is($utf, chr $first_char, "chop even higher 'unicode' - remnant");
+        is($result, chr $second_char, "chop even higher 'unicode' - result");
     }
 }
 
diff --git a/t/op/coreamp.t b/t/op/coreamp.t
index 9005306..bcc2f1f 100644
--- a/t/op/coreamp.t
+++ b/t/op/coreamp.t
@@ -10,7 +10,7 @@
 BEGIN {
     chdir 't' if -d 't';
     @INC = qw(. ../lib ../dist/if);
-    require "./test.pl";
+    require "./test.pl"; require './charset_tools.pl';
     $^P |= 0x100;
 }
 
@@ -462,7 +462,8 @@ test_proto $_ for qw(
 test_proto 'evalbytes';
 $tests += 4;
 {
-  chop(my $upgraded = "use utf8; '\xc4\x80'" . chr 256);
+  my $U_100_bytes = byte_utf8a_to_utf8n("\xc4\x80");
+  chop(my $upgraded = "use utf8; $U_100_bytes" . chr 256);
   is &myevalbytes($upgraded), chr 256, '&evalbytes';
   # Test hints
   require strict;
@@ -500,7 +501,7 @@ test_proto 'exp';
 test_proto 'fc';
 $tests += 2;
 {
-  my $sharp_s = "\xdf";
+  my $sharp_s = uni_to_native("\xdf");
   is &myfc($sharp_s), $sharp_s, '&fc, no unicode_strings';
   use feature 'unicode_strings';
   is &myfc($sharp_s), "ss", '&fc, unicode_strings';
@@ -632,12 +633,15 @@ close file;
 }
 
 test_proto 'opendir';
-test_proto 'ord', chr(64), 64;
+test_proto 'ord', chr(utf8::unicode_to_native(64)), utf8::unicode_to_native(64);
 
 test_proto 'pack';
 $tests += 2;
-is &mypack("H*", '5065726c'), 'Perl', '&pack';
-lis [&mypack("H*", '5065726c')], ['Perl'], '&pack in list context';
+my $Perl_as_a_hex_string = join "", map
+                                    { sprintf("%2X", utf8::unicode_to_native($_)) }
+                                    0x50, 0x65, 0x72, 0x6c;
+is &mypack("H*", $Perl_as_a_hex_string), 'Perl', '&pack';
+lis [&mypack("H*", $Perl_as_a_hex_string)], ['Perl'], '&pack in list context';
 
 test_proto 'pipe';
 
@@ -949,9 +953,15 @@ undef @_;
 
 test_proto 'unpack';
 $tests += 2;
+my $abcd_as_a_hex_string = join "", map
+                                    { sprintf("%2X", utf8::unicode_to_native($_)) }
+                                    0x61, 0x62, 0x63, 0x64;
+my $bcde_as_a_hex_string = join "", map
+                                    { sprintf("%2X", utf8::unicode_to_native($_)) }
+                                    0x62, 0x63, 0x64, 0x65;
 $_ = 'abcd';
-is &myunpack("H*"), '61626364', '&unpack with one arg';
-is &myunpack("H*", "bcde"), '62636465', '&unpack with two arg';
+is &myunpack("H*"), $abcd_as_a_hex_string, '&unpack with one arg';
+is &myunpack("H*", "bcde"), $bcde_as_a_hex_string, '&unpack with two arg';
 
 
 test_proto 'untie'; # behaviour already tested along with tie(d)
diff --git a/t/op/evalbytes.t b/t/op/evalbytes.t
index 9961072..cca7c04 100644
--- a/t/op/evalbytes.t
+++ b/t/op/evalbytes.t
@@ -3,7 +3,7 @@
 BEGIN {
     chdir 't' if -d 't';
     @INC = '../lib';
-    require './test.pl';
+    require './test.pl'; require './charset_tools.pl';
 }
 
 plan(tests => 8);
@@ -27,8 +27,9 @@ is evalbytes($upcode), "\xff\xfe", 'evalbytes on upgraded extra-ASCII';
     use utf8;
     is evalbytes($code), "\xff\xfe", 'evalbytes ignores outer utf8 pragma';
 }
-is evalbytes "use utf8; '\xc4\x80'", chr 256, 'use utf8 within evalbytes';
-chop($upcode = "use utf8; '\xc4\x80'" . chr 256);
+my $U_100 = byte_utf8a_to_utf8n("\xc4\x80");
+is evalbytes "use utf8; $U_100", chr 256, 'use utf8 within evalbytes';
+chop($upcode = "use utf8; $U_100" . chr 256);
 is evalbytes $upcode, chr 256, 'use utf8 within evalbytes on utf8 string';
 eval { evalbytes chr 256 };
 like $@, qr/Wide character/, 'evalbytes croaks on non-bytes';
diff --git a/t/op/goto.t b/t/op/goto.t
index ccee71b..ca48ac0 100644
--- a/t/op/goto.t
+++ b/t/op/goto.t
@@ -5,7 +5,7 @@
 BEGIN {
     chdir 't' if -d 't';
     @INC = qw(. ../lib);
-    require "./test.pl";
+    require "./test.pl"; require './charset_tools.pl';
 }
 
 use warnings;
@@ -473,7 +473,7 @@ is sub {
     goto &returnarg;
 }->("quick and easy"), "ick and queasy",
   'goto &foo with *_{ARRAY} replaced';
-my @__ = "\xc4\x80";
+my @__ = byte_utf8a_to_utf8n("\xc4\x80");
 sub { local *_ = \@__; goto &utf8::decode }->("no thinking aloud");
 is "@__", chr 256, 'goto &xsub with replaced *_{ARRAY}';
 
diff --git a/t/op/inc.t b/t/op/inc.t
index a563d70..517fa2d 100644
--- a/t/op/inc.t
+++ b/t/op/inc.t
@@ -320,9 +320,16 @@ is($_, -1, 'regexp--');
     is($_, 1, 'regexp++');
 }
 
-$_ = v97;
-$_++;
-isnt(ref\$_, 'VSTRING', '++ flattens vstrings');
+if ($::IS_EBCDIC) {
+    $_ = v129;
+    $_++;
+    isnt(ref\$_, 'VSTRING', '++ flattens vstrings');
+}
+else {
+    $_ = v97;
+    $_++;
+    isnt(ref\$_, 'VSTRING', '++ flattens vstrings');
+}
 
 sub TIESCALAR {bless\my $x}
 sub STORE { ++$store::called }
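
For reference (not from the patch): 'a' is 0x61 in ASCII but 0x81 in EBCDIC,
so v97 names a letter only on ASCII platforms; v129 is presumably chosen so
the increment still starts from a letter on EBCDIC.  Either way the point of
the test is that ++ flattens the v-string:

    printf "ord('a') = 0x%02X\n", ord 'a';  # 0x61 on ASCII, 0x81 on EBCDIC
    my $x = v97;           # a v-string scalar
    $x++;                  # magic increment; the scalar is no longer a VSTRING
    print ref \$x, "\n";   # prints "SCALAR"
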
diff --git a/t/op/lc.t b/t/op/lc.t
index 716cb2a..ffea0ae 100644
--- a/t/op/lc.t
+++ b/t/op/lc.t
@@ -104,17 +104,17 @@ is(uc($b)         , "\x{100}\x{100}AA",  'uc');
 is(lc($b)         , "\x{101}\x{101}aa",  'lc');
 is(fc($b)         , "\x{101}\x{101}aa",  'fc');
 
+my $sharp_s = uni_to_native("\x{DF}");
 # \x{DF} is LATIN SMALL LETTER SHARP S, its uppercase is SS or \x{53}\x{53};
 # \x{149} is LATIN SMALL LETTER N PRECEDED BY APOSTROPHE, its uppercase is
 # \x{2BC}\x{E4} or MODIFIER LETTER APOSTROPHE and N.
 
-is(uni_to_native("\U\x{DF}aB\x{149}cD"), uni_to_native("SSAB\x{2BC}NCD"),
-       "multicharacter uppercase");
+is("\U${sharp_s}aB\x{149}cD", "SSAB\x{2BC}NCD", "multicharacter uppercase");
 
 # The \x{DF} is its own lowercase, ditto for \x{149}.
 # There are no single character -> multiple characters lowercase mappings.
 
-is(uni_to_native("\L\x{DF}aB\x{149}cD"), uni_to_native("\x{DF}ab\x{149}cd"),
+is("\L${sharp_s}aB\x{149}cD", "${sharp_s}ab\x{149}cd",
        "multicharacter lowercase");
 
 # \x{DF} is LATIN SMALL LETTER SHARP S, its foldcase is ss or \x{73}\x{73};
@@ -122,8 +122,7 @@ is(uni_to_native("\L\x{DF}aB\x{149}cD"), uni_to_native("\x{DF}ab\x{149}cd"),
 # \x{2BC}\x{6E} or MODIFIER LETTER APOSTROPHE and n.
 # Note that is this further tested in t/uni/fold.t
 
-is(uni_to_native("\F\x{DF}aB\x{149}cD"), uni_to_native("ssab\x{2BC}ncd"),
-       "multicharacter foldcase");
+is("\F${sharp_s}aB\x{149}cD", "ssab\x{2BC}ncd", "multicharacter foldcase");
 
 
 # titlecase is used for \u / ucfirst.
@@ -283,15 +282,15 @@ for ("$temp") {
 }
 
 # new in Unicode 5.1.0
-is(lc("\x{1E9E}"), "\x{df}", "lc(LATIN CAPITAL LETTER SHARP S)");
+is(lc("\x{1E9E}"), uni_to_native("\x{df}"), "lc(LATIN CAPITAL LETTER SHARP 
S)");
 
 {
     use feature 'unicode_strings';
     use bytes;
-    is(lc("\xc0"), "\xc0", "lc of above-ASCII Latin1 is itself under use 
bytes");
-    is(lcfirst("\xc0"), "\xc0", "lcfirst of above-ASCII Latin1 is itself under 
use bytes");
-    is(uc("\xe0"), "\xe0", "uc of above-ASCII Latin1 is itself under use 
bytes");
-    is(ucfirst("\xe0"), "\xe0", "ucfirst of above-ASCII Latin1 is itself under 
use bytes");
+    is(lc(uni_to_native("\xc0")), uni_to_native("\xc0"), "lc of above-ASCII 
Latin1 is itself under use bytes");
+    is(lcfirst(uni_to_native("\xc0")), uni_to_native("\xc0"), "lcfirst of 
above-ASCII Latin1 is itself under use bytes");
+    is(uc(uni_to_native("\xe0")), uni_to_native("\xe0"), "uc of above-ASCII 
Latin1 is itself under use bytes");
+    is(ucfirst(uni_to_native("\xe0")), uni_to_native("\xe0"), "ucfirst of 
above-ASCII Latin1 is itself under use bytes");
 }
 
 # Brought up in ticket #117855: Constant folding applied to uc() should use
diff --git a/t/op/magic.t b/t/op/magic.t
index 3fbec7b..4a8006d 100644
--- a/t/op/magic.t
+++ b/t/op/magic.t
@@ -26,7 +26,7 @@ BEGIN {
        # avoid using any global vars here:
        if ($v =~ s/^\^(?=.)//) {
            for(substr $v, 0, 1) {
-               $_ = chr ord() - 64;
+               $_ = chr(utf8::native_to_unicode(ord($_)) - 64);
            }
        }
        SKIP:
diff --git a/t/op/override.t b/t/op/override.t
index ead2606..ff43571 100644
--- a/t/op/override.t
+++ b/t/op/override.t
@@ -184,7 +184,7 @@ like runperl(prog => 'use constant foo=>1; '
 
 is runperl(prog => 'use constant t=>42; '
                   .'BEGIN { *{q|CORE::GLOBAL::time|} = \&{q|t|};1}'
-                  .'print time, chr 10',
+                  .'print time, chr utf8::unicode_to_native(10)',
           stderr => 1),
    "42\n",
    'keywords respect global constant overrides';
diff --git a/t/op/print.t b/t/op/print.t
index 93aa94a..15f880f 100644
--- a/t/op/print.t
+++ b/t/op/print.t
@@ -13,6 +13,7 @@ fresh_perl_is('$_ = qq{OK\n}; print STDOUT;', "OK\n", {},
               'print with only a filehandle outputs $_');
 SKIP: {
     skip_if_miniperl('no dynamic loading of PerlIO::scalar in miniperl');
+    skip("EBCDIC") if $::IS_EBCDIC;    # Varies depending on code page
 fresh_perl_is(<<'EOF', "\xC1\xAF\xC1\xAF\xC1\xB0\xC1\xB3", {}, "print doesn't 
launder utf8 overlongs");
 use strict;
 use warnings;
diff --git a/t/op/quotemeta.t b/t/op/quotemeta.t
index 7f5705d..9ba3f09 100644
--- a/t/op/quotemeta.t
+++ b/t/op/quotemeta.t
@@ -19,7 +19,7 @@ if ($Config{ebcdic} eq 'define') {
     # 104 non-backslash characters
     is(tr/\\//cd, 104, "tr count non-backslashed");
 } else { # some ASCII descendant, then.
-    $_ = join "", map chr($_), 32..127;
+    $_ = join "", map chr(utf8::unicode_to_native($_)), 32..127;
 
     # 96 characters - 52 letters - 10 digits - 1 underscore = 33 backslashes
     # 96 characters + 33 backslashes = 129 characters
@@ -64,17 +64,20 @@ utf8::upgrade($char);
 is(quotemeta($char), "\\$char", "quotemeta '\\N{U+D7}' in UTF-8");
 is(length(quotemeta($char)), 2, "quotemeta '\\N{U+D7}'  in UTF-8 length");
 
-$char = "\N{U+D8}";
+$char = "\N{U+DF}";
 utf8::upgrade($char);
-is(quotemeta($char), "$char", "quotemeta '\\N{U+D8}' in UTF-8");
-is(length(quotemeta($char)), 1, "quotemeta '\\N{U+D8}'  in UTF-8 length");
+is(quotemeta($char), "$char", "quotemeta '\\N{U+DF}' in UTF-8");
+is(length(quotemeta($char)), 1, "quotemeta '\\N{U+DF}'  in UTF-8 length");
 
 {
     no feature 'unicode_strings';
-    is(quotemeta("\x{d7}"), "\\\x{d7}", "quotemeta Latin1 no unicode_strings 
quoted");
-    is(length(quotemeta("\x{d7}")), 2, "quotemeta Latin1 no unicode_strings 
quoted length");
-    is(quotemeta("\x{d8}"), "\\\x{d8}", "quotemeta Latin1 no unicode_strings 
quoted");
-    is(length(quotemeta("\x{d8}")), 2, "quotemeta Latin1 no unicode_strings 
quoted length");
+
+    # BF is chosen because it is NOT alphanumeric in both Latin1 and EBCDIC
+    # DF is chosen because it IS alphanumeric in both Latin1 and EBCDIC
+    is(quotemeta("\x{bf}"), "\\\x{bf}", "quotemeta Latin1 no unicode_strings 
quoted");
+    is(length(quotemeta("\x{bf}")), 2, "quotemeta Latin1 no unicode_strings 
quoted length");
+    is(quotemeta("\x{df}"), "\\\x{df}", "quotemeta Latin1 no unicode_strings 
quoted");
+    is(length(quotemeta("\x{df}")), 2, "quotemeta Latin1 no unicode_strings 
quoted length");
 
   SKIP: {
     skip 'No locale testing without d_setlocale', 8 if(!$Config{d_setlocale});
@@ -89,21 +92,21 @@ is(length(quotemeta($char)), 1, "quotemeta '\\N{U+D8}'  in UTF-8 length");
     is(quotemeta($char), "$char", "quotemeta '$char' locale");
     is(length(quotemeta($char)), 1, "quotemeta '$char' locale");
 
-    my $char = "\x{D7}";
-    is(quotemeta($char), "\\$char", "quotemeta '\\x{D7}' locale");
-    is(length(quotemeta($char)), 2, "quotemeta '\\x{D7}' locale length");
+    my $char = "\x{BF}";
+    is(quotemeta($char), "\\$char", "quotemeta '\\x{BF}' locale");
+    is(length(quotemeta($char)), 2, "quotemeta '\\x{BF}' locale length");
 
-    $char = "\x{D8}";  # Every non-ASCII Latin1 is quoted in locale.
-    is(quotemeta($char), "\\$char", "quotemeta '\\x{D8}' locale");
-    is(length(quotemeta($char)), 2, "quotemeta '\\x{D8}' locale length");
+    $char = "\x{DF}";  # Every non-ASCII Latin1 is quoted in locale.
+    is(quotemeta($char), "\\$char", "quotemeta '\\x{DF}' locale");
+    is(length(quotemeta($char)), 2, "quotemeta '\\x{DF}' locale length");
     }
 }
 {
     use feature 'unicode_strings';
-    is(quotemeta("\x{d7}"), "\\\x{d7}", "quotemeta Latin1 unicode_strings 
quoted");
-    is(length(quotemeta("\x{d7}")), 2, "quotemeta Latin1 unicode_strings 
quoted length");
-    is(quotemeta("\x{d8}"), "\x{d8}", "quotemeta Latin1 unicode_strings 
nonquoted");
-    is(length(quotemeta("\x{d8}")), 1, "quotemeta Latin1 unicode_strings 
nonquoted length");
+    is(quotemeta("\x{bf}"), "\\\x{bf}", "quotemeta Latin1 unicode_strings 
quoted");
+    is(length(quotemeta("\x{bf}")), 2, "quotemeta Latin1 unicode_strings 
quoted length");
+    is(quotemeta("\x{df}"), "\x{df}", "quotemeta Latin1 unicode_strings 
nonquoted");
+    is(length(quotemeta("\x{df}")), 1, "quotemeta Latin1 unicode_strings 
nonquoted length");
 
   SKIP: {
     skip 'No locale testing without d_setlocale', 12 if(!$Config{d_setlocale});
@@ -124,10 +127,10 @@ is(length(quotemeta($char)), 1, "quotemeta '\\N{U+D8}'  in UTF-8 length");
     is(quotemeta($char), "\\$char", "quotemeta '\\N{U+D7}' locale in UTF-8");
     is(length(quotemeta($char)), 2, "quotemeta '\\N{U+D7}' locale in UTF-8 length");
 
-    $char = "\N{U+D8}";  # Every non-ASCII Latin1 is quoted in locale.
+    $char = "\N{U+DF}";  # Every non-ASCII Latin1 is quoted in locale.
     utf8::upgrade($char);
-    is(quotemeta($char), "\\$char", "quotemeta '\\N{U+D8}' locale in UTF-8");
-    is(length(quotemeta($char)), 2, "quotemeta '\\N{U+D8}' locale in UTF-8 length");
+    is(quotemeta($char), "\\$char", "quotemeta '\\N{U+DF}' locale in UTF-8");
+    is(length(quotemeta($char)), 2, "quotemeta '\\N{U+DF}' locale in UTF-8 length");
 
     is(quotemeta("\x{263a}"), "\\\x{263a}", "quotemeta locale Unicode quoted");
     is(length(quotemeta("\x{263a}")), 2, "quotemeta locale Unicode quoted 
length");
diff --git a/t/op/split.t b/t/op/split.t
index 5d5c19d..50579bf 100644
--- a/t/op/split.t
+++ b/t/op/split.t
@@ -375,7 +375,7 @@ is($cnt, scalar(@ary));
 
 {
     # LATIN SMALL LETTER A WITH DIAERESIS, CYRILLIC SMALL LETTER I
-    for my $pattern ("\x{e4}", "\x{0437}") {
+    for my $pattern ("\N{U+E4}", "\x{0437}") {
         utf8::upgrade $pattern;
         my @res;
         for my $str ("a${pattern}b", "axb", "a${pattern}b") {
@@ -492,14 +492,16 @@ is($cnt, scalar(@ary));
     my @results;
     my $expr;
     $expr = ' a b c ';
-    @results = split "\x20", $expr;
+    @results = split "\x20", $expr if $::IS_ASCII;
+    @results = split "\x40", $expr if $::IS_EBCDIC;
     is @results, 3,
         "RT #116086: split on string of single hex-20: captured 3 elements";
     is $results[0], 'a',
         "RT #116086: split on string of single hex-20: first element is 
non-empty";
 
     $expr = " a \tb c ";
-    @results = split "\x20", $expr;
+    @results = split "\x20", $expr if $::IS_ASCII;
+    @results = split "\x40", $expr if $::IS_EBCDIC;
     is @results, 3,
         "RT #116086: split on string of single hex-20: captured 3 elements";
     is $results[0], 'a',
diff --git a/t/op/sprintf.t b/t/op/sprintf.t
index f534a86..967b5d3 100644
--- a/t/op/sprintf.t
+++ b/t/op/sprintf.t
@@ -32,8 +32,18 @@ if ($^O eq 'VMS') {
 # No %Config.
 my $Is_Ultrix_VAX = $^O eq 'ultrix' && `uname -m` =~ /^VAX$/;
 
+our $IS_EBCDIC = $::IS_EBCDIC;  # Solely to avoid the 'used once' warning
+our $IS_ASCII = $::IS_ASCII;   # Solely to avoid the 'used once' warning
+
 while (<DATA>) {
-    s/^\s*>//; s/<\s*$//;
+    s/<\s*$//;
+
+    # An initial 'a' or 'e' marks the test as being only for ASCII or EBCDIC
+    # platforms respectively.
+    s/^\s* ( [ae] )? >//x;
+    next if defined $1 && $1 eq 'a' && $::IS_EBCDIC;
+    next if defined $1 && $1 eq 'e' && $::IS_ASCII;
+
     ($template, $data, $result, $comment) = split(/<\s*>/, $_, 4);
     if ($^O eq 'os390' || $^O eq 's390') { # non-IEEE (s390 is UTS)
         $data   =~ s/([eE])96$/${1}63/;      # smaller exponents
@@ -713,10 +723,17 @@ __END__
 >%*2147483647$v2d<     >''<    > MISSING<
 >%.3X<         >[11]<                  >00B<           >perl #83194: hex, 
 >zero-padded to 3 places<
 >%.*X<         >[3, 11]<               >00B<           >perl #83194: dynamic 
 >precision<
->%vX<          >['012']<               >30.31.32<      >perl #83194: vector 
flag<
->%*vX<         >[':', '012']<          >30:31:32<      >perl #83194: vector 
flag + custom separator<
->%v.3X<                >['012']<               >030.031.032<   >perl #83194: 
vector flag + static precision<
->%v.*X<                >[3, '012']<            >030.031.032<   >perl #83194: 
vector flag + dynamic precision<
->%*v.3X<       >[':', '012']<          >030:031:032<   >perl #83194: vector 
flag + custom separator + static precision<
->%*v.*X<       >[':', 3, '012']<       >030:031:032<   >perl #83194: vector 
flag + custom separator + dynamic precision<
->%vd<  >"version"<     >118.101.114.115.105.111.110<   >perl #102586: vector 
flag + "version"<
+a>%vX<         >['012']<               >30.31.32<      >perl #83194: vector 
flag<
+e>%vX<         >['012']<               >F0.F1.F2<      >perl #83194: vector 
flag<
+a>%*vX<                >[':', '012']<          >30:31:32<      >perl #83194: 
vector flag + custom separator<
+e>%*vX<                >[':', '012']<          >F0:F1:F2<      >perl #83194: 
vector flag + custom separator<
+a>%v.3X<               >['012']<               >030.031.032<   >perl #83194: 
vector flag + static precision<
+e>%v.3X<               >['012']<               >0F0.0F1.0F2<   >perl #83194: 
vector flag + static precision<
+a>%v.*X<               >[3, '012']<            >030.031.032<   >perl #83194: 
vector flag + dynamic precision<
+e>%v.*X<               >[3, '012']<            >0F0.0F1.0F2<   >perl #83194: 
vector flag + dynamic precision<
+a>%*v.3X<      >[':', '012']<          >030:031:032<   >perl #83194: vector 
flag + custom separator + static precision<
+e>%*v.3X<      >[':', '012']<          >0F0:0F1:0F2<   >perl #83194: vector 
flag + custom separator + static precision<
+a>%*v.*X<      >[':', 3, '012']<       >030:031:032<   >perl #83194: vector 
flag + custom separator + dynamic precision<
+e>%*v.*X<      >[':', 3, '012']<       >0F0:0F1:0F2<   >perl #83194: vector 
flag + custom separator + dynamic precision<
+a>%vd< >"version"<     >118.101.114.115.105.111.110<   >perl #102586: vector 
flag + "version"<
+e>%vd<   >"version"<    >165.133.153.162.137.150.149<   >perl #102586: vector 
flag + "version"<
diff --git a/t/op/tie.t b/t/op/tie.t
index 42e7bba..23a9b6f 100644
--- a/t/op/tie.t
+++ b/t/op/tie.t
@@ -576,7 +576,11 @@ print $h.$h;
 EXPECT
 01
 ########
+# SKIP ? $IS_EBCDIC
+# skipped on EBCDIC because "2" | "8" is 0xFA (not COLON as it is on ASCII),
+# which isn't representable in this file's UTF-8 encoding.
 # Bug 53482 (and maybe others)
+
 sub TIESCALAR { my $foo = $_[1]; bless \$foo, $_[0] }
 sub FETCH { ${$_[0]} }
 tie my $x1, "main", 2;
@@ -1417,6 +1421,9 @@ EXPECT
 main
 ok
 ########
+# SKIP ? $::IS_EBCDIC
+# skipped on EBCDIC because different from ASCII and results vary depending on
+# code page
 
 # &xsub and goto &xsub with tied @_
 use Tie::Array;
diff --git a/t/op/utf8magic.t b/t/op/utf8magic.t
index 55e921d..c071664 100644
--- a/t/op/utf8magic.t
+++ b/t/op/utf8magic.t
@@ -3,7 +3,7 @@
 BEGIN {
     chdir 't' if -d 't';
     @INC = '../lib';
-    require './test.pl';
+    require './test.pl'; require './charset_tools.pl';
 }
 
 plan tests => 6;
@@ -33,6 +33,6 @@ $str2 = "b";
 utf8::encode $str2;
 is $::stored, "a", 'utf8::encode respects get-magic on POK scalars';
 
-tie $str2, "", "\xc4\x80";
+tie $str2, "", byte_utf8a_to_utf8n("\xc4\x80");
 utf8::decode $str2;
 is $::stored, "\x{100}", 'utf8::decode respects set-magic';
diff --git a/t/op/utfhash.t b/t/op/utfhash.t
index 2f1b688..5200c5b 100644
--- a/t/op/utfhash.t
+++ b/t/op/utfhash.t
@@ -176,13 +176,10 @@ foreach ("\x7f","\xff")
 
 {
     local $/; # Slurp.
-    my $utf8      = <DATA>;
-    my $utfebcdic = <DATA>;
-    if (ord('A') == 65) {
-       eval $utf8;
-    } elsif (ord('A') == 193) {
-       eval $utfebcdic;
-    }
+    my $data = <DATA>;
+    my ($utf8, $utf1047ebcdic) = split /__SPLIT__/, $data;
+    $utf8 = $utf1047ebcdic if $::IS_EBCDIC;
+    eval $utf8;
 }
 __END__
 {
@@ -203,8 +200,8 @@ __END__
     ok !utf8::is_utf8($key), "'$key' shouldn't have utf8 flag";
   }
 }
-__END__
-{
+__SPLIT__
+{   # This is 1047 UTF-EBCDIC; won't work on other code pages.
   # See if utf8 barewords work [perl #22969]
   use utf8; # UTF-EBCDIC, really.
   my %hash = (½ää½âÀ½äâ½ää => 123);
diff --git a/t/op/ver.t b/t/op/ver.t
index 1496725..e896e9e 100644
--- a/t/op/ver.t
+++ b/t/op/ver.t
@@ -4,7 +4,7 @@ BEGIN {
     chdir 't' if -d 't';
     @INC = qw(. ../lib);
     $SIG{'__WARN__'} = sub { warn $_[0] if $DOWARN };
-    require "./test.pl";
+    require "./test.pl"; require "./charset_tools.pl";
 }
 
 $DOWARN = 1; # enable run-time warnings now
@@ -175,7 +175,7 @@ is(sprintf("%vd", join("", map { chr }
 my $vs = v1.20.300.4000;
 is($vs,"\x{1}\x{14}\x{12c}\x{fa0}","v-string ne \\x{}");
 is($vs,chr(1).chr(20).chr(300).chr(4000),"v-string ne chr()");
-is('foo',((chr(193) eq 'A') ? v134.150.150 : v102.111.111),"v-string ne ''");
+is('foo',($::IS_EBCDIC ? v134.150.150 : v102.111.111),"v-string ne ''");
 
 # Chapter 15, pp403
 
@@ -225,7 +225,7 @@ ok( abs($v - $]) < 10**-8 , "\$^V == \$] (numeric)" );
 
 SKIP: {
   skip("In EBCDIC the v-string components cannot exceed 2147483647", 6)
-    if ord "A" == 193;
+    if $::IS_EBCDIC;
 
   # [ID 20010902.001] check if v-strings handle full UV range or not
   if ( $Config{'uvsize'} >= 4 ) {
@@ -270,10 +270,10 @@ ok( exists $h{chr(65).chr(66).chr(67)}, "v-stringness is engaged for X.Y.Z" );
     is $|, 1, 'clobbering vstrings does not clobber all magic';
 }
 
-$a = v102; $a =~ s/f/f/;
+$a = $::IS_EBCDIC ? v134 : v102; $a =~ s/f/f/;
 is ref \$a, 'SCALAR',
   's/// flattens vstrings even when the subst results in the same value';
-$a = v102; $a =~ y/f/g/;
+$a = $::IS_EBCDIC ? v134 : v102; $a =~ y/f/g/;
 is ref \$a, 'SCALAR', 'y/// flattens vstrings';
 
 sub { $_[0] = v3;
diff --git a/t/op/warn.t b/t/op/warn.t
index 4d679c2..42b88f8 100644
--- a/t/op/warn.t
+++ b/t/op/warn.t
@@ -3,7 +3,7 @@
 
 BEGIN {
     chdir 't' if -d 't';
-    require './test.pl';
+    require './test.pl'; require './charset_tools.pl';
     set_up_inc('../lib');
 }
**** PATCH TRUNCATED AT 2000 LINES -- 1550 NOT SHOWN ****

--
Perl5 Master Repository
