[trafficserver] 02/03: Revert "Use optionally provided hash string for hashing"

2020-03-20 Thread zwoop
This is an automated email from the ASF dual-hosted git repository.

zwoop pushed a commit to branch 8.1.x
in repository https://gitbox.apache.org/repos/asf/trafficserver.git

commit b69a87fe446a58acc3a6c9ed8f63ce1c0aff35c8
Author: Leif Hedstrom 
AuthorDate: Fri Mar 20 11:00:35 2020 -0600

Revert "Use optionally provided hash string for hashing"

This reverts commit 1df4a8d3e58e9a1b4050501d52d7bbfdecf27850.

The requirements for pulling this one are large, and we
would need to pull in incompatible changes (breaking upgrading
from 8.0.0 -> 8.1.0). Let's hold off on this for now.
---
 doc/admin-guide/files/parent.config.en.rst |  8 ---
 proxy/ParentSelection.cc   | 89 ++
 proxy/ParentSelection.h|  1 -
 3 files changed, 18 insertions(+), 80 deletions(-)

diff --git a/doc/admin-guide/files/parent.config.en.rst b/doc/admin-guide/files/parent.config.en.rst
index ca54825..3a26e95 100644
--- a/doc/admin-guide/files/parent.config.en.rst
+++ b/doc/admin-guide/files/parent.config.en.rst
@@ -141,14 +141,6 @@ The following list shows the possible actions and their allowed values.
 
 parent="p1.x.com:8080|2.0, 192.168.0.3:80|3.0, 192.168.0.4:80|5.0"
 
-If ``round_robin`` is set to ``consistent_hash``, you may add a ``unique hash string``
-following the ``weight`` for each parent.  The ``hash string`` must start with ``&``
- and is used to build both the primary and secondary rings using the ``hash string``
-for each parent insted of the parents ``hostname`` or ``ip address``. This can be
-useful so that two different hosts may be used to cache the same requests.  Example::
-
-parent="p1.x.com:80|1.0, p2.x.com:80|1.0, p3.x.com:80|1.0" round_robin=consistent_hash
-
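
For reference, the syntax removed above combined the usual weight with an ``&``-prefixed
hash string per parent, which let two different hosts hash to the same positions and cache
the same requests. A sketch of such a line (the hash strings here are made-up placeholders,
not taken from the commit)::

    parent="p1.x.com:80|1.0&groupA, p2.x.com:80|1.0&groupA, p3.x.com:80|1.0&groupB" round_robin=consistent_hash
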
 .. _parent-config-format-secondary-parent:
 
 ``secondary_parent``
diff --git a/proxy/ParentSelection.cc b/proxy/ParentSelection.cc
index 692cf6a..496146d 100644
--- a/proxy/ParentSelection.cc
+++ b/proxy/ParentSelection.cc
@@ -414,7 +414,7 @@ ParentRecord::ProcessParents(char *val, bool isPrimary)
   int numTok  = 0;
   const char *current = nullptr;
   int port= 0;
-  char *tmp = nullptr, *tmp2 = nullptr, *tmp3 = nullptr;
+  char *tmp = nullptr, *tmp2 = nullptr;
   const char *errPtr = nullptr;
   float weight   = 1.0;
 
@@ -467,27 +467,23 @@ ParentRecord::ProcessParents(char *val, bool isPrimary)
   }
 }
 
-tmp3 = (char *)strchr(current, '&');
-
 // Make sure that is no garbage beyond the parent
-//  port or weight
-if (!tmp3) {
-  char *scan;
-  if (tmp2) {
-scan = tmp2 + 1;
-  } else {
-scan = tmp + 1;
-  }
-  for (; *scan != '\0' && (ParseRules::is_digit(*scan) || *scan == '.'); scan++) {
-;
-  }
-  for (; *scan != '\0' && ParseRules::is_wslfcr(*scan); scan++) {
-;
-  }
-  if (*scan != '\0') {
-errPtr = "Garbage trailing entry or invalid separator";
-goto MERROR;
-  }
+//   port or weight
+char *scan;
+if (tmp2) {
+  scan = tmp2 + 1;
+} else {
+  scan = tmp + 1;
+}
+for (; *scan != '\0' && (ParseRules::is_digit(*scan) || *scan == '.'); scan++) {
+  ;
+}
+for (; *scan != '\0' && ParseRules::is_wslfcr(*scan); scan++) {
+  ;
+}
+if (*scan != '\0') {
+  errPtr = "Garbage trailing entry or invalid separator";
+  goto MERROR;
 }
 // Check to make sure that the string will fit in the
 //  pRecord
@@ -510,10 +506,6 @@ ParentRecord::ProcessParents(char *val, bool isPrimary)
   this->parents[i].name= this->parents[i].hostname;
   this->parents[i].available   = true;
   this->parents[i].weight  = weight;
-  if (tmp3) {
-memcpy(this->parents[i].hash_string, tmp3 + 1, strlen(tmp3));
-this->parents[i].name = this->parents[i].hash_string;
-  }
   hs.createHostStat(this->parents[i].hostname);
 } else {
   memcpy(this->secondary_parents[i].hostname, current, tmp - current);
@@ -526,13 +518,8 @@ ParentRecord::ProcessParents(char *val, bool isPrimary)
-  this->secondary_parents[i].name= this->secondary_parents[i].hostname;
   this->secondary_parents[i].available   = true;
   this->secondary_parents[i].weight  = weight;
-  if (tmp3) {
-memcpy(this->secondary_parents[i].hash_string, tmp3 + 1, strlen(tmp3));
-this->secondary_parents[i].name = this->secondary_parents[i].hash_string;
-  }
   hs.createHostStat(this->secondary_parents[i].hostname);
 }
-tmp3 = nullptr;
   }
 
   if (isPrimary) {
@@ -817,7 +804,7 @@ ParentRecord::Print()
 {
   printf("\t\t");
   for (int i = 0; i < num_parents; i++) {
-printf(" %s:%d|%f&%s ", parents[i].hostname, parents[i].port, 
parents[i].weight, parents[i].name);
+printf(" %s:%d ", 

[trafficserver] branch 8.1.x updated (f18e6f4 -> 67691ca)

2020-03-20 Thread zwoop
This is an automated email from the ASF dual-hosted git repository.

zwoop pushed a change to branch 8.1.x
in repository https://gitbox.apache.org/repos/asf/trafficserver.git.


from f18e6f4  fix crash in CacheVC::openReadFromWriter
 new 1f14f7d  Revert "proxy.config.http.connect_attempts_timeout tracks TTBF instead of connect"
 new b69a87f  Revert "Use optionally provided hash string for hashing"
 new 67691ca  Fixes Log build issues from a31bdd5e50fcaa cherry-pick

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 doc/admin-guide/files/parent.config.en.rst |   8 --
 iocore/net/I_NetProcessor.h|  23 ++
 iocore/net/P_SSLNetVConnection.h   |  10 ---
 iocore/net/P_UnixNetVConnection.h  |   6 --
 iocore/net/UnixNetProcessor.cc | 116 +
 iocore/net/UnixNetVConnection.cc   |   7 --
 proxy/ParentSelection.cc   |  89 +-
 proxy/ParentSelection.h|   1 -
 proxy/http/HttpSM.cc   |  52 ++---
 proxy/logging/Log.cc   |   2 +-
 10 files changed, 185 insertions(+), 129 deletions(-)



[trafficserver] 03/03: Fixes Log build issues from a31bdd5e50fcaa cherry-pick

2020-03-20 Thread zwoop
This is an automated email from the ASF dual-hosted git repository.

zwoop pushed a commit to branch 8.1.x
in repository https://gitbox.apache.org/repos/asf/trafficserver.git

commit 67691cadb3b6aea06c425b0b598354b1a69d7a71
Author: Leif Hedstrom 
AuthorDate: Fri Mar 20 11:03:41 2020 -0600

Fixes Log build issues from a31bdd5e50fcaa cherry-pick
---
 proxy/logging/Log.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/proxy/logging/Log.cc b/proxy/logging/Log.cc
index 897bd6d..63b3419 100644
--- a/proxy/logging/Log.cc
+++ b/proxy/logging/Log.cc
@@ -507,7 +507,7 @@ Log::init_fields()
   field = new LogField("client_req_is_internal", "cqint", LogField::sINT, 
::marshal_client_req_is_internal,
::unmarshal_int_to_str);
   global_field_list.add(field, false);
-  field_symbol_hash.emplace("cqint", field);
+  ink_hash_table_insert(field_symbol_hash, "cqint", field);
 
   field = new LogField("client_sec_protocol", "cqssv", LogField::STRING, 
::marshal_client_security_protocol,
(LogField::UnmarshalFunc)::unmarshal_str);



[trafficserver] 01/03: Revert "proxy.config.http.connect_attempts_timeout tracks TTBF instead of connect"

2020-03-20 Thread zwoop
This is an automated email from the ASF dual-hosted git repository.

zwoop pushed a commit to branch 8.1.x
in repository https://gitbox.apache.org/repos/asf/trafficserver.git

commit 1f14f7db49acc08df66a0c4ea6637bb6552c5147
Author: Leif Hedstrom 
AuthorDate: Fri Mar 20 10:59:39 2020 -0600

Revert "proxy.config.http.connect_attempts_timeout tracks TTBF instead of 
connect"

This reverts commit 58459ab9dc4390144f386798a73555863a620c45.

Reverting for now, to get a stable build. We will revisit this, probably
just revert this revert.
---
 iocore/net/I_NetProcessor.h   |  23 
 iocore/net/P_SSLNetVConnection.h  |  10 
 iocore/net/P_UnixNetVConnection.h |   6 --
 iocore/net/UnixNetProcessor.cc| 116 ++
 iocore/net/UnixNetVConnection.cc  |   7 ---
 proxy/http/HttpSM.cc  |  52 +
 6 files changed, 166 insertions(+), 48 deletions(-)

diff --git a/iocore/net/I_NetProcessor.h b/iocore/net/I_NetProcessor.h
index b8dee4e..03934ec 100644
--- a/iocore/net/I_NetProcessor.h
+++ b/iocore/net/I_NetProcessor.h
@@ -170,6 +170,8 @@ public:
   call back with success. If this behaviour is desired use
   synchronous connect connet_s method.
 
+@see connect_s()
+
 @param cont Continuation to be called back with events.
 @param addr target address and port to connect to.
 @param options @see NetVCOptions.
@@ -179,6 +181,27 @@ public:
  inkcoreapi Action *connect_re(Continuation *cont, sockaddr const *addr, NetVCOptions *options = nullptr);
 
   /**
+Open a NetVConnection for connection oriented I/O. This call
+is simliar to connect method except that the cont is called
+back only after the connections has been established. In the
+case of connect the cont could be called back with NET_EVENT_OPEN
+event and OS could still be in the process of establishing the
+connection. Re-entrant Callbacks: same as connect. If unix
+asynchronous type connect is desired use connect_re().
+
+@param cont Continuation to be called back with events.
+@param addr Address to which to connect (includes port).
+@param timeout for connect, the cont will get NET_EVENT_OPEN_FAILED
+  if connection could not be established for timeout msecs. The
+  default is 30 secs.
+@param options @see NetVCOptions.
+
+@see connect_re()
+
+  */
+  Action *connect_s(Continuation *cont, sockaddr const *addr, int timeout = NET_CONNECT_TIMEOUT, NetVCOptions *opts = nullptr);
+
+  /**
 Initializes the net processor. This must be called before the event 
threads are started.
 
   */
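
A hypothetical caller of the re-added connect_s() API, based only on the comment block above
(the include, class name and debug tag are illustrative assumptions, not part of this commit)::

    #include "I_NetProcessor.h" // assumed to be reachable via the usual iocore/net includes

    struct SyncConnectUser : public Continuation {
      SyncConnectUser() : Continuation(new_ProxyMutex()) { SET_HANDLER(&SyncConnectUser::handle); }

      int
      handle(int event, void * /* edata */)
      {
        switch (event) {
        case NET_EVENT_OPEN: // unlike connect_re(), the connection is already established here
          Debug("example_connect", "connected");
          break;
        case NET_EVENT_OPEN_FAILED: // also covers the connect timeout described above
          Debug("example_connect", "connect failed or timed out");
          break;
        }
        return EVENT_DONE;
      }
    };

    // usage, relying on the default NET_CONNECT_TIMEOUT:
    //   netProcessor.connect_s(new SyncConnectUser(), target_addr);
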
diff --git a/iocore/net/P_SSLNetVConnection.h b/iocore/net/P_SSLNetVConnection.h
index ff7a801..66505fd 100644
--- a/iocore/net/P_SSLNetVConnection.h
+++ b/iocore/net/P_SSLNetVConnection.h
@@ -101,16 +101,6 @@ public:
   }
 
   bool
-  trackFirstHandshake() override
-  {
-bool retval = sslHandshakeBeginTime == 0;
-if (retval) {
-  sslHandshakeBeginTime = Thread::get_hrtime();
-}
-return retval;
-  }
-
-  bool
   getSSLHandShakeComplete() const override
   {
 return sslHandShakeComplete;
diff --git a/iocore/net/P_UnixNetVConnection.h b/iocore/net/P_UnixNetVConnection.h
index 9c1b904..c454b49 100644
--- a/iocore/net/P_UnixNetVConnection.h
+++ b/iocore/net/P_UnixNetVConnection.h
@@ -207,12 +207,6 @@ public:
 return (true);
   }
 
-  virtual bool
-  trackFirstHandshake()
-  {
-return false;
-  }
-
   virtual void net_read_io(NetHandler *nh, EThread *lthread);
   virtual int64_t load_buffer_and_write(int64_t towrite, MIOBufferAccessor 
, int64_t _written, int );
   void readDisable(NetHandler *nh);
diff --git a/iocore/net/UnixNetProcessor.cc b/iocore/net/UnixNetProcessor.cc
index 653ca5c..589c843 100644
--- a/iocore/net/UnixNetProcessor.cc
+++ b/iocore/net/UnixNetProcessor.cc
@@ -305,6 +305,122 @@ UnixNetProcessor::connect(Continuation *cont, UnixNetVConnection ** /* avc */, s
   return connect_re(cont, target, opt);
 }
 
+struct CheckConnect : public Continuation {
+  UnixNetVConnection *vc;
+  Action action_;
+  MIOBuffer *buf;
+  IOBufferReader *reader;
+  int connect_status;
+  int recursion;
+  ink_hrtime timeout;
+
+  int
+  handle_connect(int event, Event *e)
+  {
+connect_status = event;
+switch (event) {
+case NET_EVENT_OPEN:
+  vc = (UnixNetVConnection *)e;
+  Debug("iocore_net_connect", "connect Net open");
+  vc->do_io_write(this, 10, /* some non-zero number just to get the poll going */
+  reader);
+  /* dont wait for more than timeout secs */
+  vc->set_inactivity_timeout(timeout);
+  return EVENT_CONT;
+  break;
+
+case NET_EVENT_OPEN_FAILED:
+  Debug("iocore_net_connect", "connect Net open failed");
+  if (!action_.cancelled) {
+action_.continuation->handleEvent(NET_EVENT_OPEN_FAILED, (void *)e);
+  }
+  break;
+
+case VC_EVENT_WRITE_READY:
+  int sl, ret;
+  

[trafficserver] branch 9.0.x updated: Disables "virtual host not used with AWS auth v4" error in s3_auth

2020-03-20 Thread zwoop
This is an automated email from the ASF dual-hosted git repository.

zwoop pushed a commit to branch 9.0.x
in repository https://gitbox.apache.org/repos/asf/trafficserver.git


The following commit(s) were added to refs/heads/9.0.x by this push:
 new 7c9db01  Disables "virtual host not used with AWS auth v4" error in s3_auth
7c9db01 is described below

commit 7c9db01634320609d4f011f37183dfdc1c4d22bc
Author: Randall Meyer 
AuthorDate: Thu Mar 19 14:52:26 2020 -0700

Disables "virtual host not used with AWS auth v4" error in s3_auth

This would error on every request

(cherry picked from commit 05556de2a3b06400e7f587ab4accf1cbf8b2a88e)
---
 plugins/s3_auth/s3_auth.cc | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/plugins/s3_auth/s3_auth.cc b/plugins/s3_auth/s3_auth.cc
index ce76020..8352195 100644
--- a/plugins/s3_auth/s3_auth.cc
+++ b/plugins/s3_auth/s3_auth.cc
@@ -203,9 +203,7 @@ public:
   }
 } else {
   /* 4 == _version */
-  if (_virt_host_modified) {
-TSError("[%s] virtual host not used with AWS auth v4, parameter 
ignored", PLUGIN_NAME);
-  }
+  // NOTE: virtual host not used with AWS auth v4, parameter ignored
 }
 return true;
   }



[trafficserver] branch master updated: better handling of TSVIO calls and TSVConnAbort (#6239)

2020-03-20 Thread eze
This is an automated email from the ASF dual-hosted git repository.

eze pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git


The following commit(s) were added to refs/heads/master by this push:
 new 1181399  better handling of TSVIO calls and TSVConnAbort (#6239)
1181399 is described below

commit 1181399556259b8fa27810de481eafb2c0bdd3a7
Author: Brian Olsen 
AuthorDate: Fri Mar 20 10:40:02 2020 -0600

better handling of TSVIO calls and TSVConnAbort (#6239)
---
 doc/admin-guide/plugins/slice.en.rst|   4 +-
 plugins/experimental/slice/Config.cc|   7 +-
 plugins/experimental/slice/Config.h |   1 +
 plugins/experimental/slice/Data.h   |  26 ++--
 plugins/experimental/slice/HttpHeader.cc|  17 +-
 plugins/experimental/slice/HttpHeader.h |  13 +-
 plugins/experimental/slice/Makefile.inc |   5 +-
 plugins/experimental/slice/Makefile.tsxs|   1 -
 plugins/experimental/slice/Range.cc |  11 +-
 plugins/experimental/slice/Range.h  |   4 +
 plugins/experimental/slice/Stage.h  |  77 ++---
 plugins/experimental/slice/client.cc| 175 +++--
 plugins/experimental/slice/client.h |   2 +
 plugins/experimental/slice/intercept.cc |  24 ++-
 plugins/experimental/slice/server.cc| 199 +---
 plugins/experimental/slice/slice.cc |  56 ++-
 plugins/experimental/slice/slice.h  |  11 ++
 plugins/experimental/slice/transfer.cc  | 147 +++--
 plugins/experimental/slice/transfer.h   |   3 +
 plugins/experimental/slice/util.cc  | 134 
 plugins/experimental/slice/{client.h => util.h} |  14 +-
 21 files changed, 600 insertions(+), 331 deletions(-)

diff --git a/doc/admin-guide/plugins/slice.en.rst b/doc/admin-guide/plugins/slice.en.rst
index dca442d..b131178 100644
--- a/doc/admin-guide/plugins/slice.en.rst
+++ b/doc/admin-guide/plugins/slice.en.rst
@@ -138,9 +138,9 @@ Under normal logging these slice block errors tend to show up as::
 By default more detailed stitching errors are written to ``diags.log``.
 Examples are as follows::
 
-ERROR: [slice.cc: 288] logSliceError(): 1555705573.639 reason="Non 206 internal block response" uri="http://ats_ep/someasset.mp4" uas="curl" req_range="bytes=100-" norm_range="bytes 100-52428799/52428800" etag_exp="%221603934496%22" lm_exp="Fri, 19 Apr 2019 18:53:20 GMT" blk_range="2100-2199" status_got="206" cr_got="" etag_got="%221603934496%22" lm_got="" cc="no-store" via=""
+ERROR: [slice.cc: 288] logSliceError(): 1555705573.639 reason="Non 206 internal block response" uri="http://ats_ep/someasset.mp4" uas="curl" req_range="bytes=100-" norm_range="bytes 100-52428799/52428800" etag_exp="%221603934496%22" lm_exp="Fri, 19 Apr 2019 18:53:20 GMT" blk_range="2100-2199" status_got="206" cr_got="" etag_got="%221603934496%22" lm_got="" cc="no-store" via=""

-ERROR: [server.cc: 288] logSliceError(): 157237.219 reason="Mismatch block Etag" uri="http://ats_ep/someasset.mp4" uas="curl" req_range="bytes=1092779033-1096299354" norm_range="bytes 1092779033-1096299354/2147483648" etag_exp="%223719843648%22" lm_exp="Tue, 29 Oct 2019 14:40:00 GMT" blk_range="109500-109599" status_got="206" cr_got="bytes 109500-109599/2147483648" etag_got="%223719853648%22" lm_got="Tue, 29 Oct 2019 17:26:40 GMT" cc="max-age=1" via=""
+ERROR: [server.cc: 288] logSliceError(): 157237.219 reason="Mismatch block Etag" uri="http://ats_ep/someasset.mp4" uas="curl" req_range="bytes=1092779033-1096299354" norm_range="bytes 1092779033-1096299354/2147483648" etag_exp="%223719843648%22" lm_exp="Tue, 29 Oct 2019 14:40:00 GMT" blk_range="109500-109599" status_got="206" cr_got="bytes 109500-109599/2147483648" etag_got="%223719853648%22" lm_got="Tue, 29 Oct 2019 17:26:40 GMT" cc="max-age=1" via=""
 
 Whether or how often these detailed log entries are written are
 configurable plugin options.
diff --git a/plugins/experimental/slice/Config.cc b/plugins/experimental/slice/Config.cc
index 6d27089..fb04ebb 100644
--- a/plugins/experimental/slice/Config.cc
+++ b/plugins/experimental/slice/Config.cc
@@ -98,13 +98,14 @@ Config::fromArgs(int const argc, char const *const argv[])
 {const_cast("remap-host"), required_argument, nullptr, 'r'},
 {const_cast("pace-errorlog"), required_argument, nullptr, 'p'},
 {const_cast("disable-errorlog"), no_argument, nullptr, 'd'},
+{const_cast("throttle"), no_argument, nullptr, 'o'},
 {nullptr, 0, nullptr, 0},
   };
 
   // getopt assumes args start at '1' so this hack is needed
   char *const *argvp = (const_cast(argv) - 1);
   for (;;) {
-int const opt = getopt_long(argc + 1, argvp, "b:t:r:p:d", longopts, nullptr);
+int const opt = getopt_long(argc + 1, 
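
The option table above follows the standard getopt_long() long-options pattern. A minimal
standalone illustration of that pattern; apart from "throttle", the option names below are
placeholders rather than the slice plugin's real flags::

    #include <getopt.h>
    #include <cstdio>

    int main(int argc, char *argv[])
    {
      static const struct option longopts[] = {
        {"blockbytes", required_argument, nullptr, 'b'}, // placeholder long option
        {"throttle", no_argument, nullptr, 'o'},         // the option added in the hunk above
        {nullptr, 0, nullptr, 0},
      };

      int opt;
      while ((opt = getopt_long(argc, argv, "b:o", longopts, nullptr)) != -1) {
        switch (opt) {
        case 'b':
          std::printf("blockbytes = %s\n", optarg);
          break;
        case 'o':
          std::printf("throttle enabled\n");
          break;
        default:
          std::fprintf(stderr, "usage: %s [--blockbytes N] [--throttle]\n", argv[0]);
          return 1;
        }
      }
      return 0;
    }
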

[trafficserver] branch master updated: Convert tscore regression tests to Catch unit tests.

2020-03-20 Thread bcall
This is an automated email from the ASF dual-hosted git repository.

bcall pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git


The following commit(s) were added to refs/heads/master by this push:
 new 239336a  Convert tscore regression tests to Catch unit tests.
239336a is described below

commit 239336a7054e579d1a497c21f26a347c717ae295
Author: Walter Karas 
AuthorDate: Thu Mar 19 16:39:45 2020 -0500

Convert tscore regression tests to Catch unit tests.
---
 src/tscore/Makefile.am   |  4 +-
 src/tscore/Tokenizer.cc  | 25 
 src/tscore/Version.cc| 40 
 src/tscore/unit_tests/test_Extendible.cc |  2 +
 src/tscore/unit_tests/test_Tokenizer.cc  | 56 +++
 src/tscore/unit_tests/test_Version.cc| 65 
 6 files changed, 126 insertions(+), 66 deletions(-)

diff --git a/src/tscore/Makefile.am b/src/tscore/Makefile.am
index c342179..36b5fa1 100644
--- a/src/tscore/Makefile.am
+++ b/src/tscore/Makefile.am
@@ -189,7 +189,9 @@ test_tscore_SOURCES = \
unit_tests/test_Regex.cc \
unit_tests/test_Scalar.cc \
unit_tests/test_scoped_resource.cc \
-   unit_tests/test_ts_file.cc
+   unit_tests/test_Tokenizer.cc \
+   unit_tests/test_ts_file.cc \
+   unit_tests/test_Version.cc
 
 if HAS_HKDF
 test_tscore_SOURCES += \
diff --git a/src/tscore/Tokenizer.cc b/src/tscore/Tokenizer.cc
index 068b4e0..dda9435 100644
--- a/src/tscore/Tokenizer.cc
+++ b/src/tscore/Tokenizer.cc
@@ -374,28 +374,3 @@ Tokenizer::ReUse()
   add_node   = _node;
   add_index  = 0;
 }
-
-#if TS_HAS_TESTS
-#include "tscore/TestBox.h"
-
-REGRESSION_TEST(libts_Tokenizer)(RegressionTest *test, int /* atype ATS_UNUSED */, int *pstatus)
-{
-  TestBox box(test, pstatus);
-  box = REGRESSION_TEST_PASSED;
-
-  Tokenizer remap(" \t");
-
-  const char *line = "map https://abc.com https://abc.com @plugin=conf_remap.so @pparam=proxy.config.abc='ABC DEF'";
-
-  const char *toks[] = {"map", "https://abc.com", "https://abc.com", "@plugin=conf_remap.so", "@pparam=proxy.config.abc='ABC DEF'"};
-
-  unsigned count = remap.Initialize(const_cast<char *>(line), (COPY_TOKS | ALLOW_SPACES));
-
-  box.check(count == 5, "check that we parsed 5 tokens");
-  box.check(count == remap.count(), "parsed %u tokens, but now we have %u tokens", count, remap.count());
-
-  for (unsigned i = 0; i < count; ++i) {
-    box.check(strcmp(remap[i], toks[i]) == 0, "expected token %u to be '%s' but found '%s'", count, toks[i], remap[i]);
-  }
-}
-#endif
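
The new unit_tests/test_Tokenizer.cc takes over the checks from the TestBox regression test
removed above. A sketch of what that Catch-style conversion looks like (an assumption about
the new file's shape, not its verbatim contents)::

    #include "catch.hpp" // assumed: the vendored Catch single header used by the other tscore tests
    #include "tscore/Tokenizer.h"

    #include <cstring>

    TEST_CASE("Tokenizer remap line", "[libts][Tokenizer]")
    {
      Tokenizer remap(" \t");

      const char *line   = "map https://abc.com https://abc.com @plugin=conf_remap.so @pparam=proxy.config.abc='ABC DEF'";
      const char *toks[] = {"map", "https://abc.com", "https://abc.com", "@plugin=conf_remap.so",
                            "@pparam=proxy.config.abc='ABC DEF'"};

      unsigned count = remap.Initialize(const_cast<char *>(line), (COPY_TOKS | ALLOW_SPACES));

      REQUIRE(count == 5);             // same token count the old TestBox check expected
      REQUIRE(count == remap.count());

      for (unsigned i = 0; i < count; ++i) {
        CHECK(strcmp(remap[i], toks[i]) == 0); // each token matches the expected string
      }
    }
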
diff --git a/src/tscore/Version.cc b/src/tscore/Version.cc
index 07c72fe..e2fd0e3 100644
--- a/src/tscore/Version.cc
+++ b/src/tscore/Version.cc
@@ -124,43 +124,3 @@ AppVersionInfo::setup(const char *pkg_name, const char *app_name, const char *ap
 
   defined = 1;
 }
-
-#if TS_HAS_TESTS
-#include "tscore/TestBox.h"
-
-/**
- * AppVersionInfo class test.
- */
-REGRESSION_TEST(AppVersionInfo)(RegressionTest *t, int /* atype ATS_UNUSED */, int *pstatus)
-{
-  *pstatus = REGRESSION_TEST_PASSED;
-
-  AppVersionInfo info;
-
-  TestBox tb(t, pstatus);
-
-  const char *errMsgFormat = "wrong build number, expected '%s', got '%s'";
-  const char *bench[][3]   = {// date, time, resulting build number
-{"Oct  4 1957", "19:28:34", BUILD_NUMBER},
-{"Oct  4 1957", "19:28:34", "100419"},
-{"Apr  4 1957", "09:08:04", "040409"},
-{" 4 Apr 1957", "09:08:04", "??"},
-{"Apr  4 1957", "09-08-04", "??"}};
-
-  int benchSize = sizeof(bench) / sizeof(bench[0]);
-
-  if (0 != strlen(BUILD_NUMBER)) {
-// Since BUILD_NUMBER is defined by a #define directive, it is not
-// possible to change the version value from inside the regression test.
-// If not empty BUILD_NUMBER overrides any result, in this case run only
-// this test (the rest will always fail).
-info.setup("Apache Traffic Server", "traffic_server", "5.2.1", 
bench[0][0], bench[0][1], "build_slave", "builder", "");
-tb.check(0 == strcmp(info.BldNumStr, bench[0][2]), errMsgFormat, 
bench[0][2], info.BldNumStr);
-  } else {
-for (int i = 1; i < benchSize; i++) {
-  info.setup("Apache Traffic Server", "traffic_server", "5.2.1", 
bench[i][0], bench[i][1], "build_slave", "builder", "");
-  tb.check(0 == strcmp(info.BldNumStr, bench[i][2]), errMsgFormat, 
bench[i][2], info.BldNumStr);
-}
-  }
-}
-#endif
diff --git a/src/tscore/unit_tests/test_Extendible.cc b/src/tscore/unit_tests/test_Extendible.cc
index a6b0210..2369184 100644
--- a/src/tscore/unit_tests/test_Extendible.cc
+++ b/src/tscore/unit_tests/test_Extendible.cc
@@ -397,6 +397,8 @@ TEST_CASE("Extendible", "")
 CHECK(ref.m_str == "Hello");
   }
 
+  printf("\n");
+
   INFO("Extendible Test Complete")
 }
 
diff --git