[trafficserver] branch master updated: Ignore test_librecords

2019-02-25 Thread masaori
This is an automated email from the ASF dual-hosted git repository.

masaori pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git


The following commit(s) were added to refs/heads/master by this push:
 new 902d5ad  Ignore test_librecords
902d5ad is described below

commit 902d5ad96a446f7485716e8ec72f7f06551418b6
Author: Masaori Koshiba 
AuthorDate: Tue Feb 26 10:55:14 2019 +0900

Ignore test_librecords

It is a check program introduced by c83061d68dfbe9e25210a953ea640063a0525ab7.
---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index ec01fe1..0aeedac 100644
--- a/.gitignore
+++ b/.gitignore
@@ -87,6 +87,7 @@ src/tscore/test_Regex
 src/tscore/test_X509HostnameValidator
 src/tscore/test_tscore
 src/tscpp/util/test_tscpputil
+lib/records/test_librecords
 lib/perl/lib/Apache/TS.pm
 
 iocore/net/test_certlookup



[trafficserver] branch master updated: HdrHeap default size unit test fix.

2019-02-25 Thread gancho
This is an automated email from the ASF dual-hosted git repository.

gancho pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git


The following commit(s) were added to refs/heads/master by this push:
 new 6565396  HdrHeap default size unit test fix.
6565396 is described below

commit 65653961072d9e6d56570e5a744b7499ee6b1b22
Author: Gancho Tenev 
AuthorDate: Mon Feb 25 15:30:46 2019 -0800

HdrHeap default size unit test fix.

After refactoring "HdrHeap refresh" in PR #4953, the unit test needs
to be updated as well. Changing HDR_HEAP_DEFAULT_SIZE to
HdrHeap::DEFAULT_SIZE
---
 doc/developer-guide/core-architecture/heap.en.rst | 4 ++--
 proxy/hdrs/unit_tests/test_Hdrs.cc| 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/doc/developer-guide/core-architecture/heap.en.rst 
b/doc/developer-guide/core-architecture/heap.en.rst
index e03b5ad..31bee10 100644
--- a/doc/developer-guide/core-architecture/heap.en.rst
+++ b/doc/developer-guide/core-architecture/heap.en.rst
@@ -132,10 +132,10 @@ Classes
 
 .. function:: HdrHeap * new_HdrHeap(int n)
 
-   Create and return a new instance of :class:`HdrHeap`. If :arg:`n` is less 
than ``HDR_HEAP_DEFAULT_SIZE``
+   Create and return a new instance of :class:`HdrHeap`. If :arg:`n` is less 
than ``HdrHeap::DEFAULT_SIZE``
it is increased to that value.
 
-   If the allocated size is ``HDR_HEAP_DEFAULT_SIZE`` (or smaller and upsized 
to that value) then
+   If the allocated size is ``HdrHeap::DEFAULT_SIZE`` (or smaller and upsized 
to that value) then
the instance is allocated from a thread local pool via 
:code:`hdrHeapAllocator`. If larger it
is allocated from global memory via :code:`ats_malloc`.
 
diff --git a/proxy/hdrs/unit_tests/test_Hdrs.cc 
b/proxy/hdrs/unit_tests/test_Hdrs.cc
index a43503a..790418c 100644
--- a/proxy/hdrs/unit_tests/test_Hdrs.cc
+++ b/proxy/hdrs/unit_tests/test_Hdrs.cc
@@ -68,7 +68,7 @@ TEST_CASE("HdrTest", "[proxy][hdrtest]")
 
   for (auto const &test : tests) {
 HTTPHdr req_hdr;
-HdrHeap *heap = new_HdrHeap(HDR_HEAP_DEFAULT_SIZE + 64); // extra to 
prevent proxy allocation.
+HdrHeap *heap = new_HdrHeap(HdrHeap::DEFAULT_SIZE + 64); // extra to 
prevent proxy allocation.
 
 req_hdr.create(HTTP_TYPE_REQUEST, heap);
 



[trafficserver] branch master updated: Cleanup: remove duplicated SSL_CTX_set_tlsext_status_cb calls for OCSP Stapling

2019-02-25 Thread masaori
This is an automated email from the ASF dual-hosted git repository.

masaori pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git


The following commit(s) were added to refs/heads/master by this push:
 new b4e5319  Cleanup: remove duplicated SSL_CTX_set_tlsext_status_cb calls 
for OCSP Stapling
b4e5319 is described below

commit b4e53199f0d93eecd82db13a3f9841760d1913a0
Author: Masaori Koshiba 
AuthorDate: Wed Feb 20 16:15:50 2019 +0900

Cleanup: remove duplicated SSL_CTX_set_tlsext_status_cb calls for OCSP 
Stapling
---
 iocore/net/SSLUtils.cc | 30 +-
 1 file changed, 9 insertions(+), 21 deletions(-)

diff --git a/iocore/net/SSLUtils.cc b/iocore/net/SSLUtils.cc
index ad20188..653cec4 100644
--- a/iocore/net/SSLUtils.cc
+++ b/iocore/net/SSLUtils.cc
@@ -1285,7 +1285,7 @@ setClientCertLevel(SSL *ssl, uint8_t certLevel)
 }
 
 SSL_CTX *
-SSLInitServerContext(const SSLConfigParams *params, const ssl_user_config 
*sslMultCertSettings, std::vector &certList)
+SSLInitServerContext(const SSLConfigParams *params, const ssl_user_config 
*sslMultCertSettings, std::vector &cert_list)
 {
   int server_verify_client;
   SSL_CTX *ctx = SSLDefaultServerContext();
@@ -1416,7 +1416,7 @@ SSLInitServerContext(const SSLConfigParams *params, const 
ssl_user_config *sslMu
   goto fail;
 }
 
-certList.push_back(cert);
+cert_list.push_back(cert);
 if (SSLConfigParams::load_ssl_file_cb) {
   SSLConfigParams::load_ssl_file_cb(completeServerCertPath.c_str(), 
CONFIG_FLAG_UNVERSIONED);
 }
@@ -1605,6 +1605,12 @@ SSLInitServerContext(const SSLConfigParams *params, 
const ssl_user_config *sslMu
   if (SSLConfigParams::ssl_ocsp_enabled) {
 Debug("ssl", "SSL OCSP Stapling is enabled");
 SSL_CTX_set_tlsext_status_cb(ctx, ssl_callback_ocsp_stapling);
+
+for (auto cert : cert_list) {
+  if (!ssl_stapling_init_cert(ctx, cert, setting_cert)) {
+Warning("failed to configure SSL_CTX for OCSP Stapling info for 
certificate at %s", setting_cert);
+  }
+}
   } else {
 Debug("ssl", "SSL OCSP Stapling is disabled");
   }
@@ -1625,7 +1631,7 @@ fail:
   }
   SSL_CLEAR_PW_REFERENCES(ctx)
   SSLReleaseContext(ctx);
-  for (auto cert : certList) {
+  for (auto cert : cert_list) {
 X509_free(cert);
   }
 
@@ -1703,24 +1709,6 @@ ssl_store_ssl_context(const SSLConfigParams *params, 
SSLCertLookup *lookup, cons
 #endif
   }
 
-#ifdef TS_USE_TLS_OCSP
-  if (SSLConfigParams::ssl_ocsp_enabled) {
-Debug("ssl", "SSL OCSP Stapling is enabled");
-SSL_CTX_set_tlsext_status_cb(ctx, ssl_callback_ocsp_stapling);
-for (auto cert : cert_list) {
-  if (!ssl_stapling_init_cert(ctx, cert, certname)) {
-Warning("failed to configure SSL_CTX for OCSP Stapling info for 
certificate at %s", (const char *)certname);
-  }
-}
-  } else {
-Debug("ssl", "SSL OCSP Stapling is disabled");
-  }
-#else
-  if (SSLConfigParams::ssl_ocsp_enabled) {
-Warning("failed to enable SSL OCSP Stapling; this version of OpenSSL does 
not support it");
-  }
-#endif /* TS_USE_TLS_OCSP */
-
   // Insert additional mappings. Note that this maps multiple keys to the same 
value, so when
   // this code is updated to reconfigure the SSL certificates, it will need 
some sort of
   // refcounting or alternate way of avoiding double frees.



[trafficserver] branch master updated: Implement nbf claim in Uri Signing Plugin

2019-02-25 Thread eze
This is an automated email from the ASF dual-hosted git repository.

eze pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git


The following commit(s) were added to refs/heads/master by this push:
 new d9dc0f4  Implement nbf claim in Uri Signing Plugin
d9dc0f4 is described below

commit d9dc0f42e9f161f8a943483ab8dc38d178b18e16
Author: Dylan Souza 
AuthorDate: Wed Feb 13 20:49:23 2019 +

Implement nbf claim in Uri Signing Plugin
---
 plugins/experimental/uri_signing/jwt.c | 10 ++
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/plugins/experimental/uri_signing/jwt.c 
b/plugins/experimental/uri_signing/jwt.c
index a38565c..69a07e3 100644
--- a/plugins/experimental/uri_signing/jwt.c
+++ b/plugins/experimental/uri_signing/jwt.c
@@ -98,12 +98,6 @@ unsupported_string_claim(const char *str)
 }
 
 bool
-unsupported_date_claim(double t)
-{
-  return isnan(t);
-}
-
-bool
 jwt_validate(struct jwt *jwt)
 {
   if (!jwt) {
@@ -126,8 +120,8 @@ jwt_validate(struct jwt *jwt)
 return false;
   }
 
-  if (!unsupported_date_claim(jwt->nbf)) {
-PluginDebug("Initial JWT Failure: nbf unsupported");
+  if (now() < jwt->nbf) {
+PluginDebug("Initial JWT Failure: nbf claim violated");
 return false;
   }
 



[trafficserver] branch 7.1.x updated: Revert "Avoid ats_malloc in unmarshal"

2019-02-25 Thread eze
This is an automated email from the ASF dual-hosted git repository.

eze pushed a commit to branch 7.1.x
in repository https://gitbox.apache.org/repos/asf/trafficserver.git


The following commit(s) were added to refs/heads/7.1.x by this push:
 new 3b37f1a  Revert "Avoid ats_malloc in unmarshal"
3b37f1a is described below

commit 3b37f1ac7aa5bd91c1c10c5a7c72a93afd37f551
Author: Evan Zelkowitz <19699200+ezelk...@users.noreply.github.com>
AuthorDate: Mon Feb 25 13:35:24 2019 -0700

Revert "Avoid ats_malloc in unmarshal"

This reverts commit 739d8d7d6fab4a8145cc902c8a0b32b2fbddd4c8.
---
 proxy/hdrs/HTTP.cc | 44 +++-
 1 file changed, 35 insertions(+), 9 deletions(-)

diff --git a/proxy/hdrs/HTTP.cc b/proxy/hdrs/HTTP.cc
index f558663..6566dab 100644
--- a/proxy/hdrs/HTTP.cc
+++ b/proxy/hdrs/HTTP.cc
@@ -2007,6 +2007,7 @@ HTTPInfo::marshal_length()
   }
 
   if (m_alt->m_frag_offset_count > HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS) {
+len -= sizeof(m_alt->m_integral_frag_offsets);
 len += sizeof(FragOffset) * m_alt->m_frag_offset_count;
   }
 
@@ -2021,11 +2022,23 @@ HTTPInfo::marshal(char *buf, int len)
   HTTPCacheAlt *marshal_alt = (HTTPCacheAlt *)buf;
   // non-zero only if the offsets are external. Otherwise they get
   // marshalled along with the alt struct.
+  int frag_len = (0 == m_alt->m_frag_offset_count || m_alt->m_frag_offsets == 
m_alt->m_integral_frag_offsets) ?
+   0 :
+   sizeof(HTTPCacheAlt::FragOffset) * 
m_alt->m_frag_offset_count;
+
   ink_assert(m_alt->m_magic == CACHE_ALT_MAGIC_ALIVE);
 
   // Make sure the buffer is aligned
   //ink_assert(((intptr_t)buf) & 0x3 == 0);
 
+  // If we have external fragment offsets, copy the initial ones
+  // into the integral data.
+  if (frag_len) {
+memcpy(m_alt->m_integral_frag_offsets, m_alt->m_frag_offsets, 
sizeof(m_alt->m_integral_frag_offsets));
+frag_len -= sizeof(m_alt->m_integral_frag_offsets);
+// frag_len should never be non-zero at this point, as the offsets
+// should be external only if too big for the internal table.
+  }
   // Memcpy the whole object so that we can use it
   //   live later.  This involves copying a few
   //   extra bytes now but will save copying any
@@ -2038,14 +2051,13 @@ HTTPInfo::marshal(char *buf, int len)
   buf += HTTP_ALT_MARSHAL_SIZE;
   used += HTTP_ALT_MARSHAL_SIZE;
 
-  if (m_alt->m_frag_offset_count > HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS) {
+  if (frag_len > 0) {
 marshal_alt->m_frag_offsets = static_cast(reinterpret_cast(used));
-memcpy(buf, m_alt->m_frag_offsets, m_alt->m_frag_offset_count * 
sizeof(FragOffset));
-buf += m_alt->m_frag_offset_count * sizeof(FragOffset);
-used += m_alt->m_frag_offset_count * sizeof(FragOffset);
+memcpy(buf, m_alt->m_frag_offsets + HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS, 
frag_len);
+buf += frag_len;
+used += frag_len;
   } else {
-// the data stored in intergral buffer
-m_alt->m_frag_offsets = nullptr;
+marshal_alt->m_frag_offsets = nullptr;
   }
 
   // The m_{request,response}_hdr->m_heap pointers are converted
@@ -2102,9 +2114,23 @@ HTTPInfo::unmarshal(char *buf, int len, RefCountObj 
*block_ref)
   len -= HTTP_ALT_MARSHAL_SIZE;
 
   if (alt->m_frag_offset_count > HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS) {
-alt->m_frag_offsets = reinterpret_cast(buf + 
reinterpret_cast(alt->m_frag_offsets));
-len -= sizeof(FragOffset) * alt->m_frag_offset_count;
-ink_assert(len >= 0);
+// stuff that didn't fit in the integral slots.
+int extra   = sizeof(FragOffset) * alt->m_frag_offset_count - 
sizeof(alt->m_integral_frag_offsets);
+char *extra_src = buf + reinterpret_cast(alt->m_frag_offsets);
+// Actual buffer size, which must be a power of two.
+// Well, technically not, because we never modify an unmarshalled fragment
+// offset table, but it would be a nasty bug should that be done in the
+// future.
+int bcount = HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS * 2;
+
+while (bcount < alt->m_frag_offset_count) {
+  bcount *= 2;
+}
+alt->m_frag_offsets =
+  static_cast(ats_malloc(bcount * sizeof(FragOffset))); // 
WRONG - must round up to next power of 2.
+memcpy(alt->m_frag_offsets, alt->m_integral_frag_offsets, 
sizeof(alt->m_integral_frag_offsets));
+memcpy(alt->m_frag_offsets + HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS, 
extra_src, extra);
+len -= extra;
   } else if (alt->m_frag_offset_count > 0) {
 alt->m_frag_offsets = alt->m_integral_frag_offsets;
   } else {



[trafficserver] branch master updated: MIMEScanner: Make MIMEScanner a class, not a POD with free functions.

2019-02-25 Thread amc
This is an automated email from the ASF dual-hosted git repository.

amc pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git


The following commit(s) were added to refs/heads/master by this push:
 new 0be4e73  MIMEScanner: Make MIMEScanner a class, not a POD with free 
functions.
0be4e73 is described below

commit 0be4e7326b89670dea1ffe4c7f21c36bfd2bdf59
Author: Alan M. Carroll 
AuthorDate: Wed Feb 6 11:10:22 2019 -0600

MIMEScanner: Make MIMEScanner a class, not a POD with free functions.
---
 proxy/hdrs/HTTP.cc  |  27 +--
 proxy/hdrs/HdrTest.cc   |  79 -
 proxy/hdrs/HdrTest.h|   1 -
 proxy/hdrs/MIME.cc  | 303 
 proxy/hdrs/MIME.h   |  82 +++--
 proxy/hdrs/Makefile.am  |   1 +
 proxy/hdrs/unit_tests/test_Hdrs.cc  |  86 +
 proxy/hdrs/unit_tests/unit_test_main.cc |  21 ++-
 8 files changed, 305 insertions(+), 295 deletions(-)

diff --git a/proxy/hdrs/HTTP.cc b/proxy/hdrs/HTTP.cc
index 41bf41b..eebf4ad 100644
--- a/proxy/hdrs/HTTP.cc
+++ b/proxy/hdrs/HTTP.cc
@@ -894,17 +894,21 @@ http_parser_parse_req(HTTPParser *parser, HdrHeap *heap, 
HTTPHdrImpl *hh, const
 const char *version_start;
 const char *version_end;
 
+ts::TextView text, parsed;
+
 real_end = end;
 
   start:
 hh->m_polarity = HTTP_TYPE_REQUEST;
 
 // Make sure the line is not longer than 64K
-if (scanner->m_line_length >= UINT16_MAX) {
+if (scanner->get_buffered_line_size() >= UINT16_MAX) {
   return PARSE_RESULT_ERROR;
 }
 
-err = mime_scanner_get(scanner, start, real_end, &line_start, &end, 
&line_is_real, eof, MIME_SCANNER_TYPE_LINE);
+text.assign(*start, real_end);
+err= scanner->get(text, parsed, line_is_real, eof, MIMEScanner::LINE);
+*start = text.data();
 if (err < 0) {
   return err;
 }
@@ -917,9 +921,9 @@ http_parser_parse_req(HTTPParser *parser, HdrHeap *heap, 
HTTPHdrImpl *hh, const
   return err;
 }
 
-cur = line_start;
-ink_assert((end - cur) >= 0);
-ink_assert((end - cur) < UINT16_MAX);
+ink_assert(parsed.size() < UINT16_MAX);
+line_start = cur = parsed.data();
+end  = parsed.data_end();
 
 must_copy_strings = (must_copy_strings || (!line_is_real));
 
@@ -1244,11 +1248,14 @@ http_parser_parse_resp(HTTPParser *parser, HdrHeap 
*heap, HTTPHdrImpl *hh, const
 hh->m_polarity = HTTP_TYPE_RESPONSE;
 
 // Make sure the line is not longer than 64K
-if (scanner->m_line_length >= UINT16_MAX) {
+if (scanner->get_buffered_line_size() >= UINT16_MAX) {
   return PARSE_RESULT_ERROR;
 }
 
-err = mime_scanner_get(scanner, start, real_end, &line_start, &end, 
&line_is_real, eof, MIME_SCANNER_TYPE_LINE);
+ts::TextView text{*start, real_end};
+ts::TextView parsed;
+err= scanner->get(text, parsed, line_is_real, eof, MIMEScanner::LINE);
+*start = text.data();
 if (err < 0) {
   return err;
 }
@@ -1256,9 +1263,9 @@ http_parser_parse_resp(HTTPParser *parser, HdrHeap *heap, 
HTTPHdrImpl *hh, const
   return err;
 }
 
-cur = line_start;
-ink_assert((end - cur) >= 0);
-ink_assert((end - cur) < UINT16_MAX);
+ink_assert(parsed.size() < UINT16_MAX);
+line_start = cur = parsed.data();
+end  = parsed.data_end();
 
 must_copy_strings = (must_copy_strings || (!line_is_real));
 
diff --git a/proxy/hdrs/HdrTest.cc b/proxy/hdrs/HdrTest.cc
index ee3f93d..875e4e1 100644
--- a/proxy/hdrs/HdrTest.cc
+++ b/proxy/hdrs/HdrTest.cc
@@ -74,7 +74,6 @@ HdrTest::go(RegressionTest *t, int /* atype ATS_UNUSED */)
   status = status & test_url();
   status = status & test_arena();
   status = status & test_regex();
-  status = status & test_http_parser_eos_boundary_cases();
   status = status & test_http_mutation();
   status = status & test_mime();
   status = status & test_http();
@@ -521,84 +520,6 @@ HdrTest::test_mime()
   -*/
 
 int
-HdrTest::test_http_parser_eos_boundary_cases()
-{
-  struct {
-const char *msg;
-int expected_result;
-int expected_bytes_consumed;
-  } tests[] = {
-{"GET /index.html HTTP/1.0\r\n", PARSE_RESULT_DONE, 26},
-{"GET /index.html HTTP/1.0\r\n\r\n***BODY", PARSE_RESULT_DONE, 28},
-{"GET /index.html HTTP/1.0\r\nUser-Agent: foobar\r\n\r\n***BODY", 
PARSE_RESULT_DONE, 48},
-{"GET", PARSE_RESULT_ERROR, 3},
-{"GET /index.html", PARSE_RESULT_ERROR, 15},
-{"GET /index.html\r\n", PARSE_RESULT_ERROR, 17},
-{"GET /index.html HTTP/1.0", PARSE_RESULT_ERROR, 24},
-{"GET /index.html HTTP/1.0\r", PARSE_RESULT_ERROR, 25},
-{"GET /index.html HTTP/1.0\n", PARSE_RESULT_DONE, 25},
-{"GET /index.html HTTP/1.0\n\n", PARSE_RESULT_DONE, 26},
-{"GET /index.html HTTP/1.0\r\n\r\n", PARSE_RESULT_DONE, 

[trafficserver] branch master updated: Added connect_end to the slow log report

2019-02-25 Thread bcall
This is an automated email from the ASF dual-hosted git repository.

bcall pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git


The following commit(s) were added to refs/heads/master by this push:
 new ac25c6c  Added connect_end to the slow log report
ac25c6c is described below

commit ac25c6c0cb99735f9a45c75e4c32b03bf2bae786
Author: Bryan Call 
AuthorDate: Fri Feb 22 09:59:07 2019 -0800

Added connect_end to the slow log report
---
 tools/slow_log_report.pl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/slow_log_report.pl b/tools/slow_log_report.pl
index 597b2f5..9207b59 100755
--- a/tools/slow_log_report.pl
+++ b/tools/slow_log_report.pl
@@ -37,7 +37,7 @@ sub displayStat($) {
   my($stats) = @_;
 
   printf("%25s %10s %10s %10s %10s %10s %10s %10s %10s\n", 'key', 'total', 
'count', 'mean', 'median', '95th', '99th', 'min', 'max');
-  foreach my $key ('ua_begin', 'ua_first_read', 'ua_read_header_done', 
'cache_open_read_begin', 'cache_open_read_end', 'dns_lookup_begin', 
'dns_lookup_end', 'server_connect', 'server_first_read', 
'server_read_header_done', 'server_close', 'ua_close', 'sm_finish') {
+  foreach my $key ('ua_begin', 'ua_first_read', 'ua_read_header_done', 
'cache_open_read_begin', 'cache_open_read_end', 'dns_lookup_begin', 
'dns_lookup_end', 'server_connect', 'server_connect_end', 'server_first_read', 
'server_read_header_done', 'server_close', 'ua_close', 'sm_finish') {
 
 my $count = $stats->{$key}->{count};
 my $total = $stats->{$key}->{total};