This is an automated email from the ASF dual-hosted git repository.
bneradt pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git
The following commit(s) were added to refs/heads/master by this push:
     new 9400df3c0c Term standardization: dead/down server -> down server (#9582)
9400df3c0c is described below
commit 9400df3c0c571af8fc4e61a23a42dbc4cb69061f
Author: lzx404243 <[email protected]>
AuthorDate: Thu May 25 15:51:53 2023 -0500
Term standardization: dead/down server -> down server (#9582)
#7283 records the issue of using "dead server" and "down server"
interchangeably in the configurations, which has caused confusion in the
field when inspecting issues (e.g., making one wonder about the distinction
between dead and down). This PR standardizes those terms to "down server".
The following configuration and metric items (and their documentation) are
updated:
* proxy.config.http.connect_attempts_max_retries_dead_server becomes
proxy.config.http.connect_attempts_max_retries_down_server
* proxy.config.http.connect.dead.policy becomes
proxy.config.http.connect.down.policy
* proxy.process.http.dead_server.no_requests becomes
proxy.process.http.down_server.no_requests
In addition to the above, other usage of the term "dead server" and the
like has been updated (in HostDB and HTTP).
This resolves #7283.
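
For plugin authors touched by the enum rename, here is a minimal sketch (not
part of this patch) of a global plugin that sets the renamed overridable key
per transaction. The plugin name, vendor, and hook choice are illustrative
assumptions; TSHttpTxnConfigIntSet and the renamed key itself come from this
change:

    #include <ts/ts.h>
    #include <stddef.h>

    static int
    down_server_handler(TSCont contp, TSEvent event, void *edata)
    {
      TSHttpTxn txnp = (TSHttpTxn)edata;

      /* TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DOWN_SERVER replaces
         TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DEAD_SERVER. Allow one
         attempt to an origin already marked down for this transaction. */
      TSHttpTxnConfigIntSet(txnp, TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DOWN_SERVER, 1);

      TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE);
      return 0;
    }

    void
    TSPluginInit(int argc, const char *argv[])
    {
      TSPluginRegistrationInfo info;
      info.plugin_name   = "down_server_example"; /* hypothetical name */
      info.vendor_name   = "example";             /* hypothetical vendor */
      info.support_email = "[email protected]";    /* placeholder */

      if (TSPluginRegister(&info) != TS_SUCCESS) {
        TSError("[down_server_example] registration failed");
        return;
      }
      TSHttpHookAdd(TS_HTTP_READ_REQUEST_HDR_HOOK,
                    TSContCreate(down_server_handler, NULL));
    }
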
---
configs/body_factory/default/Makefile.am | 2 +-
.../default/{connect#all_dead => connect#all_down} | 2 +-
configs/records.yaml.default.in | 2 +-
doc/admin-guide/files/records.yaml.en.rst | 18 +++++-----
.../statistics/core/http-connection.en.rst | 4 +--
doc/admin-guide/plugins/lua.en.rst | 4 +--
.../api/functions/TSHttpOverridableConfig.en.rst | 2 +-
.../api/types/TSOverridableConfigKey.en.rst | 2 +-
.../core-architecture/hostdb.en.rst | 18 +++++-----
.../admin-guide/files/records.config.en.po | 10 +++---
.../api/functions/TSHttpOverridableConfig.en.po | 2 +-
doc/release-notes/upgrading.en.rst | 16 +++++++++
include/ts/apidefs.h.in | 4 +--
iocore/hostdb/HostDB.cc | 10 +++---
iocore/hostdb/I_HostDBProcessor.h | 20 +++++------
plugins/lua/ts_lua_http_config.c | 8 ++---
proxy/http/HttpConfig.cc | 14 ++++----
proxy/http/HttpConfig.h | 6 ++--
proxy/http/HttpSM.cc | 12 +++----
proxy/http/HttpTransact.cc | 42 +++++++++++-----------
proxy/http/HttpTransact.h | 6 ++--
src/records/RecordsConfig.cc | 4 +--
src/shared/overridable_txn_vars.cc | 6 ++--
src/traffic_server/InkAPI.cc | 8 ++---
src/traffic_server/InkAPITest.cc | 4 +--
tests/gold_tests/records/gold/full_records.yaml | 4 +--
.../records/legacy_config/full_records.config | 4 +--
.../tls/tls_verify_override_base.test.py | 2 +-
28 files changed, 126 insertions(+), 110 deletions(-)
diff --git a/configs/body_factory/default/Makefile.am b/configs/body_factory/default/Makefile.am
index 69eb6b6810..e1125d0b3b 100644
--- a/configs/body_factory/default/Makefile.am
+++ b/configs/body_factory/default/Makefile.am
@@ -28,7 +28,7 @@ dist_bodyfactory_DATA = \
connect\#dns_failed \
connect\#failed_connect \
connect\#hangup \
- connect\#all_dead \
+ connect\#all_down \
default \
interception\#no_host \
README \
diff --git a/configs/body_factory/default/connect#all_dead b/configs/body_factory/default/connect#all_down
similarity index 83%
rename from configs/body_factory/default/connect#all_dead
rename to configs/body_factory/default/connect#all_down
index 7e18a62986..b0d439a262 100644
--- a/configs/body_factory/default/connect#all_dead
+++ b/configs/body_factory/default/connect#all_down
@@ -10,7 +10,7 @@
<FONT FACE="Helvetica,Arial"><B>
Description: Unable to find a valid target host.
-The server was found but all of the addresses are marked dead and so there is
+The server was found but all of the addresses are marked down and so there is
no valid target address to which to connect. Please try again after a few
minutes.
</B></FONT>
<HR>
diff --git a/configs/records.yaml.default.in b/configs/records.yaml.default.in
index 47c51208df..d0c24adcfd 100644
--- a/configs/records.yaml.default.in
+++ b/configs/records.yaml.default.in
@@ -96,7 +96,7 @@ ts:
# https://docs.trafficserver.apache.org/records.yaml#origin-server-connect-attempts
##############################################################################
connect_attempts_max_retries: 3
- connect_attempts_max_retries_dead_server: 1
+ connect_attempts_max_retries_down_server: 1
connect_attempts_rr_retries: 3
connect_attempts_timeout: 30
down_server:
diff --git a/doc/admin-guide/files/records.yaml.en.rst b/doc/admin-guide/files/records.yaml.en.rst
index ded30d08c4..0cf49b0df2 100644
--- a/doc/admin-guide/files/records.yaml.en.rst
+++ b/doc/admin-guide/files/records.yaml.en.rst
@@ -1599,23 +1599,23 @@ Origin Server Connect Attempts
The maximum number of connection retries |TS| can make when the origin server is not responding.
Each retry attempt lasts for `proxy.config.http.connect_attempts_timeout`_ seconds. Once the maximum number of retries is
- reached, the origin is marked dead (as controlled by `proxy.config.http.connect.dead.policy`_. After this, the setting
- `proxy.config.http.connect_attempts_max_retries_dead_server`_ is used to limit the number of retry attempts to the known dead origin.
+ reached, the origin is marked down (as controlled by `proxy.config.http.connect.down.policy`_. After this, the setting
+ `proxy.config.http.connect_attempts_max_retries_down_server`_ is used to limit the number of retry attempts to the known down origin.
-.. ts:cv:: CONFIG proxy.config.http.connect_attempts_max_retries_dead_server INT 1
+.. ts:cv:: CONFIG proxy.config.http.connect_attempts_max_retries_down_server INT 1
:reloadable:
:overridable:
- Maximum number of connection attempts |TS| can make while an origin is marked dead per request. Typically this value is smaller than
- `proxy.config.http.connect_attempts_max_retries`_ so an error is returned to the client faster and also to reduce the load on the dead origin.
+ Maximum number of connection attempts |TS| can make while an origin is marked down per request. Typically this value is smaller than
+ `proxy.config.http.connect_attempts_max_retries`_ so an error is returned to the client faster and also to reduce the load on the down origin.
The timeout interval `proxy.config.http.connect_attempts_timeout`_ in seconds is used with this setting.
-.. ts:cv:: CONFIG proxy.config.http.connect.dead.policy INT 2
+.. ts:cv:: CONFIG proxy.config.http.connect.down.policy INT 2
:overridable:
- Controls what origin server connection failures contribute to marking a server dead. When set to 2, any connection failure during the TCP and TLS
- handshakes will contribute to marking the server dead. When set to 1, only TCP handshake failures will contribute to marking a server dead.
- When set to 0, no connection failures will be used towards marking a server dead.
+ Controls what origin server connection failures contribute to marking a server down. When set to 2, any connection failure during the TCP and TLS
+ handshakes will contribute to marking the server down. When set to 1, only TCP handshake failures will contribute to marking a server down.
+ When set to 0, no connection failures will be used towards marking a server down.
.. ts:cv:: CONFIG proxy.config.http.server_max_connections INT 0
:reloadable:
diff --git a/doc/admin-guide/monitoring/statistics/core/http-connection.en.rst b/doc/admin-guide/monitoring/statistics/core/http-connection.en.rst
index 667dcf9de1..c11d5c7bf2 100644
--- a/doc/admin-guide/monitoring/statistics/core/http-connection.en.rst
+++ b/doc/admin-guide/monitoring/statistics/core/http-connection.en.rst
@@ -144,10 +144,10 @@ HTTP Connection
This metric tracks the number of server connections currently in the server session sharing pools. The server session sharing is controlled by settings
:ts:cv:`proxy.config.http.server_session_sharing.pool` and :ts:cv:`proxy.config.http.server_session_sharing.match`.
-.. ts:stat:: global proxy.process.http.dead_server.no_requests integer
+.. ts:stat:: global proxy.process.http.down_server.no_requests integer
:type: counter
- Tracks the number of client requests that did not have a request sent to the origin server because the origin server was marked dead.
+ Tracks the number of client requests that did not have a request sent to the origin server because the origin server was marked down.
.. ts:stat:: global proxy.process.http.http_proxy_loop_detected integer
:type: counter
diff --git a/doc/admin-guide/plugins/lua.en.rst b/doc/admin-guide/plugins/lua.en.rst
index 63c4c5d656..a8292662d3 100644
--- a/doc/admin-guide/plugins/lua.en.rst
+++ b/doc/admin-guide/plugins/lua.en.rst
@@ -4082,8 +4082,8 @@ Http config constants
TS_LUA_CONFIG_HTTP_PER_SERVER_CONNECTION_MAX
TS_LUA_CONFIG_HTTP_PER_SERVER_CONNECTION_MATCH
TS_LUA_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES
- TS_LUA_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DEAD_SERVER
- TS_LUA_CONFIG_HTTP_CONNECT_DEAD_POLICY
+ TS_LUA_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DOWN_SERVER
+ TS_LUA_CONFIG_HTTP_CONNECT_DOWN_POLICY
TS_LUA_CONFIG_HTTP_CONNECT_ATTEMPTS_RR_RETRIES
TS_LUA_CONFIG_HTTP_CONNECT_ATTEMPTS_TIMEOUT
TS_LUA_CONFIG_HTTP_DOWN_SERVER_CACHE_TIME
diff --git a/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst b/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst
index 38567a35e3..f2c27dfd22 100644
--- a/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst
+++ b/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst
@@ -111,7 +111,7 @@ TSOverridableConfigKey Value Config
:c:enumerator:`TS_CONFIG_HTTP_CACHE_WHEN_TO_REVALIDATE` :ts:cv:`proxy.config.http.cache.when_to_revalidate`
:c:enumerator:`TS_CONFIG_HTTP_CHUNKING_ENABLED` :ts:cv:`proxy.config.http.chunking_enabled`
:c:enumerator:`TS_CONFIG_HTTP_CHUNKING_SIZE` :ts:cv:`proxy.config.http.chunking.size`
-:c:enumerator:`TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DEAD_SERVER` :ts:cv:`proxy.config.http.connect_attempts_max_retries_dead_server`
+:c:enumerator:`TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DOWN_SERVER` :ts:cv:`proxy.config.http.connect_attempts_max_retries_down_server`
:c:enumerator:`TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES` :ts:cv:`proxy.config.http.connect_attempts_max_retries`
:c:enumerator:`TS_CONFIG_HTTP_CONNECT_ATTEMPTS_RR_RETRIES` :ts:cv:`proxy.config.http.connect_attempts_rr_retries`
:c:enumerator:`TS_CONFIG_HTTP_CONNECT_ATTEMPTS_TIMEOUT` :ts:cv:`proxy.config.http.connect_attempts_timeout`
diff --git a/doc/developer-guide/api/types/TSOverridableConfigKey.en.rst b/doc/developer-guide/api/types/TSOverridableConfigKey.en.rst
index 796fe563ac..287e4f5262 100644
--- a/doc/developer-guide/api/types/TSOverridableConfigKey.en.rst
+++ b/doc/developer-guide/api/types/TSOverridableConfigKey.en.rst
@@ -76,7 +76,7 @@ Enumeration Members
.. c:enumerator:: TS_CONFIG_HTTP_TRANSACTION_ACTIVE_TIMEOUT_OUT
.. c:enumerator:: TS_CONFIG_HTTP_ORIGIN_MAX_CONNECTIONS
.. c:enumerator:: TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES
-.. c:enumerator:: TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DEAD_SERVER
+.. c:enumerator:: TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DOWN_SERVER
.. c:enumerator:: TS_CONFIG_HTTP_CONNECT_ATTEMPTS_RR_RETRIES
.. c:enumerator:: TS_CONFIG_HTTP_CONNECT_ATTEMPTS_TIMEOUT
.. c:enumerator:: TS_CONFIG_HTTP_POST_CONNECT_ATTEMPTS_TIMEOUT
diff --git a/doc/developer-guide/core-architecture/hostdb.en.rst b/doc/developer-guide/core-architecture/hostdb.en.rst
index f6c036a835..a6ebee3008 100644
--- a/doc/developer-guide/core-architecture/hostdb.en.rst
+++ b/doc/developer-guide/core-architecture/hostdb.en.rst
@@ -47,13 +47,13 @@ about the protocol to use to the upstream.
The last failure time tracks when the last connection failure to the info occurred and doubles as
a flag, where a value of ``TS_TIME_ZERO`` indicates a live target and any other value indicates a
-dead info.
+down info.
-If an info is marked dead (has a non-zero last failure time) there is a "fail window" during which
+If an info is marked down (has a non-zero last failure time) there is a "fail window" during which
no connections are permitted. After this time the info is considered to be a "zombie". If all infos
-for a record are dead then a specific error message is generated (body factory tag
-"connect#all_dead"). Otherwise if the selected info is a zombie, a request is permitted but the
-zombie is immediately marked dead again, preventing any additional requests until either the fail
+for a record are down then a specific error message is generated (body factory tag
+"connect#all_down"). Otherwise if the selected info is a zombie, a request is permitted but the
+zombie is immediately marked down again, preventing any additional requests until either the fail
window has passed or the single connection succeeds. A successful connection clears the last fail
time and the info becomes alive.
@@ -135,10 +135,10 @@ Issues
======
Currently if an upstream is marked down connections are still permitted, the only change is the
-number of retries. This has caused operational problems where dead systems are flooded with requests
+number of retries. This has caused operational problems where down systems are flooded with requests
which, despite the timeouts, accumulate in ATS until ATS runs out of memory (there were instances of
over 800K pending transactions). This also made it hard to bring the upstreams back online. With
-these changes requests to dead upstreams are strongly rate limited and other transactions are
+these changes, requests to upstreams marked down are strongly rate limited and other transactions are
immediately terminated with a 502 response, protecting both the upstream and ATS.
Future
@@ -176,14 +176,14 @@ This version has several major architectural changes from the previous version.
* Single and multiple address results are treated identically - a singleton is simply a multiple
of size 1. This yields a major simplification of the implementation.
-* Connections are throttled to dead upstreams, allowing only a single connection attempt per fail
+* Connections are throttled to upstreams marked down, allowing only a single connection attempt per fail
window timing until a connection succeeds.
* Timing information is stored in ``std::chrono`` data types instead of proprietary types.
* State information has been promoted to atomics and updates are immediate rather than scheduled.
This also means the data in the state machine is a reference to a shared object, not a local copy.
- The promotion was necessary to coordinate zombie connections to dead upstreams across transactions.
+ The promotion was necessary to coordinate zombie connections to upstreams marked down across transactions.
* The "resolve key" is now a separate data object from the HTTP request. This is a subtle but
major change. The effect is requests can be routed to different upstreams without changing
diff --git a/doc/locale/ja/LC_MESSAGES/admin-guide/files/records.config.en.po b/doc/locale/ja/LC_MESSAGES/admin-guide/files/records.config.en.po
index e6cf623095..c3ee155df2 100644
--- a/doc/locale/ja/LC_MESSAGES/admin-guide/files/records.config.en.po
+++ b/doc/locale/ja/LC_MESSAGES/admin-guide/files/records.config.en.po
@@ -2268,17 +2268,17 @@ msgid ""
"The maximum number of connection retries Traffic Server can make when the "
"origin server is not responding. Each retry attempt lasts for `proxy.config."
"http.connect_attempts_timeout`_ seconds. Once the maximum number of "
-"retries is reached, the origin is marked dead. After this, the setting "
-"`proxy.config.http.connect_attempts_max_retries_dead_server`_ is used to "
-"limit the number of retry attempts to the known dead origin."
+"retries is reached, the origin is marked down. After this, the setting "
+"`proxy.config.http.connect_attempts_max_retries_down_server`_ is used to "
+"limit the number of retry attempts to the known down origin."
msgstr ""
#: ../../../admin-guide/files/records.yaml.en.rst:1363
msgid ""
"Maximum number of connection retries Traffic Server can make while an "
-"origin is marked dead. Typically this value is smaller than `proxy.config."
+"origin is marked down. Typically this value is smaller than `proxy.config."
"http.connect_attempts_max_retries`_ so an error is returned to the client "
-"faster and also to reduce the load on the dead origin. The timeout interval "
+"faster and also to reduce the load on the down origin. The timeout interval "
"`proxy.config.http.connect_attempts_timeout`_ in seconds is used with this "
"setting."
msgstr ""
diff --git a/doc/locale/ja/LC_MESSAGES/developer-guide/api/functions/TSHttpOverridableConfig.en.po b/doc/locale/ja/LC_MESSAGES/developer-guide/api/functions/TSHttpOverridableConfig.en.po
index 8a3d6be4a1..5b6054aead 100644
--- a/doc/locale/ja/LC_MESSAGES/developer-guide/api/functions/TSHttpOverridableConfig.en.po
+++ b/doc/locale/ja/LC_MESSAGES/developer-guide/api/functions/TSHttpOverridableConfig.en.po
@@ -271,7 +271,7 @@ msgid ":ts:cv:`proxy.config.http.connect_attempts_max_retries`"
msgstr ""
#: ../../../developer-guide/api/functions/TSHttpOverridableConfig.en.rst:114
-msgid ":ts:cv:`proxy.config.http.connect_attempts_max_retries_dead_server`"
+msgid ":ts:cv:`proxy.config.http.connect_attempts_max_retries_down_server`"
msgstr ""
#: ../../../developer-guide/api/functions/TSHttpOverridableConfig.en.rst:115
diff --git a/doc/release-notes/upgrading.en.rst b/doc/release-notes/upgrading.en.rst
index e0155e4017..fb0d18f80e 100644
--- a/doc/release-notes/upgrading.en.rst
+++ b/doc/release-notes/upgrading.en.rst
@@ -66,6 +66,22 @@ The following incompatible changes to the configurations have been made in this
The records.yaml entry proxy.config.http.down_server.abort_threshold has been removed.
+The records.yaml entry proxy.config.http.connect_attempts_max_retries_dead_server has been renamed to proxy.config.http.connect_attempts_max_retries_down_server.
+
+The records.yaml entry proxy.config.http.connect.dead.policy has been renamed to proxy.config.http.connect.down.policy.
Plugins
-------
+
+Lua Plugin
+~~~~~~~~~~
+The following Http config constants have been renamed:
+
+TS_LUA_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DEAD_SERVER has been renamed to TS_LUA_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DOWN_SERVER.
+
+TS_LUA_CONFIG_HTTP_CONNECT_DEAD_POLICY has been renamed to TS_LUA_CONFIG_HTTP_CONNECT_DOWN_POLICY.
+
+Metrics
+------------------
+
+The HTTP connection metric proxy.process.http.dead_server.no_requests has been renamed to proxy.process.http.down_server.no_requests.
diff --git a/include/ts/apidefs.h.in b/include/ts/apidefs.h.in
index 729da51bae..fd0056a938 100644
--- a/include/ts/apidefs.h.in
+++ b/include/ts/apidefs.h.in
@@ -804,7 +804,7 @@ typedef enum {
TS_CONFIG_HTTP_TRANSACTION_NO_ACTIVITY_TIMEOUT_OUT,
TS_CONFIG_HTTP_TRANSACTION_ACTIVE_TIMEOUT_OUT,
TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES,
- TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DEAD_SERVER,
+ TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DOWN_SERVER,
TS_CONFIG_HTTP_CONNECT_ATTEMPTS_RR_RETRIES,
TS_CONFIG_HTTP_CONNECT_ATTEMPTS_TIMEOUT,
TS_CONFIG_HTTP_DOWN_SERVER_CACHE_TIME,
@@ -878,7 +878,7 @@ typedef enum {
TS_CONFIG_SSL_CLIENT_CA_CERT_FILENAME,
TS_CONFIG_SSL_CLIENT_ALPN_PROTOCOLS,
TS_CONFIG_HTTP_HOST_RESOLUTION_PREFERENCE,
- TS_CONFIG_HTTP_CONNECT_DEAD_POLICY,
+ TS_CONFIG_HTTP_CONNECT_DOWN_POLICY,
TS_CONFIG_HTTP_MAX_PROXY_CYCLES,
TS_CONFIG_PLUGIN_VC_DEFAULT_BUFFER_INDEX,
TS_CONFIG_PLUGIN_VC_DEFAULT_BUFFER_WATER_MARK,
diff --git a/iocore/hostdb/HostDB.cc b/iocore/hostdb/HostDB.cc
index a3c4a44949..3889abdd29 100644
--- a/iocore/hostdb/HostDB.cc
+++ b/iocore/hostdb/HostDB.cc
@@ -1550,7 +1550,7 @@ HostDBRecord::select_best_http(ts_time now, ts_seconds fail_window, sockaddr con
{
ink_assert(0 < rr_count && rr_count <= hostdb_round_robin_max_count);
- // @a best_any is set to a base candidate, which may be dead.
+ // @a best_any is set to a base candidate, which may be down.
HostDBInfo *best_any = nullptr;
// @a best_alive is set when a valid target has been selected and should be used.
HostDBInfo *best_alive = nullptr;
@@ -1567,8 +1567,8 @@ HostDBRecord::select_best_http(ts_time now, ts_seconds fail_window, sockaddr con
// Check and update RR if it's time - this always yields a valid target if there is one.
if (now > ntime && rr_ctime.compare_exchange_strong(ctime, ntime)) {
best_alive = best_any = this->select_next_rr(now, fail_window);
- Dbg(dbg_ctl_hostdb, "Round robin timed interval expired - index %d", this->index_of(best_alive));
- } else { // pick the current index, which may be dead.
+ Debug("hostdb", "Round robin timed interval expired - index %d", this->index_of(best_alive));
+ } else { // pick the current index, which may be down.
best_any = &info[this->rr_idx()];
}
Dbg(dbg_ctl_hostdb, "Using timed round robin - index %d", this->index_of(best_any));
@@ -2201,8 +2201,8 @@ HostDBRecord::select_best_srv(char *target, InkRand *rand, ts_time now, ts_secon
// Array of live targets, sized by @a live_n
HostDBInfo *live[rr.count()];
for (auto &target : rr) {
- // skip dead upstreams.
- if (rr[i].is_dead(now, fail_window)) {
+ // skip down targets.
+ if (rr[i].is_down(now, fail_window)) {
continue;
}
diff --git a/iocore/hostdb/I_HostDBProcessor.h b/iocore/hostdb/I_HostDBProcessor.h
index 463c7612ae..f6ef804edc 100644
--- a/iocore/hostdb/I_HostDBProcessor.h
+++ b/iocore/hostdb/I_HostDBProcessor.h
@@ -139,7 +139,7 @@ struct HostDBInfo {
bool is_alive();
/// Target has failed and is still in the blocked time window.
- bool is_dead(ts_time now, ts_seconds fail_window);
+ bool is_down(ts_time now, ts_seconds fail_window);
/** Select this target.
*
@@ -147,7 +147,7 @@ struct HostDBInfo {
* @param fail_window Failure window.
* @return Status of the selection.
*
- * If a zombie is selected the failure time is updated to make it look dead to other threads in a thread safe
+ * If a zombie is selected the failure time is updated to make it appear down to other threads in a thread safe
* manner. The caller should check @c last_fail_time to see if a zombie was selected.
*/
bool select(ts_time now, ts_seconds fail_window);
@@ -234,7 +234,7 @@ HostDBInfo::is_alive()
}
inline bool
-HostDBInfo::is_dead(ts_time now, ts_seconds fail_window)
+HostDBInfo::is_down(ts_time now, ts_seconds fail_window)
{
auto last_fail = this->last_fail_time();
return (last_fail != TS_TIME_ZERO) && (last_fail + fail_window < now);
@@ -360,10 +360,10 @@ public:
* attempt to connect to the selected target if possible.
*
* @param now Current time to use for aliveness calculations.
- * @param fail_window Blackout time for dead servers.
+ * @param fail_window Blackout time for down servers.
* @return Status of the updated target.
*
- * If the return value is @c HostDBInfo::Status::DEAD this means all targets are dead and there is
+ * If the return value is @c HostDBInfo::Status::DOWN this means all targets are down and there is
* no valid upstream.
*
* @note Concurrency - this is not done under lock and depends on the caller for correct use.
@@ -404,7 +404,7 @@ public:
/** Select an upstream target.
*
* @param now Current time.
- * @param fail_window Dead server blackout time.
+ * @param fail_window Down server blackout time.
* @param hash_addr Inbound remote IP address.
* @return A selected target, or @c nullptr if there are no valid targets.
*
@@ -632,13 +632,13 @@ struct ResolveInfo {
*/
bool resolve_immediate();
- /** Mark the active target as dead.
+ /** Mark the active target as down.
*
* @param now Time of failure.
- * @return @c true if the server was marked as dead, @c false if not.
+ * @return @c true if the server was marked as down, @c false if not.
*
*/
- bool mark_active_server_dead(ts_time now);
+ bool mark_active_server_down(ts_time now);
/** Mark the active target as alive.
*
@@ -844,7 +844,7 @@ ResolveInfo::mark_active_server_alive()
}
inline bool
-ResolveInfo::mark_active_server_dead(ts_time now)
+ResolveInfo::mark_active_server_down(ts_time now)
{
return active != nullptr && active->mark_down(now);
}
diff --git a/plugins/lua/ts_lua_http_config.c b/plugins/lua/ts_lua_http_config.c
index c2055e8b73..36c80195a4 100644
--- a/plugins/lua/ts_lua_http_config.c
+++ b/plugins/lua/ts_lua_http_config.c
@@ -68,8 +68,8 @@ typedef enum {
TS_LUA_CONFIG_HTTP_PER_SERVER_CONNECTION_MAX = TS_CONFIG_HTTP_PER_SERVER_CONNECTION_MAX,
TS_LUA_CONFIG_HTTP_PER_SERVER_CONNECTION_MATCH = TS_CONFIG_HTTP_PER_SERVER_CONNECTION_MATCH,
TS_LUA_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES = TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES,
- TS_LUA_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DEAD_SERVER = TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DEAD_SERVER,
- TS_LUA_CONFIG_HTTP_CONNECT_DEAD_POLICY = TS_CONFIG_HTTP_CONNECT_DEAD_POLICY,
+ TS_LUA_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DOWN_SERVER = TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DOWN_SERVER,
+ TS_LUA_CONFIG_HTTP_CONNECT_DOWN_POLICY = TS_CONFIG_HTTP_CONNECT_DOWN_POLICY,
TS_LUA_CONFIG_HTTP_CONNECT_ATTEMPTS_RR_RETRIES = TS_CONFIG_HTTP_CONNECT_ATTEMPTS_RR_RETRIES,
TS_LUA_CONFIG_HTTP_CONNECT_ATTEMPTS_TIMEOUT = TS_CONFIG_HTTP_CONNECT_ATTEMPTS_TIMEOUT,
TS_LUA_CONFIG_HTTP_DOWN_SERVER_CACHE_TIME = TS_CONFIG_HTTP_DOWN_SERVER_CACHE_TIME,
@@ -203,8 +203,8 @@ ts_lua_var_item ts_lua_http_config_vars[] = {
TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_TRANSACTION_NO_ACTIVITY_TIMEOUT_OUT),
TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_TRANSACTION_ACTIVE_TIMEOUT_OUT),
TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES),
- TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DEAD_SERVER),
- TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_CONNECT_DEAD_POLICY),
+ TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DOWN_SERVER),
+ TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_CONNECT_DOWN_POLICY),
TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_CONNECT_ATTEMPTS_RR_RETRIES),
TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_CONNECT_ATTEMPTS_TIMEOUT),
TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_DOWN_SERVER_CACHE_TIME),
diff --git a/proxy/http/HttpConfig.cc b/proxy/http/HttpConfig.cc
index cd6785b7ed..d8c3a348df 100644
--- a/proxy/http/HttpConfig.cc
+++ b/proxy/http/HttpConfig.cc
@@ -1077,8 +1077,8 @@ register_stat_callbacks()
RecRegisterRawStat(http_rsb, RECT_PROCESS, "proxy.process.http.milestone.sm_finish", RECD_COUNTER, RECP_PERSISTENT,
(int)http_sm_finish_time_stat, RecRawStatSyncSum);
- RecRegisterRawStat(http_rsb, RECT_PROCESS, "proxy.process.http.dead_server.no_requests", RECD_COUNTER, RECP_PERSISTENT,
- (int)http_dead_server_no_requests, RecRawStatSyncSum);
+ RecRegisterRawStat(http_rsb, RECT_PROCESS, "proxy.process.http.down_server.no_requests", RECD_COUNTER, RECP_PERSISTENT,
+ (int)http_down_server_no_requests, RecRawStatSyncSum);
// Current transaction stats parent counter
RecRegisterRawStat(http_rsb, RECT_PROCESS, "proxy.process.http_parent_count", RECD_COUNTER, RECP_PERSISTENT,
@@ -1254,10 +1254,10 @@ HttpConfig::startup()
HttpEstablishStaticConfigFloat(c.oride.background_fill_threshold, "proxy.config.http.background_fill_completed_threshold");
HttpEstablishStaticConfigLongLong(c.oride.connect_attempts_max_retries, "proxy.config.http.connect_attempts_max_retries");
- HttpEstablishStaticConfigLongLong(c.oride.connect_attempts_max_retries_dead_server,
- "proxy.config.http.connect_attempts_max_retries_dead_server");
+ HttpEstablishStaticConfigLongLong(c.oride.connect_attempts_max_retries_down_server,
+ "proxy.config.http.connect_attempts_max_retries_down_server");
- HttpEstablishStaticConfigLongLong(c.oride.connect_dead_policy, "proxy.config.http.connect.dead.policy");
+ HttpEstablishStaticConfigLongLong(c.oride.connect_down_policy, "proxy.config.http.connect.down.policy");
HttpEstablishStaticConfigLongLong(c.oride.connect_attempts_rr_retries, "proxy.config.http.connect_attempts_rr_retries");
HttpEstablishStaticConfigLongLong(c.oride.connect_attempts_timeout, "proxy.config.http.connect_attempts_timeout");
@@ -1541,7 +1541,7 @@ HttpConfig::reconfigure()
params->oride.background_fill_threshold = m_master.oride.background_fill_threshold;
params->oride.connect_attempts_max_retries = m_master.oride.connect_attempts_max_retries;
- params->oride.connect_attempts_max_retries_dead_server = m_master.oride.connect_attempts_max_retries_dead_server;
+ params->oride.connect_attempts_max_retries_down_server = m_master.oride.connect_attempts_max_retries_down_server;
if (m_master.oride.connect_attempts_rr_retries > params->oride.connect_attempts_max_retries) {
Warning("connect_attempts_rr_retries (%" PRIu64 ") is greater than "
"connect_attempts_max_retries (%" PRIu64 "), this means requests "
@@ -1550,7 +1550,7 @@ HttpConfig::reconfigure()
}
params->oride.connect_attempts_rr_retries = m_master.oride.connect_attempts_rr_retries;
params->oride.connect_attempts_timeout = m_master.oride.connect_attempts_timeout;
- params->oride.connect_dead_policy = m_master.oride.connect_dead_policy;
+ params->oride.connect_down_policy = m_master.oride.connect_down_policy;
params->oride.parent_connect_attempts = m_master.oride.parent_connect_attempts;
params->oride.parent_retry_time = m_master.oride.parent_retry_time;
params->oride.parent_fail_threshold = m_master.oride.parent_fail_threshold;
diff --git a/proxy/http/HttpConfig.h b/proxy/http/HttpConfig.h
index 3bb0244788..c116976a66 100644
--- a/proxy/http/HttpConfig.h
+++ b/proxy/http/HttpConfig.h
@@ -356,7 +356,7 @@ enum {
http_origin_shutdown_cleanup_entry,
http_origin_shutdown_tunnel_abort,
- http_dead_server_no_requests,
+ http_down_server_no_requests,
http_origin_reuse,
http_origin_not_found,
@@ -681,11 +681,11 @@ struct OverridableHttpConfigParams {
// origin server connect attempts //
////////////////////////////////////
MgmtInt connect_attempts_max_retries = 0;
- MgmtInt connect_attempts_max_retries_dead_server = 3;
+ MgmtInt connect_attempts_max_retries_down_server = 3;
MgmtInt connect_attempts_rr_retries = 3;
MgmtInt connect_attempts_timeout = 30;
- MgmtInt connect_dead_policy = 2;
+ MgmtInt connect_down_policy = 2;
////////////////////////////////////
// parent proxy connect attempts //
diff --git a/proxy/http/HttpSM.cc b/proxy/http/HttpSM.cc
index 22a80696fb..c0087fadef 100644
--- a/proxy/http/HttpSM.cc
+++ b/proxy/http/HttpSM.cc
@@ -4623,9 +4623,9 @@ HttpSM::track_connect_fail() const
bool retval = false;
if (t_state.current.server->had_connect_fail()) {
// What does our policy say?
- if (t_state.txn_conf->connect_dead_policy == 2) { // Any connection error through TLS handshake
+ if (t_state.txn_conf->connect_down_policy == 2) { // Any connection error through TLS handshake
retval = true;
- } else if (t_state.txn_conf->connect_dead_policy == 1) { // Any connection error through TCP
+ } else if (t_state.txn_conf->connect_down_policy == 1) { // Any connection error through TCP
retval = t_state.current.server->connect_result != -ENET_SSL_CONNECT_FAILED;
}
}
@@ -5353,8 +5353,8 @@ HttpSM::do_http_server_open(bool raw, bool only_direct)
}
if (HttpTransact::is_server_negative_cached(&t_state) == true &&
- t_state.txn_conf->connect_attempts_max_retries_dead_server <= 0) {
- call_transact_and_set_next_state(HttpTransact::OriginDead);
+ t_state.txn_conf->connect_attempts_max_retries_down_server <= 0) {
+ call_transact_and_set_next_state(HttpTransact::OriginDown);
return;
}
}
@@ -5973,7 +5973,7 @@ HttpSM::release_server_session(bool serve_from_cache)
//
// We failed in our attempt post (or put) a document
// to the server. Two cases happen here. The normal
-// one is the server died, in which case we ought to
+// one is the server is down, in which case we ought to
// return an error to the client. The second one is
// stupid. The server returned a response without reading
// all the post data. In order to be as transparent as
@@ -6011,7 +6011,7 @@ HttpSM::handle_post_failure()
tunnel.deallocate_buffers();
tunnel.reset();
- // Server died
+ // Server is down
if (t_state.current.state == HttpTransact::STATE_UNDEFINED || t_state.current.state == HttpTransact::CONNECTION_ALIVE) {
t_state.set_connect_fail(server_txn->get_netvc()->lerrno);
t_state.current.state = HttpTransact::CONNECTION_CLOSED;
diff --git a/proxy/http/HttpTransact.cc b/proxy/http/HttpTransact.cc
index 3296671632..7753db9508 100644
--- a/proxy/http/HttpTransact.cc
+++ b/proxy/http/HttpTransact.cc
@@ -903,17 +903,17 @@ HttpTransact::TooEarly(State *s)
}
void
-HttpTransact::OriginDead(State *s)
+HttpTransact::OriginDown(State *s)
{
TxnDebug("http_trans", "origin server is marked down");
bootstrap_state_variables_from_request(s, &s->hdr_info.client_request);
build_error_response(s, HTTP_STATUS_BAD_GATEWAY, "Origin Server Marked Down", "connect#failed_connect");
- HTTP_INCREMENT_DYN_STAT(http_dead_server_no_requests);
+ HTTP_INCREMENT_DYN_STAT(http_down_server_no_requests);
char *url_str = s->hdr_info.client_request.url_string_get(&s->arena);
int host_len;
const char *host_name_ptr = s->unmapped_url.host_get(&host_len);
std::string_view host_name{host_name_ptr, size_t(host_len)};
- ts::bwprint(error_bw_buffer, "CONNECT: dead server no request to {} for host='{}' url='{}'", s->current.server->dst_addr,
+ ts::bwprint(error_bw_buffer, "CONNECT: down server no request to {} for host='{}' url='{}'", s->current.server->dst_addr,
host_name, ts::bwf::FirstOf(url_str, "<none>"));
Log::error("%s", error_bw_buffer.c_str());
s->arena.str_free(url_str);
@@ -1803,7 +1803,7 @@ HttpTransact::PPDNSLookup(State *s)
return;
}
ink_assert(s->current.request_to == ResolveInfo::HOST_NONE);
- handle_parent_died(s);
+ handle_parent_down(s);
return;
}
@@ -1817,7 +1817,7 @@ HttpTransact::PPDNSLookup(State *s)
} else {
// We could be out of parents here if all the parents failed DNS lookup
ink_assert(s->current.request_to == ResolveInfo::HOST_NONE);
- handle_parent_died(s);
+ handle_parent_down(s);
}
return;
}
@@ -1908,7 +1908,7 @@ HttpTransact::OSDNSLookup(State *s)
build_error_response(s, HTTP_STATUS_INTERNAL_SERVER_ERROR, "Cannot find server.", "connect#dns_failed");
log_msg = "looking up";
} else {
- build_error_response(s, HTTP_STATUS_INTERNAL_SERVER_ERROR, "No valid server.", "connect#all_dead");
+ build_error_response(s, HTTP_STATUS_INTERNAL_SERVER_ERROR, "No valid server.", "connect#all_down");
log_msg = "no valid server";
}
char *url_str = s->hdr_info.client_request.url_string_get(&s->arena, nullptr);
@@ -2193,7 +2193,7 @@ HttpTransact::LookupSkipOpenServer(State *s)
if (s->current.request_to == ResolveInfo::PARENT_PROXY) {
TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, PPDNSLookupAPICall);
} else if (s->parent_result.result == PARENT_FAIL) {
- handle_parent_died(s);
+ handle_parent_down(s);
return;
}
@@ -2863,7 +2863,7 @@ HttpTransact::HandleCacheOpenReadHit(State *s)
update_current_info(&s->current, nullptr, ResolveInfo::UNDEFINED_LOOKUP, true);
TxnDebug("http_trans", "CacheOpenReadHit - server_down, returning stale document");
} else {
- handle_parent_died(s);
+ handle_parent_down(s);
return;
}
}
@@ -2893,7 +2893,7 @@ HttpTransact::HandleCacheOpenReadHit(State *s)
} else if (s->current.request_to == ResolveInfo::ORIGIN_SERVER) {
return CallOSDNSLookup(s);
} else {
- handle_parent_died(s);
+ handle_parent_down(s);
return;
}
}
@@ -3305,7 +3305,7 @@ HttpTransact::HandleCacheOpenReadMiss(State *s)
// a parent lookup could come back as PARENT_FAIL if in parent.config go_direct == false and
// there are no available parents (all down).
if (s->parent_result.result == PARENT_FAIL) {
- handle_parent_died(s);
+ handle_parent_down(s);
return;
}
if (!s->current.server->dst_addr.isValid()) {
@@ -3317,7 +3317,7 @@ HttpTransact::HandleCacheOpenReadMiss(State *s)
if (s->current.request_to == ResolveInfo::PARENT_PROXY) {
TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, HttpTransact::PPDNSLookupAPICall);
} else {
- handle_parent_died(s);
+ handle_parent_down(s);
return;
}
}
@@ -3367,7 +3367,7 @@ HttpTransact::OriginServerRawOpen(State *s)
/* fall through */
case OUTBOUND_CONGESTION:
/* fall through */
- handle_server_died(s);
+ handle_server_down(s);
ink_assert(s->cache_info.action == CACHE_DO_NO_ACTION);
s->next_action = SM_ACTION_INTERNAL_CACHE_NOOP;
@@ -3636,7 +3636,7 @@ HttpTransact::handle_response_from_parent(State *s)
markParentDown(s);
}
s->parent_result.result = PARENT_FAIL;
- handle_parent_died(s);
+ handle_parent_down(s);
return;
}
@@ -3756,7 +3756,7 @@ HttpTransact::handle_response_from_server(State *s)
case BAD_INCOMING_RESPONSE:
if (is_server_negative_cached(s)) {
- max_connect_retries = s->txn_conf->connect_attempts_max_retries_dead_server - 1;
+ max_connect_retries = s->txn_conf->connect_attempts_max_retries_down_server - 1;
} else {
// server not yet negative cached - use default number of retries
max_connect_retries = s->txn_conf->connect_attempts_max_retries;
@@ -3941,10 +3941,10 @@ HttpTransact::handle_server_connection_not_open(State *s)
} else {
switch (s->current.request_to) {
case ResolveInfo::PARENT_PROXY:
- handle_parent_died(s);
+ handle_parent_down(s);
break;
case ResolveInfo::ORIGIN_SERVER:
- handle_server_died(s);
+ handle_server_down(s);
break;
default:
ink_assert(!("s->current.request_to is not P.P. or O.S. - hmmm."));
@@ -6494,7 +6494,7 @@ HttpTransact::is_request_valid(State *s, HTTPHdr *incoming_request)
// and the first set of bytes is relatively small. This distinction is more apparent in the
// case where the origin connection is a KA session. In this case, the session may not have
// been used for a long time. In that case, we'll immediately queue up session to send to the
-// origin, without any idea of the state of the connection. If the origin is dead (or the connection
+// origin, without any idea of the state of the connection. If the origin is down (or the connection
// is broken for some other reason) we'll immediately get a RST back. In that case-- since no
// bytes were ACKd by the remote end, we can retry/redispatch the request.
//
@@ -7525,7 +7525,7 @@ HttpTransact::AuthenticationNeeded(const OverridableHttpConfigParams *p, HTTPHdr
}
void
-HttpTransact::handle_parent_died(State *s)
+HttpTransact::handle_parent_down(State *s)
{
ink_assert(s->parent_result.result == PARENT_FAIL);
@@ -7546,7 +7546,7 @@ HttpTransact::handle_parent_died(State *s)
}
void
-HttpTransact::handle_server_died(State *s)
+HttpTransact::handle_server_down(State *s)
{
const char *reason = nullptr;
const char *body_type = "UNKNOWN";
@@ -7557,7 +7557,7 @@ HttpTransact::handle_server_died(State *s)
////////////////////////////////////////////////////////
switch (s->current.state) {
- case CONNECTION_ALIVE: /* died while alive for unknown reason */
+ case CONNECTION_ALIVE: /* down while alive for unknown reason */
ink_release_assert(s->hdr_info.response_error != NO_RESPONSE_HEADER_ERROR);
status = HTTP_STATUS_BAD_GATEWAY;
reason = "Unknown Error";
@@ -7609,7 +7609,7 @@ HttpTransact::handle_server_died(State *s)
case STATE_UNDEFINED:
case TRANSACTION_COMPLETE:
default: /* unknown death */
- ink_release_assert(!"[handle_server_died] Unreasonable state - not dead, shouldn't be here");
+ ink_release_assert(!"[handle_server_down] Unreasonable state - not down, shouldn't be here");
status = HTTP_STATUS_BAD_GATEWAY;
reason = nullptr;
body_type = "response#bad_response";
diff --git a/proxy/http/HttpTransact.h b/proxy/http/HttpTransact.h
index 5c54c47f52..e30a811465 100644
--- a/proxy/http/HttpTransact.h
+++ b/proxy/http/HttpTransact.h
@@ -977,7 +977,7 @@ public:
static void Forbidden(State *s);
static void SelfLoop(State *s);
static void TooEarly(State *s);
- static void OriginDead(State *s);
+ static void OriginDown(State *s);
static void PostActiveTimeoutResponse(State *s);
static void PostInactiveTimeoutResponse(State *s);
static void DecideCacheLookup(State *s);
@@ -1087,8 +1087,8 @@ public:
static Freshness_t what_is_document_freshness(State *s, HTTPHdr *client_request, HTTPHdr *cached_obj_response);
static Authentication_t AuthenticationNeeded(const OverridableHttpConfigParams *p, HTTPHdr *client_request,
HTTPHdr *obj_response);
- static void handle_parent_died(State *s);
- static void handle_server_died(State *s);
+ static void handle_parent_down(State *s);
+ static void handle_server_down(State *s);
static void build_error_response(State *s, HTTPStatus status_code, const char *reason_phrase_or_null,
const char *error_body_type);
static void build_redirect_response(State *s);
diff --git a/src/records/RecordsConfig.cc b/src/records/RecordsConfig.cc
index c01d2a9b76..c3213febc6 100644
--- a/src/records/RecordsConfig.cc
+++ b/src/records/RecordsConfig.cc
@@ -472,13 +472,13 @@ static const RecordElement RecordsConfig[] =
// ##################################
{RECT_CONFIG, "proxy.config.http.connect_attempts_max_retries", RECD_INT, "3", RECU_DYNAMIC, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
,
- {RECT_CONFIG, "proxy.config.http.connect_attempts_max_retries_dead_server", RECD_INT, "1", RECU_DYNAMIC, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
+ {RECT_CONFIG, "proxy.config.http.connect_attempts_max_retries_down_server", RECD_INT, "1", RECU_DYNAMIC, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
,
{RECT_CONFIG, "proxy.config.http.connect_attempts_rr_retries", RECD_INT, "3", RECU_DYNAMIC, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
,
{RECT_CONFIG, "proxy.config.http.connect_attempts_timeout", RECD_INT, "30", RECU_DYNAMIC, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
,
- {RECT_CONFIG, "proxy.config.http.connect.dead.policy", RECD_INT, "2", RECU_DYNAMIC, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
+ {RECT_CONFIG, "proxy.config.http.connect.down.policy", RECD_INT, "2", RECU_DYNAMIC, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
,
{RECT_CONFIG, "proxy.config.http.down_server.cache_time", RECD_INT, "60", RECU_DYNAMIC, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
,
diff --git a/src/shared/overridable_txn_vars.cc b/src/shared/overridable_txn_vars.cc
index 6e6c3c60a6..fc504047a1 100644
--- a/src/shared/overridable_txn_vars.cc
+++ b/src/shared/overridable_txn_vars.cc
@@ -48,7 +48,7 @@ const std::unordered_map<std::string_view, std::tuple<const TSOverridableConfigK
{"proxy.config.net.sock_option_flag_out", {TS_CONFIG_NET_SOCK_OPTION_FLAG_OUT, TS_RECORDDATATYPE_INT} },
{"proxy.config.net.sock_packet_mark_out", {TS_CONFIG_NET_SOCK_PACKET_MARK_OUT, TS_RECORDDATATYPE_INT} },
{"proxy.config.websocket.active_timeout", {TS_CONFIG_WEBSOCKET_ACTIVE_TIMEOUT, TS_RECORDDATATYPE_INT} },
- {"proxy.config.http.connect.dead.policy", {TS_CONFIG_HTTP_CONNECT_DEAD_POLICY, TS_RECORDDATATYPE_INT} },
+ {"proxy.config.http.connect.down.policy", {TS_CONFIG_HTTP_CONNECT_DOWN_POLICY, TS_RECORDDATATYPE_INT} },
{"proxy.config.http.flow_control.enabled", {TS_CONFIG_HTTP_FLOW_CONTROL_ENABLED, TS_RECORDDATATYPE_INT} },
{"proxy.config.http.send_http11_requests", {TS_CONFIG_HTTP_SEND_HTTP11_REQUESTS, TS_RECORDDATATYPE_INT} },
{"proxy.config.body_factory.template_base", {TS_CONFIG_BODY_FACTORY_TEMPLATE_BASE, TS_RECORDDATATYPE_STRING} },
@@ -148,8 +148,8 @@ const std::unordered_map<std::string_view, std::tuple<const TSOverridableConfigK
{TS_CONFIG_HTTP_CACHE_IGNORE_ACCEPT_LANGUAGE_MISMATCH, TS_RECORDDATATYPE_INT} },
{"proxy.config.http.cache.ignore_accept_encoding_mismatch",
{TS_CONFIG_HTTP_CACHE_IGNORE_ACCEPT_ENCODING_MISMATCH, TS_RECORDDATATYPE_INT} },
- {"proxy.config.http.connect_attempts_max_retries_dead_server",
- {TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DEAD_SERVER, TS_RECORDDATATYPE_INT} },
+ {"proxy.config.http.connect_attempts_max_retries_down_server",
+ {TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DOWN_SERVER, TS_RECORDDATATYPE_INT} },
{"proxy.config.http.parent_proxy.per_parent_connect_attempts", {TS_CONFIG_HTTP_PER_PARENT_CONNECT_ATTEMPTS, TS_RECORDDATATYPE_INT} },
{"proxy.config.http.no_dns_just_forward_to_parent", {TS_CONFIG_HTTP_NO_DNS_JUST_FORWARD_TO_PARENT, TS_RECORDDATATYPE_INT} },
diff --git a/src/traffic_server/InkAPI.cc b/src/traffic_server/InkAPI.cc
index 2baf4280b4..6f43a07f48 100644
--- a/src/traffic_server/InkAPI.cc
+++ b/src/traffic_server/InkAPI.cc
@@ -8595,11 +8595,11 @@ _conf_to_memberp(TSOverridableConfigKey conf, OverridableHttpConfigParams *overr
case TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES:
ret = _memberp_to_generic(&overridableHttpConfig->connect_attempts_max_retries, conv);
break;
- case TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DEAD_SERVER:
- ret = _memberp_to_generic(&overridableHttpConfig->connect_attempts_max_retries_dead_server, conv);
+ case TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DOWN_SERVER:
+ ret = _memberp_to_generic(&overridableHttpConfig->connect_attempts_max_retries_down_server, conv);
break;
- case TS_CONFIG_HTTP_CONNECT_DEAD_POLICY:
- ret = _memberp_to_generic(&overridableHttpConfig->connect_dead_policy, conv);
+ case TS_CONFIG_HTTP_CONNECT_DOWN_POLICY:
+ ret = _memberp_to_generic(&overridableHttpConfig->connect_down_policy, conv);
break;
case TS_CONFIG_HTTP_CONNECT_ATTEMPTS_RR_RETRIES:
ret = _memberp_to_generic(&overridableHttpConfig->connect_attempts_rr_retries, conv);
diff --git a/src/traffic_server/InkAPITest.cc b/src/traffic_server/InkAPITest.cc
index ed8e498dfa..6e1e0a73a1 100644
--- a/src/traffic_server/InkAPITest.cc
+++ b/src/traffic_server/InkAPITest.cc
@@ -8603,7 +8603,7 @@ std::array<std::string_view, TS_CONFIG_LAST_ENTRY> SDK_Overridable_Configs = {
"proxy.config.http.keep_alive_no_activity_timeout_in", "proxy.config.http.keep_alive_no_activity_timeout_out",
"proxy.config.http.transaction_no_activity_timeout_in", "proxy.config.http.transaction_no_activity_timeout_out",
"proxy.config.http.transaction_active_timeout_out", "proxy.config.http.connect_attempts_max_retries",
- "proxy.config.http.connect_attempts_max_retries_dead_server", "proxy.config.http.connect_attempts_rr_retries",
+ "proxy.config.http.connect_attempts_max_retries_down_server", "proxy.config.http.connect_attempts_rr_retries",
"proxy.config.http.connect_attempts_timeout", "proxy.config.http.down_server.cache_time",
"proxy.config.http.doc_in_cache_skip_dns", "proxy.config.http.background_fill_active_timeout",
"proxy.config.http.response_server_str", "proxy.config.http.cache.heuristic_lm_factor",
@@ -8640,7 +8640,7 @@ std::array<std::string_view, TS_CONFIG_LAST_ENTRY> SDK_Overridable_Configs = {
"proxy.config.ssl.client.verify.server.policy", "proxy.config.ssl.client.verify.server.properties",
"proxy.config.ssl.client.sni_policy", "proxy.config.ssl.client.private_key.filename",
"proxy.config.ssl.client.CA.cert.filename", "proxy.config.ssl.client.alpn_protocols",
- "proxy.config.hostdb.ip_resolve", "proxy.config.http.connect.dead.policy",
+ "proxy.config.hostdb.ip_resolve", "proxy.config.http.connect.down.policy",
"proxy.config.http.max_proxy_cycles", "proxy.config.plugin.vc.default_buffer_index",
"proxy.config.plugin.vc.default_buffer_water_mark", "proxy.config.net.sock_notsent_lowat",
"proxy.config.body_factory.response_suppression_mode", "proxy.config.http.parent_proxy.enable_parent_timeout_markdowns",
diff --git a/tests/gold_tests/records/gold/full_records.yaml b/tests/gold_tests/records/gold/full_records.yaml
index b8372eee73..55ff1e7764 100644
--- a/tests/gold_tests/records/gold/full_records.yaml
+++ b/tests/gold_tests/records/gold/full_records.yaml
@@ -181,10 +181,10 @@ ts:
size: 4096
chunking_enabled: 1
connect:
- dead:
+ down:
policy: 2
connect_attempts_max_retries: 3
- connect_attempts_max_retries_dead_server: 1
+ connect_attempts_max_retries_down_server: 1
connect_attempts_rr_retries: 3
connect_attempts_timeout: 30
connect_ports: '443'
diff --git a/tests/gold_tests/records/legacy_config/full_records.config b/tests/gold_tests/records/legacy_config/full_records.config
index c88cdf1cf7..dfdc345242 100644
--- a/tests/gold_tests/records/legacy_config/full_records.config
+++ b/tests/gold_tests/records/legacy_config/full_records.config
@@ -141,10 +141,10 @@ CONFIG proxy.config.http.accept_no_activity_timeout INT 120
CONFIG proxy.config.http.background_fill_active_timeout INT 0
CONFIG proxy.config.http.background_fill_completed_threshold FLOAT 0.0
CONFIG proxy.config.http.connect_attempts_max_retries INT 3
-CONFIG proxy.config.http.connect_attempts_max_retries_dead_server INT 1
+CONFIG proxy.config.http.connect_attempts_max_retries_down_server INT 1
CONFIG proxy.config.http.connect_attempts_rr_retries INT 3
CONFIG proxy.config.http.connect_attempts_timeout INT 30
-CONFIG proxy.config.http.connect.dead.policy INT 2
+CONFIG proxy.config.http.connect.down.policy INT 2
CONFIG proxy.config.http.down_server.cache_time INT 60
CONFIG proxy.config.http.negative_revalidating_enabled INT 1
CONFIG proxy.config.http.negative_revalidating_lifetime INT 1800
diff --git a/tests/gold_tests/tls/tls_verify_override_base.test.py b/tests/gold_tests/tls/tls_verify_override_base.test.py
index 3ccf55360e..4063898bde 100644
--- a/tests/gold_tests/tls/tls_verify_override_base.test.py
+++ b/tests/gold_tests/tls/tls_verify_override_base.test.py
@@ -115,7 +115,7 @@ ts.Disk.records_config.update({
'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port),
'proxy.config.dns.resolv_conf': 'NULL',
'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE',
- 'proxy.config.http.connect.dead.policy': 1, # Don't count TLS failures for dead upstream.
+ 'proxy.config.http.connect.down.policy': 1, # Don't count TLS failures when deciding whether the server is down.
})
dns.addRecords(records={"foo.com.": ["127.0.0.1"]})
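
As a quick reference for upgrades (not part of the patch), the renamed keys
in records.yaml form, using the defaults shown in this commit:

    ts:
      http:
        # Before this commit the keys were
        # connect_attempts_max_retries_dead_server and connect.dead.policy.
        connect_attempts_max_retries_down_server: 1
        connect:
          down:
            policy: 2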