maskit commented on a change in pull request #7622:
URL: https://github.com/apache/trafficserver/pull/7622#discussion_r645395815
##########
File path: proxy/http/HttpSM.cc
##########
@@ -289,6 +277,160 @@ HttpVCTable::cleanup_all()
}
}
+class ConnectingEntry : public Continuation
+{
+public:
+ std::set<HttpSM *> _connect_sms;
+ NetVConnection *_netvc = nullptr;
+ IOBufferReader *_netvc_reader = nullptr;
+ MIOBuffer *_netvc_read_buffer = nullptr;
+ std::string sni;
+ std::string cert_name;
+ IpEndpoint _ipaddr;
+ std::string hostname;
+ Action *_pending_action = nullptr;
+ NetVCOptions opt;
+
+ void remove_entry();
+ int state_http_server_open(int event, void *data);
+ static PoolableSession *create_server_session(HttpSM *root_sm, NetVConnection *netvc, MIOBuffer *netvc_read_buffer,
+                                                IOBufferReader *netvc_reader);
+};
+
+struct IpHelper {
+ size_t
+ operator()(IpEndpoint const &arg) const
+ {
+ return IpAddr{&arg.sa}.hash();
+ }
+ bool
+ operator()(IpEndpoint const &arg1, IpEndpoint const &arg2) const
+ {
+ return ats_ip_addr_port_eq(&arg1.sa, &arg2.sa);
+ }
+};
+using ConnectingIpPool = std::unordered_multimap<IpEndpoint, ConnectingEntry *, IpHelper>;
+
+class ConnectingPool
+{
+public:
+ ConnectingPool() {}
+ ConnectingIpPool m_ip_pool;
+};
+
+void
+initialize_thread_for_connecting_pools(EThread *thread)
+{
+ if (thread->connecting_pool == nullptr) {
+ thread->connecting_pool = new ConnectingPool();
+ }
+}
+
+int
+ConnectingEntry::state_http_server_open(int event, void *data)
+{
+ Debug("http_connect", "entered inside
ConnectingEntry::state_http_server_open");
+
+ switch (event) {
+ case NET_EVENT_OPEN: {
+ _netvc = static_cast<NetVConnection *>(data);
+ UnixNetVConnection *vc = static_cast<UnixNetVConnection *>(_netvc);
+ ink_release_assert(_pending_action == nullptr || _pending_action->continuation == vc->get_action()->continuation);
+ _pending_action = nullptr;
+ Debug("http_connect", "ConnectingEntrysetting handler for TCP handshake");
+ // Just want to get a write-ready event so we know that the TCP handshake is complete.
+ // The buffer we create will be handed over to the eventually created server session
+ _netvc_read_buffer = new_MIOBuffer(HTTP_SERVER_RESP_HDR_BUFFER_INDEX);
+ _netvc_reader = _netvc_read_buffer->alloc_reader();
+ _netvc->do_io_write(this, 1, _netvc_reader);
+ ink_release_assert(!_connect_sms.empty());
+ if (!_connect_sms.empty()) {
+ HttpSM *prime_connect_sm = *(_connect_sms.begin());
+ _netvc->set_inactivity_timeout(prime_connect_sm->get_server_connect_timeout());
+ }
+ ink_release_assert(_pending_action == nullptr);
+ return 0;
+ }
+ case VC_EVENT_READ_COMPLETE:
+ case VC_EVENT_WRITE_READY:
+ case VC_EVENT_WRITE_COMPLETE: {
+ Debug("http_connect", "Kick off %" PRId64 " state machines waiting for
origin", _connect_sms.size());
+ this->remove_entry();
+ _netvc->do_io_write(nullptr, 0, nullptr);
+ if (!_connect_sms.empty()) {
+ auto prime_iter = _connect_sms.rbegin();
+ ink_release_assert(prime_iter != _connect_sms.rend());
+ PoolableSession *new_session = (*prime_iter)->create_server_session(_netvc, _netvc_read_buffer, _netvc_reader);
+ _netvc = nullptr;
+
+ // Did we end up with a multiplexing session?
+ int count = 0;
+ if (new_session->is_multiplexing()) {
+ // Hand off to all queued up ConnectSM's.
Review comment:
How do you know a multiplexed connection can process all the requests?
An origin server may stop accepting new requests on a connection for some
reason (e.g. a max-requests-per-connection limit to mitigate memory pressure
from a leak), and the queued-up requests would simply fail, whereas they could
have been processed successfully over H1 if the server still accepts new
connections.
Also, there is no guarantee that a multiplexed connection can process all the
requests at once. The number of concurrent H2 streams might be set to 1 for
some reason.
In that sense, whether a connection is multiplexed is not important. A
non-multiplexed connection can be handled as a multiplexed connection that
only processes one request at a time.
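
A minimal standalone sketch of that alternative, assuming the session can
report how many more requests it is currently willing to take. The names
Session, capacity(), dispatch_waiters(), H1Session, and H2Session below are
hypothetical stand-ins for illustration, not ATS APIs:

```cpp
#include <deque>

// Hypothetical stand-ins for the ATS types involved (not real ATS APIs).
struct Session {
  virtual ~Session() = default;
  // How many more requests this session will take right now.  An HTTP/1.x
  // session answers 1; an HTTP/2 session answers whatever its peer's
  // SETTINGS_MAX_CONCURRENT_STREAMS currently allows (possibly also 1).
  virtual int capacity() const = 0;
};

struct H1Session : Session {
  int capacity() const override { return 1; }
};

struct H2Session : Session {
  explicit H2Session(int max_streams) : max_streams(max_streams) {}
  int capacity() const override { return max_streams; }
  int max_streams;
};

struct StateMachine {
  void attach(Session *s) { session = s; }
  Session *session = nullptr;
};

// Hand the freshly opened session to at most capacity() queued state
// machines, never branching on "is this connection multiplexed?".  Whatever
// cannot be served right now stays queued (and may warrant another origin
// connection), so a non-multiplexed connection is just the degenerate case
// with capacity 1.
inline std::deque<StateMachine *>
dispatch_waiters(Session &session, std::deque<StateMachine *> waiters)
{
  for (int slots = session.capacity(); slots > 0 && !waiters.empty(); --slots) {
    waiters.front()->attach(&session);
    waiters.pop_front();
  }
  return waiters; // still-waiting state machines
}
```

With this shape an H1 session and an H2 session whose peer allows only one
concurrent stream are handled identically, and any state machines left in the
returned queue can keep waiting for capacity or trigger another origin
connection.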