SolidWallOfCode commented on a change in pull request #7622:
URL: https://github.com/apache/trafficserver/pull/7622#discussion_r609083731



##########
File path: proxy/http/HttpSM.cc
##########
@@ -289,6 +278,160 @@ HttpVCTable::cleanup_all()
   }
 }
 
+class ConnectingEntry : public Continuation
+{
+public:
+  std::set<HttpSM *> _connect_sms;
+  NetVConnection *_netvc        = nullptr;
+  IOBufferReader *_netvc_reader = nullptr;
+  MIOBuffer *_netvc_read_buffer = nullptr;
+  std::string sni;
+  std::string cert_name;
+  IpEndpoint _ipaddr;
+  std::string hostname;
+  Action *_pending_action = nullptr;
+  NetVCOptions opt;
+
+  void remove_entry();
+  int state_http_server_open(int event, void *data);
+  static PoolableSession *create_server_session(HttpSM *root_sm, NetVConnection *netvc, MIOBuffer *netvc_read_buffer,
+                                                IOBufferReader *netvc_reader);
+};
+
+struct IpHelper {
+  size_t
+  operator()(IpEndpoint const &arg) const
+  {
+    return IpAddr{&arg.sa}.hash();
+  }
+  bool
+  operator()(IpEndpoint const &arg1, IpEndpoint const &arg2) const
+  {
+    return ats_ip_addr_port_eq(&arg1.sa, &arg2.sa);
+  }
+};
+using ConnectingIpPool = std::unordered_multimap<IpEndpoint, ConnectingEntry *, IpHelper>;
+
+class ConnectingPool
+{
+public:
+  ConnectingPool() {}
+  ConnectingIpPool m_ip_pool;
+};
+
+void
+initialize_thread_for_connecting_pools(EThread *thread)
+{
+  if (thread->connecting_pool == nullptr) {
+    thread->connecting_pool = new ConnectingPool();
+  }
+}
+
+int
+ConnectingEntry::state_http_server_open(int event, void *data)
+{
+  Debug("http_connect", "entered inside 
ConnectingEntry::state_http_server_open");
+
+  switch (event) {
+  case NET_EVENT_OPEN: {
+    _netvc                 = static_cast<NetVConnection *>(data);
+    UnixNetVConnection *vc = static_cast<UnixNetVConnection *>(_netvc);
+    ink_release_assert(_pending_action == nullptr || _pending_action->continuation == vc->get_action()->continuation);
+    _pending_action = nullptr;
+    Debug("http_connect", "ConnectingEntrysetting handler for TCP handshake");
+    // Just want to get a write-ready event so we know that the TCP handshake is complete.
+    // The buffer we create will be handed over to the eventually created server session.
+    _netvc_read_buffer = new_MIOBuffer(HTTP_SERVER_RESP_HDR_BUFFER_INDEX);
+    _netvc_reader      = _netvc_read_buffer->alloc_reader();
+    _netvc->do_io_write(this, 1, _netvc_reader);
+    ink_release_assert(!_connect_sms.empty());
+    if (!_connect_sms.empty()) {
+      HttpSM *prime_connect_sm = *(_connect_sms.begin());
+      _netvc->set_inactivity_timeout(prime_connect_sm->get_server_connect_timeout());
+    }
+    ink_release_assert(_pending_action == nullptr);
+    return 0;
+  }
+  case VC_EVENT_READ_COMPLETE:
+  case VC_EVENT_WRITE_READY:
+  case VC_EVENT_WRITE_COMPLETE: {
+    Debug("http_connect", "Kick off %" PRId64 " state machines waiting for 
origin", _connect_sms.size());
+    this->remove_entry();
+    _netvc->do_io_write(nullptr, 0, nullptr);
+    if (!_connect_sms.empty()) {
+      auto prime_iter = _connect_sms.rbegin();
+      ink_release_assert(prime_iter != _connect_sms.rend());
+      PoolableSession *new_session = ConnectingEntry::create_server_session(*prime_iter, _netvc, _netvc_read_buffer, _netvc_reader);
+      _netvc                       = nullptr;
+
+      // Did we end up with a multiplexing session?
+      int count = 0;
+      if (new_session->is_multiplexing()) {
+        // Hand off to all queued up ConnectSM's.
+        while (!_connect_sms.empty()) {

Review comment:
       I'd be tempted to do
   ```
   for (HttpSM *sm : _connect_sms) {
     Debug("http_connect", "ConnectingEntry Pass along CONNECT_EVENT_TXN %d", count++);
     SCOPED_MUTEX_LOCK(lock, sm->mutex, this_ethread());
     sm->handleEvent(CONNECT_EVENT_TXN, new_session);
   }
   _connect_sms.clear();
   ```
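
   As a rough, self-contained sketch of that shape, with hypothetical stand-ins (`StateMachine`, `hand_off`, and the event constant's value replace `HttpSM`, the real hand-off code, and the ATS definition of `CONNECT_EVENT_TXN`; `std::scoped_lock` stands in for `SCOPED_MUTEX_LOCK`):
   ```
   #include <iostream>
   #include <mutex>
   #include <set>

   // Hypothetical stand-in for HttpSM: an id, a mutex, and an event handler.
   struct StateMachine {
     std::mutex mutex;
     int id;
     explicit StateMachine(int i) : id(i) {}
     void handleEvent(int event, void *session)
     {
       std::cout << "SM " << id << " handling event " << event << " with session " << session << "\n";
     }
   };

   constexpr int CONNECT_EVENT_TXN = 1; // hypothetical event code

   // Visit every waiting state machine under its own lock, then clear the set
   // once, instead of popping and erasing entries one at a time inside the loop.
   void hand_off(std::set<StateMachine *> &connect_sms, void *new_session)
   {
     int count = 0;
     for (StateMachine *sm : connect_sms) {
       std::cout << "Pass along CONNECT_EVENT_TXN " << count++ << "\n";
       std::scoped_lock lock(sm->mutex); // analogous to SCOPED_MUTEX_LOCK(lock, sm->mutex, this_ethread())
       sm->handleEvent(CONNECT_EVENT_TXN, new_session);
     }
     connect_sms.clear();
   }

   int main()
   {
     StateMachine a{1}, b{2};
     std::set<StateMachine *> waiting{&a, &b};
     int session = 42; // stand-in for the PoolableSession pointer
     hand_off(waiting, &session);
     return 0;
   }
   ```
   The appeal of this form is that the container is never mutated while it is being iterated: each waiting state machine receives the event under its own lock, and a single `clear()` at the end replaces repeated erasure inside the loop.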



