On Fri, Jan 21, 2022 at 1:09 AM <minf...@apache.org> wrote:
>
> Author: minfrin
> Date: Fri Jan 21 00:09:24 2022
> New Revision: 1897281
>
> URL: http://svn.apache.org/viewvc?rev=1897281&view=rev
> Log:
> event: Add support for non-blocking behaviour in the
> CONN_STATE_READ_REQUEST_LINE phase, in addition to the existing
> CONN_STATE_WRITE_COMPLETION phase. Update mod_ssl to perform non-blocking
> TLS handshakes.
[]
>
> Modified: httpd/httpd/trunk/modules/ssl/mod_ssl.c
> URL: http://svn.apache.org/viewvc/httpd/httpd/trunk/modules/ssl/mod_ssl.c?rev=1897281&r1=1897280&r2=1897281&view=diff
> ==============================================================================
> --- httpd/httpd/trunk/modules/ssl/mod_ssl.c (original)
> +++ httpd/httpd/trunk/modules/ssl/mod_ssl.c Fri Jan 21 00:09:24 2022
[]
> @@ -691,6 +692,8 @@ static int ssl_hook_process_connection(c
>  {
>      SSLConnRec *sslconn = myConnConfig(c);
>
> +    int status = DECLINED;
> +
>      if (sslconn && !sslconn->disabled) {
>          /* On an active SSL connection, let the input filters initialize
>           * themselves which triggers the handshake, which again triggers
> @@ -698,13 +701,48 @@ static int ssl_hook_process_connection(c
>           */
>          apr_bucket_brigade* temp;
>
> +        int async_mpm = 0;
> +
>          temp = apr_brigade_create(c->pool, c->bucket_alloc);
> -        ap_get_brigade(c->input_filters, temp,
> -                       AP_MODE_INIT, APR_BLOCK_READ, 0);
> +
> +        if (ap_mpm_query(AP_MPMQ_IS_ASYNC, &async_mpm) != APR_SUCCESS) {
> +            async_mpm = 0;
> +        }
> +
> +        if (async_mpm) {
> +
> +            /* Take advantage of an async MPM. If we see an EAGAIN,
> +             * loop round and don't block.
> +             */
> +            apr_status_t rv;
> +
> +            rv = ap_get_brigade(c->input_filters, temp,
> +                           AP_MODE_INIT, APR_NONBLOCK_READ, 0);
> +
> +            if (rv == APR_SUCCESS) {
> +                /* great news, let's continue */
> +                status = DECLINED;
> +            }
> +            else if (rv == APR_EAGAIN) {

Shouldn't this be APR_STATUS_IS_EAGAIN(rv)?
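
i.e. a sketch against the hunk above (the macro also matches the
platform aliases of EAGAIN, like EWOULDBLOCK where it differs, which
a direct == APR_EAGAIN comparison misses):

    else if (APR_STATUS_IS_EAGAIN(rv)) {
        /* we've been asked to come around again, don't block */
        status = OK;
    }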

> +                /* we've been asked to come around again, don't block */
> +                status = OK;
> +            }
> +            else {
> +                /* we failed, give up */
> +                status = DONE;
> +
> +                c->aborted = 1;
> +            }
> +        }
> +        else {
> +            ap_get_brigade(c->input_filters, temp,
> +                           AP_MODE_INIT, APR_BLOCK_READ, 0);
> +        }
> +
>          apr_brigade_destroy(temp);
>      }
> -
> -    return DECLINED;
> +
> +    return status;
>  }
[]
>
> Modified: httpd/httpd/trunk/server/mpm/event/event.c
> URL: http://svn.apache.org/viewvc/httpd/httpd/trunk/server/mpm/event/event.c?rev=1897281&r1=1897280&r2=1897281&view=diff
> ==============================================================================
> --- httpd/httpd/trunk/server/mpm/event/event.c (original)
> +++ httpd/httpd/trunk/server/mpm/event/event.c Fri Jan 21 00:09:24 2022
> @@ -268,12 +268,14 @@ struct timeout_queue {
>  /*
>   * Several timeout queues that use different timeouts, so that we always can
>   * simply append to the end.
> + *   read_line_q        uses vhost's TimeOut FIXME - we can use a short timeout here

I think we need better interaction with mod_reqtimeout if we add
client-side read timeouts in the MPM.
For instance, we document handshake=5 in mod_reqtimeout, and that does
not fit the fixed s->timeout here (handshake= is disabled by default
for compatibility because it was added late in the 2.4 cycle, but if
it's configured we should honor it in the MPM too).

I'm not sure how to do that, but the single-timeout-per-timeout_queue
model is probably not the right one here.
Maybe we could queue a timer event based on the actual connection's
socket timeout?
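
Untested sketch of what I mean (event_timer_register() is a made-up
placeholder for whatever timer plumbing we'd add to event.c; the rest
is existing API):

    apr_interval_time_t timeout = -1;

    /* IIUC mod_reqtimeout tightens the socket timeout around its
     * reads via apr_socket_timeout_set(), so the socket should
     * reflect the remaining budget of the current stage */
    apr_socket_timeout_get(cs->pfd.desc.s, &timeout);
    if (timeout < 0) {
        /* no dynamic timeout set, fall back to the vhost's TimeOut */
        timeout = cs->c->base_server->timeout;
    }

    /* arm a one-shot timer instead of appending to a fixed-timeout
     * queue; the callback would expire the connection */
    event_timer_register(apr_time_now() + timeout,
                         shutdown_connection, cs);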

>   *   write_completion_q uses vhost's TimeOut
>   *   keepalive_q        uses vhost's KeepAliveTimeOut
>   *   linger_q           uses MAX_SECS_TO_LINGER
>   *   short_linger_q     uses SECONDS_TO_LINGER
>   */
> -static struct timeout_queue *write_completion_q,
> +static struct timeout_queue *read_line_q,
> +                            *write_completion_q,
>                              *keepalive_q,
>                              *linger_q,
>                              *short_linger_q;
[]
> @@ -1153,6 +1157,41 @@ read_request:
>          cs->pub.state = CONN_STATE_LINGER;
>      }
>
> +    if (cs->pub.state == CONN_STATE_READ_REQUEST_LINE) {
> +        ap_update_child_status(cs->sbh, SERVER_BUSY_READ, NULL);
> +
> +        /* It greatly simplifies the logic to use a single timeout value per q
> +         * because the new element can just be added to the end of the list and
> +         * it will stay sorted in expiration time sequence.

Yeah, but as explained above, it does not fit well here IMO.

>             If brand new
> +         * sockets are sent to the event thread for a readability check, this
> +         * will be a slight behavior change - they use the non-keepalive
> +         * timeout today.

Actually, they may use a dynamic timeout today.
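
For HTTP that dynamic timeout comes from mod_reqtimeout, which
tightens the socket timeout before each read, roughly (paraphrased,
not verbatim from the module; ccfg->socket and time_left stand in for
the module's own state):

    apr_socket_timeout_set(ccfg->socket, time_left);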

>             With a normal client, the socket will be readable in
> +         * a few milliseconds anyway.
> +         */
> +        cs->queue_timestamp = apr_time_now();
> +        notify_suspend(cs);
> +
> +        /* Add work to pollset. */
> +        update_reqevents_from_sense(cs, -1);
> +        apr_thread_mutex_lock(timeout_mutex);
> +        TO_QUEUE_APPEND(cs->sc->rl_q, cs);
> +        rv = apr_pollset_add(event_pollset, &cs->pfd);
> +        if (rv != APR_SUCCESS && !APR_STATUS_IS_EEXIST(rv)) {
> +            AP_DEBUG_ASSERT(0);
> +            TO_QUEUE_REMOVE(cs->sc->rl_q, cs);
> +            apr_thread_mutex_unlock(timeout_mutex);
> +            ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO()
> +                         "process_socket: apr_pollset_add failure for "
> +                         "read request line");
> +            close_connection(cs);
> +            signal_threads(ST_GRACEFUL);
> +        }
> +        else {
> +            apr_thread_mutex_unlock(timeout_mutex);
> +        }
> +        return;
> +    }
> +
[]
> @@ -2263,16 +2308,19 @@ do_maintenance:
>              else {
>                  process_keepalive_queue(now);
>              }
> -            /* Step 2: write completion timeouts */
> +            /* Step 2: read line timeouts */
> +            process_timeout_queue(read_line_q, now,
> +                                  defer_lingering_close);

I think we should call shutdown_connection() directly if
CONN_STATE_READ_REQUEST_LINE times out, rather than deferring the
lingering close.
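
i.e. in the maintenance loop above:

    /* Step 2: read line timeouts */
    process_timeout_queue(read_line_q, now,
                          shutdown_connection);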

> +            /* Step 3: write completion timeouts */
>              process_timeout_queue(write_completion_q, now,
>                                    defer_lingering_close);
> -            /* Step 3: (normal) lingering close completion timeouts */
> +            /* Step 4: (normal) lingering close completion timeouts */
>              if (dying && linger_q->timeout > short_linger_q->timeout) {
>                  /* Dying, force short timeout for normal lingering close */
>                  linger_q->timeout = short_linger_q->timeout;
>              }
>              process_timeout_queue(linger_q, now, shutdown_connection);
> -            /* Step 4: (short) lingering close completion timeouts */
> +            /* Step 5: (short) lingering close completion timeouts */
>              process_timeout_queue(short_linger_q, now, shutdown_connection);
>
>              apr_thread_mutex_unlock(timeout_mutex);


Regards,
Yann.
