On 7/1/20 6:35 PM, yla...@apache.org wrote:
> Author: ylavic
> Date: Wed Jul 1 16:35:48 2020
> New Revision: 1879401
>
> URL: http://svn.apache.org/viewvc?rev=1879401&view=rev
> Log:
> mod_proxy: improved and reentrant tunneling loop.
>
> modules/proxy/mod_proxy.h:
> Rename AP_PROXY_TRANSFER_SHOULD_YIELD to AP_PROXY_TRANSFER_YIELD_PENDING
> and add AP_PROXY_TRANSFER_YIELD_MAX_READS.
>
> modules/proxy/mod_proxy_http.c:
> modules/proxy/mod_proxy_wstunnel.c:
> Removing of reqtimeout filter is now handled by ap_proxy_tunnel_create().
>
> modules/proxy/proxy_util.c:
> ap_proxy_transfer_between_connections():
> Reorganize loop to break out early.
> When AP_PROXY_TRANSFER_YIELD_PENDING, if !ap_filter_should_yield() we
> still need to run and check ap_filter_output_pending() since it may
> release pending data.
> When AP_PROXY_TRANSFER_YIELD_MAX_READS, stop the loop after too many
> reads (PROXY_TRANSFER_MAX_READS = 10000) to release the thread and
> give the caller a chance to schedule the other direction.
> Don't return APR_INCOMPLETE when it comes from an incomplete body
> detected by ap_http_filter().
>
> ap_proxy_tunnel_create():
> Start with POLLOUT on both directions so that any pending output data
> is flushed first.
>
> ap_proxy_tunnel_run():
> Remove re-init/clear of the pollset for each call so that the function
> is reentrant.
> Handle POLLOUT before POLLIN so that we can read in the same pass once
> all buffered output data are flushed, using ap_filter_input_pending()
> to drain buffered input data.
>
> This is a preparatory patch for async websocket tunneling in mod_proxy_http.
>
> Modified:
> httpd/httpd/trunk/modules/proxy/mod_proxy.h
> httpd/httpd/trunk/modules/proxy/mod_proxy_http.c
> httpd/httpd/trunk/modules/proxy/mod_proxy_wstunnel.c
> httpd/httpd/trunk/modules/proxy/proxy_util.c
>
> Modified: httpd/httpd/trunk/modules/proxy/proxy_util.c
> URL:
> http://svn.apache.org/viewvc/httpd/httpd/trunk/modules/proxy/proxy_util.c?rev=1879401&r1=1879400&r2=1879401&view=diff
> ==============================================================================
> --- httpd/httpd/trunk/modules/proxy/proxy_util.c (original)
> +++ httpd/httpd/trunk/modules/proxy/proxy_util.c Wed Jul 1 16:35:48 2020
> @@ -4410,51 +4470,88 @@ PROXY_DECLARE(int) ap_proxy_tunnel_run(p
> "proxy: %s: woken up, %i result(s)", scheme, nresults);
>
> for (i = 0; i < nresults; i++) {
> - const apr_pollfd_t *cur = &results[i];
> - int revents = cur->rtnevents;
> + const apr_pollfd_t *pfd = &results[i];
> + struct proxy_tunnel_conn *tc = pfd->client_data;
> +
> + ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, r,
> + "proxy: %s: #%i: %s: %hx/%hx", scheme, i,
> + tc->name, pfd->rtnevents, tc->pfd->reqevents);
>
> /* sanity check */
> - if (cur->desc.s != client->pfd->desc.s
> - && cur->desc.s != origin->pfd->desc.s) {
> + if (pfd->desc.s != client->pfd->desc.s
> + && pfd->desc.s != origin->pfd->desc.s) {
> ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10222)
> "proxy: %s: unknown socket in pollset",
> scheme);
> rc = HTTP_INTERNAL_SERVER_ERROR;
> goto cleanup;
> }
> -
> - in = cur->client_data;
> - if (revents & APR_POLLOUT) {
> - in = in->other;
> - }
> - else if (!(revents & (APR_POLLIN | APR_POLLHUP))) {
> + if (!(pfd->rtnevents & (APR_POLLIN | APR_POLLHUP |
> APR_POLLOUT))) {
> /* this catches POLLERR/POLLNVAL etc.. */
> ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10220)
> "proxy: %s: polling events error (%x)",
> - scheme, revents);
> + scheme, pfd->rtnevents);
> rc = HTTP_INTERNAL_SERVER_ERROR;
> goto cleanup;
> }
> - out = in->other;
>
> - ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, r,
> - "proxy: %s: #%i: %s/%hx => %s/%hx: %x",
> - scheme, i, in->name, in->pfd->reqevents,
> - out->name, out->pfd->reqevents, revents);
> + if (pfd->rtnevents & APR_POLLOUT) {
> + struct proxy_tunnel_conn *out = tc, *in = tc->other;
> +
> + ap_log_rerror(APLOG_MARK, APLOG_TRACE8, 0, r,
> + "proxy: %s: %s output ready",
> + scheme, out->name);
> +
> + rc = ap_filter_output_pending(out->c);
> + if (rc == OK) {
> + /* Keep polling out (only) */
> + continue;
> + }
> + if (rc != DECLINED) {
> + /* Real failure, bail out */
> + ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10221)
> + "proxy: %s: %s flushing failed (%i)",
> + scheme, out->name, rc);
> + goto cleanup;
> + }
> + rc = OK;
> +
> + /* No more pending data. If the input side is not readable
> + * anymore it's time to shutdown for write (this direction
> + * is over). Otherwise back to normal business.
> + */
> + del_pollset(pollset, out->pfd, APR_POLLOUT);
> + if (in->readable) {
> + ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r,
> + "proxy: %s: %s resume writable",
> + scheme, out->name);
> + add_pollset(pollset, in->pfd, APR_POLLIN);
> + out->writable = 1;
> + }
> + else {
> + ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
> + "proxy: %s: %s write shutdown",
> + scheme, out->name);
> + apr_socket_shutdown(out->pfd->desc.s, 1);
> + }
> + }
>
> - if (in->readable && (in->drain || !(revents & APR_POLLOUT))) {
> + if (pfd->rtnevents & (APR_POLLIN | APR_POLLHUP)
Why do we check for APR_POLLHUP here as well? I noticed a case where we request
only APR_POLLOUT but get back APR_POLLOUT and
APR_POLLHUP, which causes an endless loop.
Regards
RĂ¼diger