hello?

发自我的 iPhone

> 在 2018年4月2日,08:28,Haitao Lv <i...@lvht.net> 写道:
> 
> Is anybody here?
> 
>> On Mar 21, 2018, at 11:36, Haitao Lv <i...@lvht.net> wrote:
>> 
>> Thank you for reviewing.
>> 
>> And here is the patch that fixes the broken PROXY protocol functionality.
>> 
>> Sorry for the disturbance.
>> 
>> diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c
>> index 2db7a627..9f1b8544 100644
>> --- a/src/http/ngx_http_request.c
>> +++ b/src/http/ngx_http_request.c
>> @@ -17,6 +17,10 @@ static ssize_t 
>> ngx_http_read_request_header(ngx_http_request_t *r);
>> static ngx_int_t ngx_http_alloc_large_header_buffer(ngx_http_request_t *r,
>>    ngx_uint_t request_line);
>> 
>> +#if (NGX_HTTP_V2)
>> +static void ngx_http_wait_v2_preface_handler(ngx_event_t *rev);
>> +#endif
>> +
>> static ngx_int_t ngx_http_process_header_line(ngx_http_request_t *r,
>>    ngx_table_elt_t *h, ngx_uint_t offset);
>> static ngx_int_t ngx_http_process_unique_header_line(ngx_http_request_t *r,
>> @@ -325,7 +329,7 @@ ngx_http_init_connection(ngx_connection_t *c)
>> 
>> #if (NGX_HTTP_V2)
>>    if (hc->addr_conf->http2) {
>> -        rev->handler = ngx_http_v2_init;
>> +        rev->handler = ngx_http_wait_v2_preface_handler;
>>    }
>> #endif
>> 
>> @@ -381,6 +385,131 @@ ngx_http_init_connection(ngx_connection_t *c)
>> }
>> 
>> 
>> +#if (NGX_HTTP_V2)
>> +static void
>> +ngx_http_wait_v2_preface_handler(ngx_event_t *rev)
>> +{
>> +    size_t                     size;
>> +    ssize_t                    n;
>> +    u_char                    *p;
>> +    ngx_buf_t                 *b;
>> +    ngx_connection_t          *c;
>> +    ngx_http_connection_t     *hc;
>> +    static const u_char        preface[] = "PRI";
>> +
>> +    c = rev->data;
>> +    hc = c->data;
>> +
>> +    size = sizeof(preface) - 1;
>> +
>> +    if (hc->proxy_protocol) {
>> +        size += NGX_PROXY_PROTOCOL_MAX_HEADER;
>> +    }
>> +
>> +    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
>> +            "http wait h2 preface handler");
>> +
>> +    if (rev->timedout) {
>> +        ngx_log_error(NGX_LOG_INFO, c->log, NGX_ETIMEDOUT, "client timed 
>> out");
>> +        ngx_http_close_connection(c);
>> +        return;
>> +    }
>> +
>> +    if (c->close) {
>> +        ngx_http_close_connection(c);
>> +        return;
>> +    }
>> +
>> +    b = c->buffer;
>> +
>> +    if (b == NULL) {
>> +        b = ngx_create_temp_buf(c->pool, size);
>> +        if (b == NULL) {
>> +            ngx_http_close_connection(c);
>> +            return;
>> +        }
>> +
>> +        c->buffer = b;
>> +
>> +    } else if (b->start == NULL) {
>> +
>> +        b->start = ngx_palloc(c->pool, size);
>> +        if (b->start == NULL) {
>> +            ngx_http_close_connection(c);
>> +            return;
>> +        }
>> +
>> +        b->pos = b->start;
>> +        b->last = b->start;
>> +        b->end = b->last + size;
>> +    }
>> +
>> +    n = c->recv(c, b->last, b->end - b->last);
>> +
>> +    if (n == NGX_AGAIN) {
>> +
>> +        if (!rev->timer_set) {
>> +            ngx_add_timer(rev, c->listening->post_accept_timeout);
>> +            ngx_reusable_connection(c, 1);
>> +        }
>> +
>> +        if (ngx_handle_read_event(rev, 0) != NGX_OK) {
>> +            ngx_http_close_connection(c);
>> +            return;
>> +        }
>> +
>> +        /*
>> +         * We are trying to not hold c->buffer's memory for an idle 
>> connection.
>> +         */
>> +
>> +        if (ngx_pfree(c->pool, b->start) == NGX_OK) {
>> +            b->start = NULL;
>> +        }
>> +
>> +        return;
>> +    }
>> +
>> +    if (n == NGX_ERROR) {
>> +        ngx_http_close_connection(c);
>> +        return;
>> +    }
>> +
>> +    if (n == 0) {
>> +        ngx_log_error(NGX_LOG_INFO, c->log, 0,
>> +                      "client closed connection");
>> +        ngx_http_close_connection(c);
>> +        return;
>> +    }
>> +
>> +    b->last += n;
>> +
>> +    if (hc->proxy_protocol) {
>> +        hc->proxy_protocol = 0;
>> +
>> +        p = ngx_proxy_protocol_read(c, b->pos, b->last);
>> +
>> +        if (p == NULL) {
>> +            ngx_http_close_connection(c);
>> +            return;
>> +        }
>> +
>> +        b->pos = p;
>> +    }
>> +
>> +    if (b->last >= b->pos + sizeof(preface) - 1) {
>> +        /* b will be freed in 
>> ngx_http_v2_init/ngx_http_wait_request_handler */
>> +
>> +        if (ngx_strncmp(b->pos, preface, sizeof(preface) - 1) == 0) {
>> +            ngx_http_v2_init(rev);
>> +        } else {
>> +            rev->handler = ngx_http_wait_request_handler;
>> +            ngx_http_wait_request_handler(rev);
>> +        }
>> +    }
>> +}
>> +#endif
>> +
>> +
>> static void
>> ngx_http_wait_request_handler(ngx_event_t *rev)
>> {
>> @@ -393,6 +522,7 @@ ngx_http_wait_request_handler(ngx_event_t *rev)
>>    ngx_http_core_srv_conf_t  *cscf;
>> 
>>    c = rev->data;
>> +    n = NGX_AGAIN;
>> 
>>    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http wait request 
>> handler");
>> 
>> @@ -434,9 +564,27 @@ ngx_http_wait_request_handler(ngx_event_t *rev)
>>        b->pos = b->start;
>>        b->last = b->start;
>>        b->end = b->last + size;
>> +    } else {
>> +
>> +        p = ngx_palloc(c->pool, size);
>> +        if (p == NULL) {
>> +            ngx_http_close_connection(c);
>> +            return;
>> +        }
>> +
>> +        n = b->last - b->pos;
>> +        ngx_memcpy(p, b->pos, n);
>> +        ngx_pfree(c->pool, b->start);
>> +
>> +        b->start = p;
>> +        b->pos = b->start;
>> +        b->last = b->start + n;
>> +        b->end = b->last + size;
>>    }
>> 
>> -    n = c->recv(c, b->last, size);
>> +    if (n == NGX_AGAIN) {
>> +        n = c->recv(c, b->last, size);
>> +    }
>> 
>>    if (n == NGX_AGAIN) {
>> 
>> diff --git a/src/http/v2/ngx_http_v2.c b/src/http/v2/ngx_http_v2.c
>> index 77ebb847..6724b662 100644
>> --- a/src/http/v2/ngx_http_v2.c
>> +++ b/src/http/v2/ngx_http_v2.c
>> @@ -229,6 +229,8 @@ static ngx_http_v2_parse_header_t  
>> ngx_http_v2_parse_headers[] = {
>> void
>> ngx_http_v2_init(ngx_event_t *rev)
>> {
>> +    size_t                     size;
>> +    ngx_buf_t                 *b;
>>    ngx_connection_t          *c;
>>    ngx_pool_cleanup_t        *cln;
>>    ngx_http_connection_t     *hc;
>> @@ -260,6 +262,23 @@ ngx_http_v2_init(ngx_event_t *rev)
>>        return;
>>    }
>> 
>> +    b = c->buffer;
>> +
>> +    if (b != NULL) {
>> +        size = b->last - b->pos;
>> +
>> +        if (size > h2mcf->recv_buffer_size) {
>> +            size = h2mcf->recv_buffer_size;
>> +        }
>> +
>> +        ngx_memcpy(h2mcf->recv_buffer, b->pos, size);
>> +        h2c->state.buffer_used = size;
>> +
>> +        ngx_pfree(c->pool, b->start);
>> +        ngx_pfree(c->pool, b);
>> +        c->buffer = NULL;
>> +    }
>> +
>>    h2c->connection = c;
>>    h2c->http_connection = hc;
>> 
>> @@ -379,13 +398,15 @@ ngx_http_v2_read_handler(ngx_event_t *rev)
>>    h2mcf = ngx_http_get_module_main_conf(h2c->http_connection->conf_ctx,
>>                                          ngx_http_v2_module);
>> 
>> -    available = h2mcf->recv_buffer_size - 2 * NGX_HTTP_V2_STATE_BUFFER_SIZE;
>> +    available = h2mcf->recv_buffer_size - h2c->state.buffer_used - 2 * 
>> NGX_HTTP_V2_STATE_BUFFER_SIZE;
>> 
>>    do {
>>        p = h2mcf->recv_buffer;
>> 
>> -        ngx_memcpy(p, h2c->state.buffer, NGX_HTTP_V2_STATE_BUFFER_SIZE);
>>        end = p + h2c->state.buffer_used;
>> +        if (h2c->state.buffer_used == 0) {
>> +            ngx_memcpy(p, h2c->state.buffer, NGX_HTTP_V2_STATE_BUFFER_SIZE);
>> +        }
>> 
>>        n = c->recv(c, end, available);
>> 
>> 
>> 
>>> On Mar 21, 2018, at 00:02, Valentin V. Bartenev <vb...@nginx.com> wrote:
>>> 
>>> On Thursday 08 March 2018 08:42:27 Haitao Lv wrote:
>>>> Sorry for disturbing. But I have to fix a buffer overflow bug.
>>>> Here is the latest patch.
>>>> 
>>>> Sorry. But please make your comments. Thank you.
>>> [..]
>>> 
>>> There's no way for this patch to be accepted as it breaks PROXY protocol
>>> functionality.
>>> 
>>> wbr, Valentin V. Bartenev
>>> 
>>> _______________________________________________
>>> nginx-devel mailing list
>>> nginx-devel@nginx.org
>>> http://mailman.nginx.org/mailman/listinfo/nginx-devel
>> 
>> 
>> _______________________________________________
>> nginx-devel mailing list
>> nginx-devel@nginx.org
>> http://mailman.nginx.org/mailman/listinfo/nginx-devel



_______________________________________________
nginx-devel mailing list
nginx-devel@nginx.org
http://mailman.nginx.org/mailman/listinfo/nginx-devel

Reply via email to