# HG changeset patch
# User Roman Arutyunyan <a...@nginx.com>
# Date 1660119182 -14400
#      Wed Aug 10 12:13:02 2022 +0400
# Branch quic
# Node ID 7fc94f5288f492d2147bf22850744cd9888d2c8f
# Parent  f9d7930d0eedae28defd0803cb95dc8ab68e56b3
QUIC: a socket for new connections in BPF mode.
Previously, when a packet from a new connection was received, the Linux
reuseport BPF program failed to select a socket for it and fell back to the
default kernel selection algorithm.  This worked well most of the time.
However, during an nginx reload, two sets of workers (and sockets) may
coexist, and only existing connections should be served by the old sockets.
All new connections should be routed to the new sockets, since workers in
the closing state drop new connections.

Now a socket for new connections is introduced.  This socket is added to the
reuseport group and registered in the socket map with the key 0.  When a map
lookup with the key extracted from the packet DCID fails, the packet is
routed to the new socket.  This socket is read by all nginx workers, much
like a regular TCP listen socket.

The approach has a drawback though.  For correct routing, the client is
expected not to send more than one datagram before receiving a packet from
the server.  This requirement can be violated if the client supplies enough
TLS early data.  The size of early data can be limited with a properly tuned
initial_max_data transport parameter; currently its value is derived from
the QUIC stream buffer size and the number of concurrent streams.

diff --git a/src/core/ngx_connection.c b/src/core/ngx_connection.c
--- a/src/core/ngx_connection.c
+++ b/src/core/ngx_connection.c
@@ -1034,7 +1034,9 @@ ngx_close_listening_sockets(ngx_cycle_t
     for (i = 0; i < cycle->listening.nelts; i++) {
 
 #if (NGX_QUIC)
-        if (ls[i].quic) {
+        if (ngx_process == NGX_PROCESS_WORKER
+            && ls[i].quic && ls[i].worker != (ngx_uint_t) -1)
+        {
             continue;
         }
 #endif
diff --git a/src/core/ngx_connection.h b/src/core/ngx_connection.h
--- a/src/core/ngx_connection.h
+++ b/src/core/ngx_connection.h
@@ -47,6 +47,7 @@ struct ngx_listening_s {
     size_t              post_accept_buffer_size;
 
     ngx_listening_t    *previous;
+    ngx_listening_t    *main;
     ngx_connection_t   *connection;
 
     ngx_rbtree_t        rbtree;
diff --git a/src/event/ngx_event.c b/src/event/ngx_event.c
--- a/src/event/ngx_event.c
+++ b/src/event/ngx_event.c
@@ -802,7 +802,10 @@ ngx_event_process_init(ngx_cycle_t *cycl
     for (i = 0; i < cycle->listening.nelts; i++) {
 
 #if (NGX_HAVE_REUSEPORT)
-        if (ls[i].reuseport && ls[i].worker != ngx_worker) {
+        if (ls[i].reuseport
+            && ls[i].worker != ngx_worker
+            && ls[i].worker != (ngx_uint_t) -1)
+        {
             continue;
         }
 #endif
@@ -899,7 +902,7 @@ ngx_event_process_init(ngx_cycle_t *cycl
 
 #if (NGX_HAVE_REUSEPORT)
 
-        if (ls[i].reuseport) {
+        if (ls[i].reuseport && ls[i].worker != (ngx_uint_t) -1) {
 
             if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                 return NGX_ERROR;
             }
diff --git a/src/event/ngx_event.h b/src/event/ngx_event.h
--- a/src/event/ngx_event.h
+++ b/src/event/ngx_event.h
@@ -496,6 +496,9 @@ extern ngx_module_t  ngx_event_
 void ngx_event_accept(ngx_event_t *ev);
 ngx_int_t ngx_trylock_accept_mutex(ngx_cycle_t *cycle);
 ngx_int_t ngx_enable_accept_events(ngx_cycle_t *cycle);
+#if (NGX_HAVE_EPOLLEXCLUSIVE)
+void ngx_reorder_accept_events(ngx_listening_t *ls);
+#endif
 u_char *ngx_accept_log_error(ngx_log_t *log, u_char *buf, size_t len);
 #if (NGX_DEBUG)
 void ngx_debug_accepted_connection(ngx_event_conf_t *ecf, ngx_connection_t *c);
diff --git a/src/event/ngx_event_accept.c b/src/event/ngx_event_accept.c
--- a/src/event/ngx_event_accept.c
+++ b/src/event/ngx_event_accept.c
@@ -11,9 +11,6 @@
 
 static ngx_int_t ngx_disable_accept_events(ngx_cycle_t *cycle,
    ngx_uint_t all);
-#if (NGX_HAVE_EPOLLEXCLUSIVE)
-static void ngx_reorder_accept_events(ngx_listening_t *ls);
-#endif
 static void ngx_close_accepted_connection(ngx_connection_t *c);
@@ -410,7 +407,7 @@ ngx_disable_accept_events(ngx_cycle_t *c
          * when disabling accept events due to accept mutex
          */
 
-        if (ls[i].reuseport && !all) {
+        if (ls[i].reuseport && ls[i].worker != (ngx_uint_t) -1 && !all) {
             continue;
         }
@@ -429,7 +426,7 @@ ngx_disable_accept_events(ngx_cycle_t *c
 
 #if (NGX_HAVE_EPOLLEXCLUSIVE)
 
-static void
+void
 ngx_reorder_accept_events(ngx_listening_t *ls)
 {
     ngx_connection_t  *c;
@@ -448,7 +445,7 @@ ngx_reorder_accept_events(ngx_listening_
 
 #if (NGX_HAVE_REUSEPORT)
 
-    if (ls->reuseport) {
+    if (ls->reuseport && ls->worker != (ngx_uint_t) -1) {
         return;
     }
diff --git a/src/event/quic/bpf/ngx_quic_reuseport_helper.c b/src/event/quic/bpf/ngx_quic_reuseport_helper.c
--- a/src/event/quic/bpf/ngx_quic_reuseport_helper.c
+++ b/src/event/quic/bpf/ngx_quic_reuseport_helper.c
@@ -48,7 +48,7 @@ char _license[] SEC("license") = LICENSE
     offset += nbytes;                                                     \
     if (start + offset > end) {                                           \
         debugmsg("cannot read %ld bytes at offset %ld", nbytes, offset);  \
-        goto failed;                                                      \
+        goto new;                                                         \
     }                                                                     \
     data = start + offset - 1;
@@ -93,8 +93,7 @@ int ngx_quic_select_socket_by_dcid(struc
         len = data[0];   /* read DCID length */
 
         if (len < 8) {
-            /* it's useless to search for key in such short DCID */
-            return SK_PASS;
+            goto new;
         }
 
     } else {
@@ -102,39 +101,39 @@ int ngx_quic_select_socket_by_dcid(struc
     }
 
     dcid = &data[1];
-    advance_data(len); /* we expect the packet to have full DCID */
+    advance_data(len);
 
-    /* make verifier happy */
-    if (dcid + sizeof(__u64) > end) {
-        goto failed;
+    if (dcid + 8 > end) {
+        goto new;
     }
 
+    /* search for existing connection */
+
     key = ngx_quic_parse_uint64(dcid);
 
     rc = bpf_sk_select_reuseport(ctx, &ngx_quic_sockmap, &key, 0);
 
-    switch (rc) {
-    case 0:
-        debugmsg("nginx quic socket selected by key 0x%llx", key);
+    if (rc == 0) {
+        debugmsg("nginx quic hit key:0x%llx", key);
         return SK_PASS;
-
-    /* kernel returns positive error numbers, errno.h defines positive */
-    case -ENOENT:
-        debugmsg("nginx quic default route for key 0x%llx", key);
-        /* let the default reuseport logic decide which socket to choose */
-        return SK_PASS;
-
-    default:
-        debugmsg("nginx quic bpf_sk_select_reuseport err: %d key 0x%llx",
-                 rc, key);
-        goto failed;
     }
 
-failed:
-    /*
-     * SK_DROP will generate ICMP, but we may want to process "invalid" packet
-     * in userspace quic to investigate further and finally react properly
-     * (maybe ignore, maybe send something in response or close connection)
-     */
-    return SK_PASS;
+    debugmsg("nginx quic miss err:%d key:0x%llx", rc, key);
+
+new:
+
+    /* new connection */
+
+    key = 0;
+
+    rc = bpf_sk_select_reuseport(ctx, &ngx_quic_sockmap, &key, 0);
+
+    if (rc == 0) {
+        debugmsg("nginx quic new hit");
+        return SK_PASS;
+    }
+
+    debugmsg("nginx quic new miss err:%d", rc);
+
+    return SK_DROP;
 }
diff --git a/src/event/quic/ngx_event_quic.c b/src/event/quic/ngx_event_quic.c
--- a/src/event/quic/ngx_event_quic.c
+++ b/src/event/quic/ngx_event_quic.c
@@ -910,14 +910,6 @@ ngx_quic_handle_packet(ngx_connection_t
         pkt->odcid = pkt->dcid;
     }
 
-    if (ngx_terminate || ngx_exiting) {
-        if (conf->retry) {
-            return ngx_quic_send_retry(c, conf, pkt);
-        }
-
-        return NGX_ERROR;
-    }
-
     c->log->action = "creating quic connection";
 
     qc = ngx_quic_new_connection(c, conf, pkt);
diff --git a/src/event/quic/ngx_event_quic_bpf.c b/src/event/quic/ngx_event_quic_bpf.c
--- a/src/event/quic/ngx_event_quic_bpf.c
+++ b/src/event/quic/ngx_event_quic_bpf.c
@@ -42,7 +42,9 @@ typedef struct {
 
 static void *ngx_quic_bpf_create_conf(ngx_cycle_t *cycle);
+static char *ngx_quic_bpf_init_conf(ngx_cycle_t *cycle, void *conf);
 static ngx_int_t ngx_quic_bpf_module_init(ngx_cycle_t *cycle);
+static ngx_int_t ngx_quic_bpf_process_init(ngx_cycle_t *cycle);
 static void ngx_quic_bpf_cleanup(void *data);
 static ngx_inline void ngx_quic_bpf_close(ngx_log_t *log, int fd,
@@ -82,7 +84,7 @@ static ngx_command_t  ngx_quic_bpf_comma
 
 static ngx_core_module_t  ngx_quic_bpf_module_ctx = {
     ngx_string("quic_bpf"),
     ngx_quic_bpf_create_conf,
-    NULL
+    ngx_quic_bpf_init_conf
 };
@@ -93,7 +95,7 @@ ngx_module_t  ngx_quic_bpf_module = {
     NGX_CORE_MODULE,                       /* module type */
     NULL,                                  /* init master */
     ngx_quic_bpf_module_init,              /* init module */
-    NULL,                                  /* init process */
+    ngx_quic_bpf_process_init,             /* init process */
     NULL,                                  /* init thread */
     NULL,                                  /* exit thread */
     NULL,                                  /* exit process */
@@ -121,30 +123,67 @@ ngx_quic_bpf_create_conf(ngx_cycle_t *cy
 }
 
 
+static char *
+ngx_quic_bpf_init_conf(ngx_cycle_t *cycle, void *conf)
+{
+    ngx_quic_bpf_conf_t  *bcf = conf;
+
+    ngx_uint_t        i, nelts;
+    ngx_core_conf_t  *ccf;
+    ngx_listening_t  *ls, *nls;
+
+    ngx_conf_init_value(bcf->enabled, 0);
+
+    if (!bcf->enabled || ngx_test_config) {
+        return NGX_CONF_OK;
+    }
+
+    ccf = ngx_core_get_conf(cycle);
+    bcf->map_size = ccf->worker_processes * 4;
+
+    ls = cycle->listening.elts;
+    nelts = cycle->listening.nelts;
+
+    for (i = 0; i < nelts; i++) {
+
+        if (!ls[i].quic || !ls[i].reuseport || ls[i].worker != 0) {
+            continue;
+        }
+
+        nls = ngx_array_push(&cycle->listening);
+        if (nls == NULL) {
+            return NGX_CONF_ERROR;
+        }
+
+        *nls = ls[i];
+
+        /* cloning may change cycle->listening.elts */
+
+        ls = cycle->listening.elts;
+
+        /* make new listening first to inherit fd */
+
+        ls[i].worker = (ngx_uint_t) -1;
+    }
+
+    return NGX_CONF_OK;
+}
+
+
 static ngx_int_t
 ngx_quic_bpf_module_init(ngx_cycle_t *cycle)
 {
     ngx_uint_t            i;
     ngx_listening_t      *ls;
-    ngx_core_conf_t      *ccf;
     ngx_pool_cleanup_t   *cln;
     ngx_quic_bpf_conf_t  *bcf;
 
-    if (ngx_test_config) {
-        /*
-         * during config test, SO_REUSEPORT socket option is
-         * not set, thus making further processing meaningless
-         */
+    bcf = ngx_quic_bpf_get_conf(cycle);
+
+    if (!bcf->enabled || ngx_test_config) {
         return NGX_OK;
     }
 
-    ccf = ngx_core_get_conf(cycle);
-    bcf = ngx_quic_bpf_get_conf(cycle);
-
-    ngx_conf_init_value(bcf->enabled, 0);
-
-    bcf->map_size = ccf->worker_processes * 4;
-
     cln = ngx_pool_cleanup_add(cycle->pool, 0);
     if (cln == NULL) {
         goto failed;
@@ -203,6 +242,46 @@ failed:
 }
 
 
+static ngx_int_t
+ngx_quic_bpf_process_init(ngx_cycle_t *cycle)
+{
+    ngx_uint_t            i, j;
+    ngx_listening_t      *ls;
+    ngx_quic_bpf_conf_t  *bcf;
+
+    bcf = ngx_quic_bpf_get_conf(cycle);
+
+    if (!bcf->enabled) {
+        return NGX_OK;
+    }
+
+    ls = cycle->listening.elts;
+    for (i = 0; i < cycle->listening.nelts; i++) {
+
+        if (!ls[i].quic || ls[i].worker != (ngx_uint_t) -1) {
+            continue;
+        }
+
+        for (j = i + 1; j < cycle->listening.nelts; j++) {
+
+            if (ls[j].quic
+                && ls[j].reuseport
+                && ls[j].worker == ngx_worker
+                && ls[j].type == ls[i].type
+                && ngx_cmp_sockaddr(ls[j].sockaddr, ls[j].socklen,
+                                    ls[i].sockaddr, ls[i].socklen, 1)
+                   == 0)
+            {
+                ls[i].main = &ls[j];
+                break;
+            }
+        }
+    }
+
+    return NGX_OK;
+}
+
+
 static void
 ngx_quic_bpf_cleanup(void *data)
 {
@@ -419,26 +498,32 @@ static ngx_int_t
 ngx_quic_bpf_group_add_socket(ngx_cycle_t *cycle, ngx_listening_t *ls)
 {
     uint64_t                cookie;
-    ngx_quic_bpf_conf_t    *bcf;
     ngx_quic_sock_group_t  *grp;
 
-    bcf = ngx_quic_bpf_get_conf(cycle);
-
     grp = ngx_quic_bpf_get_group(cycle, ls);
-
     if (grp == NULL) {
-        if (!bcf->enabled) {
-            return NGX_OK;
-        }
-
        return NGX_ERROR;
    }
     grp->unused = 0;
 
-    cookie = ngx_quic_bpf_socket_key(ls->fd, cycle->log);
-    if (cookie == (uint64_t) NGX_ERROR) {
-        return NGX_ERROR;
+    if (ls->worker == (ngx_uint_t) -1) {
+        cookie = 0;
+
+    } else {
+        cookie = ngx_quic_bpf_socket_key(ls->fd, cycle->log);
+        if (cookie == (uint64_t) NGX_ERROR) {
+            return NGX_ERROR;
+        }
+
+        /* do not inherit this socket */
+        ls->ignore = 1;
+
+        if (fcntl(ls->fd, F_SETFD, FD_CLOEXEC) == -1) {
+            ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno,
+                          "fcntl(FD_CLOEXEC) fd:%d failed", ls->fd);
+            return NGX_ERROR;
+        }
     }
 
     /* map[cookie] = socket; for use in kernel helper */
@@ -452,9 +537,6 @@ ngx_quic_bpf_group_add_socket(ngx_cycle_
                    "quic bpf sockmap fd:%d add socket:%d cookie:0x%xL worker:%ui",
                    grp->map_fd, ls->fd, cookie, ls->worker);
 
-    /* do not inherit this socket */
-    ls->ignore = 1;
-
     return NGX_OK;
 }
diff --git a/src/event/quic/ngx_event_quic_bpf_code.c b/src/event/quic/ngx_event_quic_bpf_code.c
--- a/src/event/quic/ngx_event_quic_bpf_code.c
+++ b/src/event/quic/ngx_event_quic_bpf_code.c
@@ -7,71 +7,93 @@
 
 static ngx_bpf_reloc_t bpf_reloc_prog_ngx_quic_reuseport_helper[] = {
-    { "ngx_quic_sockmap", 55 },
+    { "ngx_quic_sockmap", 57 },
+    { "ngx_quic_sockmap", 71 },
 };
 
 static struct bpf_insn bpf_insn_prog_ngx_quic_reuseport_helper[] = {
     /* opcode dst src offset imm */
-    { 0x79, BPF_REG_4, BPF_REG_1, (int16_t) 0, 0x0 },
-    { 0x79, BPF_REG_3, BPF_REG_1, (int16_t) 8, 0x0 },
-    { 0xbf, BPF_REG_2, BPF_REG_4, (int16_t) 0, 0x0 },
-    { 0x7, BPF_REG_2, BPF_REG_0, (int16_t) 0, 0x8 },
-    { 0x2d, BPF_REG_2, BPF_REG_3, (int16_t) 54, 0x0 },
-    { 0xbf, BPF_REG_5, BPF_REG_4, (int16_t) 0, 0x0 },
-    { 0x7, BPF_REG_5, BPF_REG_0, (int16_t) 0, 0x9 },
-    { 0x2d, BPF_REG_5, BPF_REG_3, (int16_t) 51, 0x0 },
-    { 0xb7, BPF_REG_5, BPF_REG_0, (int16_t) 0, 0x14 },
-    { 0xb7, BPF_REG_0, BPF_REG_0, (int16_t) 0, 0x9 },
-    { 0x71, BPF_REG_6, BPF_REG_2, (int16_t) 0, 0x0 },
-    { 0x67, BPF_REG_6, BPF_REG_0, (int16_t) 0, 0x38 },
-    { 0xc7, BPF_REG_6, BPF_REG_0, (int16_t) 0, 0x38 },
-    { 0x65, BPF_REG_6, BPF_REG_0, (int16_t) 10, 0xffffffff },
-    { 0xbf, BPF_REG_2, BPF_REG_4, (int16_t) 0, 0x0 },
-    { 0x7, BPF_REG_2, BPF_REG_0, (int16_t) 0, 0xd },
-    { 0x2d, BPF_REG_2, BPF_REG_3, (int16_t) 42, 0x0 },
-    { 0xbf, BPF_REG_5, BPF_REG_4, (int16_t) 0, 0x0 },
-    { 0x7, BPF_REG_5, BPF_REG_0, (int16_t) 0, 0xe },
-    { 0x2d, BPF_REG_5, BPF_REG_3, (int16_t) 39, 0x0 },
-    { 0xb7, BPF_REG_0, BPF_REG_0, (int16_t) 0, 0xe },
-    { 0x71, BPF_REG_5, BPF_REG_2, (int16_t) 0, 0x0 },
-    { 0xb7, BPF_REG_6, BPF_REG_0, (int16_t) 0, 0x8 },
-    { 0x2d, BPF_REG_6, BPF_REG_5, (int16_t) 35, 0x0 },
-    { 0xf, BPF_REG_5, BPF_REG_0, (int16_t) 0, 0x0 },
+    { 0xbf, BPF_REG_6, BPF_REG_1, (int16_t) 0, 0x0 },
+    { 0x79, BPF_REG_3, BPF_REG_6, (int16_t) 0, 0x0 },
+    { 0x79, BPF_REG_2, BPF_REG_6, (int16_t) 8, 0x0 },
+    { 0xbf, BPF_REG_1, BPF_REG_3, (int16_t) 0, 0x0 },
+    { 0x7, BPF_REG_1, BPF_REG_0, (int16_t) 0, 0x8 },
+    { 0x2d, BPF_REG_1, BPF_REG_2, (int16_t) 60, 0x0 },
+    { 0xbf, BPF_REG_4, BPF_REG_3, (int16_t) 0, 0x0 },
+    { 0x7, BPF_REG_4, BPF_REG_0, (int16_t) 0, 0x9 },
+    { 0x2d, BPF_REG_4, BPF_REG_2, (int16_t) 57, 0x0 },
+    { 0xb7, BPF_REG_4, BPF_REG_0, (int16_t) 0, 0x14 },
+    { 0xb7, BPF_REG_5, BPF_REG_0, (int16_t) 0, 0x9 },
+    { 0x71, BPF_REG_0, BPF_REG_1, (int16_t) 0, 0x0 },
+    { 0x67, BPF_REG_0, BPF_REG_0, (int16_t) 0, 0x38 },
+    { 0xc7, BPF_REG_0, BPF_REG_0, (int16_t) 0, 0x38 },
+    { 0x65, BPF_REG_0, BPF_REG_0, (int16_t) 10, 0xffffffff },
+    { 0xbf, BPF_REG_1, BPF_REG_3, (int16_t) 0, 0x0 },
+    { 0x7, BPF_REG_1, BPF_REG_0, (int16_t) 0, 0xd },
+    { 0x2d, BPF_REG_1, BPF_REG_2, (int16_t) 48, 0x0 },
+    { 0xbf, BPF_REG_4, BPF_REG_3, (int16_t) 0, 0x0 },
+    { 0x7, BPF_REG_4, BPF_REG_0, (int16_t) 0, 0xe },
+    { 0x2d, BPF_REG_4, BPF_REG_2, (int16_t) 45, 0x0 },
+    { 0xb7, BPF_REG_5, BPF_REG_0, (int16_t) 0, 0xe },
+    { 0x71, BPF_REG_4, BPF_REG_1, (int16_t) 0, 0x0 },
+    { 0xb7, BPF_REG_0, BPF_REG_0, (int16_t) 0, 0x8 },
+    { 0x2d, BPF_REG_0, BPF_REG_4, (int16_t) 41, 0x0 },
     { 0xf, BPF_REG_4, BPF_REG_5, (int16_t) 0, 0x0 },
-    { 0x2d, BPF_REG_4, BPF_REG_3, (int16_t) 32, 0x0 },
-    { 0xbf, BPF_REG_4, BPF_REG_2, (int16_t) 0, 0x0 },
-    { 0x7, BPF_REG_4, BPF_REG_0, (int16_t) 0, 0x9 },
-    { 0x2d, BPF_REG_4, BPF_REG_3, (int16_t) 29, 0x0 },
-    { 0x71, BPF_REG_4, BPF_REG_2, (int16_t) 1, 0x0 },
-    { 0x67, BPF_REG_4, BPF_REG_0, (int16_t) 0, 0x38 },
-    { 0x71, BPF_REG_3, BPF_REG_2, (int16_t) 2, 0x0 },
-    { 0x67, BPF_REG_3, BPF_REG_0, (int16_t) 0, 0x30 },
-    { 0x4f, BPF_REG_3, BPF_REG_4, (int16_t) 0, 0x0 },
-    { 0x71, BPF_REG_4, BPF_REG_2, (int16_t) 3, 0x0 },
-    { 0x67, BPF_REG_4, BPF_REG_0, (int16_t) 0, 0x28 },
-    { 0x4f, BPF_REG_3, BPF_REG_4, (int16_t) 0, 0x0 },
-    { 0x71, BPF_REG_4, BPF_REG_2, (int16_t) 4, 0x0 },
-    { 0x67, BPF_REG_4, BPF_REG_0, (int16_t) 0, 0x20 },
-    { 0x4f, BPF_REG_3, BPF_REG_4, (int16_t) 0, 0x0 },
-    { 0x71, BPF_REG_4, BPF_REG_2, (int16_t) 5, 0x0 },
-    { 0x67, BPF_REG_4, BPF_REG_0, (int16_t) 0, 0x18 },
-    { 0x4f, BPF_REG_3, BPF_REG_4, (int16_t) 0, 0x0 },
-    { 0x71, BPF_REG_4, BPF_REG_2, (int16_t) 6, 0x0 },
-    { 0x67, BPF_REG_4, BPF_REG_0, (int16_t) 0, 0x10 },
-    { 0x4f, BPF_REG_3, BPF_REG_4, (int16_t) 0, 0x0 },
-    { 0x71, BPF_REG_4, BPF_REG_2, (int16_t) 7, 0x0 },
-    { 0x67, BPF_REG_4, BPF_REG_0, (int16_t) 0, 0x8 },
-    { 0x4f, BPF_REG_3, BPF_REG_4, (int16_t) 0, 0x0 },
-    { 0x71, BPF_REG_2, BPF_REG_2, (int16_t) 8, 0x0 },
-    { 0x4f, BPF_REG_3, BPF_REG_2, (int16_t) 0, 0x0 },
-    { 0x7b, BPF_REG_10, BPF_REG_3, (int16_t) 65528, 0x0 },
+    { 0xf, BPF_REG_3, BPF_REG_4, (int16_t) 0, 0x0 },
+    { 0x2d, BPF_REG_3, BPF_REG_2, (int16_t) 38, 0x0 },
+    { 0xbf, BPF_REG_3, BPF_REG_1, (int16_t) 0, 0x0 },
+    { 0x7, BPF_REG_3, BPF_REG_0, (int16_t) 0, 0x9 },
+    { 0x2d, BPF_REG_3, BPF_REG_2, (int16_t) 35, 0x0 },
+    { 0x71, BPF_REG_3, BPF_REG_1, (int16_t) 1, 0x0 },
+    { 0x67, BPF_REG_3, BPF_REG_0, (int16_t) 0, 0x38 },
+    { 0x71, BPF_REG_2, BPF_REG_1, (int16_t) 2, 0x0 },
+    { 0x67, BPF_REG_2, BPF_REG_0, (int16_t) 0, 0x30 },
+    { 0x4f, BPF_REG_2, BPF_REG_3, (int16_t) 0, 0x0 },
+    { 0x71, BPF_REG_3, BPF_REG_1, (int16_t) 3, 0x0 },
+    { 0x67, BPF_REG_3, BPF_REG_0, (int16_t) 0, 0x28 },
+    { 0x4f, BPF_REG_2, BPF_REG_3, (int16_t) 0, 0x0 },
+    { 0x71, BPF_REG_3, BPF_REG_1, (int16_t) 4, 0x0 },
+    { 0x67, BPF_REG_3, BPF_REG_0, (int16_t) 0, 0x20 },
+    { 0x4f, BPF_REG_2, BPF_REG_3, (int16_t) 0, 0x0 },
+    { 0x71, BPF_REG_3, BPF_REG_1, (int16_t) 5, 0x0 },
+    { 0x67, BPF_REG_3, BPF_REG_0, (int16_t) 0, 0x18 },
+    { 0x4f, BPF_REG_2, BPF_REG_3, (int16_t) 0, 0x0 },
+    { 0x71, BPF_REG_3, BPF_REG_1, (int16_t) 6, 0x0 },
+    { 0x67, BPF_REG_3, BPF_REG_0, (int16_t) 0, 0x10 },
+    { 0x4f, BPF_REG_2, BPF_REG_3, (int16_t) 0, 0x0 },
+    { 0x71, BPF_REG_3, BPF_REG_1, (int16_t) 7, 0x0 },
+    { 0x67, BPF_REG_3, BPF_REG_0, (int16_t) 0, 0x8 },
+    { 0x4f, BPF_REG_2, BPF_REG_3, (int16_t) 0, 0x0 },
+    { 0x71, BPF_REG_1, BPF_REG_1, (int16_t) 8, 0x0 },
+    { 0x4f, BPF_REG_2, BPF_REG_1, (int16_t) 0, 0x0 },
+    { 0x7b, BPF_REG_10, BPF_REG_2, (int16_t) 65528, 0x0 },
     { 0xbf, BPF_REG_3, BPF_REG_10, (int16_t) 0, 0x0 },
     { 0x7, BPF_REG_3, BPF_REG_0, (int16_t) 0, 0xfffffff8 },
+    { 0xbf, BPF_REG_1, BPF_REG_6, (int16_t) 0, 0x0 },
     { 0x18, BPF_REG_2, BPF_REG_0, (int16_t) 0, 0x0 },
     { 0x0, BPF_REG_0, BPF_REG_0, (int16_t) 0, 0x0 },
     { 0xb7, BPF_REG_4, BPF_REG_0, (int16_t) 0, 0x0 },
     { 0x85, BPF_REG_0, BPF_REG_0, (int16_t) 0, 0x52 },
+    { 0xbf, BPF_REG_1, BPF_REG_0, (int16_t) 0, 0x0 },
     { 0xb7, BPF_REG_0, BPF_REG_0, (int16_t) 0, 0x1 },
+    { 0x67, BPF_REG_1, BPF_REG_0, (int16_t) 0, 0x20 },
+    { 0x77, BPF_REG_1, BPF_REG_0, (int16_t) 0, 0x20 },
+    { 0x15, BPF_REG_1, BPF_REG_0, (int16_t) 15, 0x0 },
+    { 0xb7, BPF_REG_1, BPF_REG_0, (int16_t) 0, 0x0 },
+    { 0x7b, BPF_REG_10, BPF_REG_1, (int16_t) 65528, 0x0 },
+    { 0xbf, BPF_REG_3, BPF_REG_10, (int16_t) 0, 0x0 },
+    { 0x7, BPF_REG_3, BPF_REG_0, (int16_t) 0, 0xfffffff8 },
+    { 0xbf, BPF_REG_1, BPF_REG_6, (int16_t) 0, 0x0 },
+    { 0x18, BPF_REG_2, BPF_REG_0, (int16_t) 0, 0x0 },
+    { 0x0, BPF_REG_0, BPF_REG_0, (int16_t) 0, 0x0 },
+    { 0xb7, BPF_REG_4, BPF_REG_0, (int16_t) 0, 0x0 },
+    { 0x85, BPF_REG_0, BPF_REG_0, (int16_t) 0, 0x52 },
+    { 0xbf, BPF_REG_1, BPF_REG_0, (int16_t) 0, 0x0 },
+    { 0x67, BPF_REG_1, BPF_REG_0, (int16_t) 0, 0x20 },
+    { 0x77, BPF_REG_1, BPF_REG_0, (int16_t) 0, 0x20 },
+    { 0xb7, BPF_REG_0, BPF_REG_0, (int16_t) 0, 0x1 },
+    { 0x15, BPF_REG_1, BPF_REG_0, (int16_t) 1, 0x0 },
+    { 0xb7, BPF_REG_0, BPF_REG_0, (int16_t) 0, 0x0 },
     { 0x95, BPF_REG_0, BPF_REG_0, (int16_t) 0, 0x0 },
 };
@@ -86,3 +108,4 @@ ngx_bpf_program_t  ngx_quic_reuseport_hel
     .license = "BSD",
     .type = BPF_PROG_TYPE_SK_REUSEPORT,
 };
+
diff --git a/src/event/quic/ngx_event_quic_udp.c b/src/event/quic/ngx_event_quic_udp.c
--- a/src/event/quic/ngx_event_quic_udp.c
+++ b/src/event/quic/ngx_event_quic_udp.c
@@ -30,7 +30,7 @@ ngx_quic_recvmsg(ngx_event_t *ev)
     struct msghdr       msg;
     ngx_sockaddr_t      sa, lsa;
     struct sockaddr    *sockaddr, *local_sockaddr;
-    ngx_listening_t    *ls;
+    ngx_listening_t    *ls, *mls;
     ngx_event_conf_t   *ecf;
     ngx_connection_t   *c, *lc;
     ngx_quic_socket_t  *qsock;
@@ -56,11 +56,12 @@ ngx_quic_recvmsg(ngx_event_t *ev)
 
     lc = ev->data;
     ls = lc->listening;
+    mls = ls->main ? ls->main : ls;
 
     ev->ready = 0;
 
-    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0,
-                   "quic recvmsg on %V, ready: %d",
-                   &ls->addr_text, ev->available);
+    ngx_log_debug3(NGX_LOG_DEBUG_EVENT, ev->log, 0,
+                   "quic recvmsg on %V, fd:%d, ready: %d",
+                   &ls->addr_text, lc->fd, ev->available);
 
     do {
         ngx_memzero(&msg, sizeof(struct msghdr));
@@ -156,7 +157,8 @@ ngx_quic_recvmsg(ngx_event_t *ev)
             goto next;
         }
 
-        c = ngx_quic_lookup_connection(ls, &key, local_sockaddr, local_socklen);
+        c = ngx_quic_lookup_connection(mls, &key,
+                                       local_sockaddr, local_socklen);
 
         if (c) {
 
@@ -204,6 +206,12 @@ ngx_quic_recvmsg(ngx_event_t *ev)
             goto next;
         }
 
+        if ((ngx_terminate || ngx_exiting) && ls->worker != (ngx_uint_t) -1) {
+            ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0,
+                           "quic ignore new connections in a closing worker");
+            goto next;
+        }
+
 #if (NGX_STAT_STUB)
         (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1);
 #endif
@@ -211,7 +219,7 @@ ngx_quic_recvmsg(ngx_event_t *ev)
         ngx_accept_disabled = ngx_cycle->connection_n / 8
                               - ngx_cycle->free_connection_n;
 
-        c = ngx_get_connection(lc->fd, ev->log);
+        c = ngx_get_connection(mls->connection->fd, ev->log);
         if (c == NULL) {
             return;
         }
@@ -248,7 +256,7 @@ ngx_quic_recvmsg(ngx_event_t *ev)
         c->log = log;
         c->pool->log = log;
 
-        c->listening = ls;
+        c->listening = mls;
 
         if (local_sockaddr == &lsa.sockaddr) {
             local_sockaddr = ngx_palloc(c->pool, local_socklen);
@@ -345,6 +353,10 @@ ngx_quic_recvmsg(ngx_event_t *ev)
         }
 
     } while (ev->available);
+
+#if (NGX_HAVE_EPOLLEXCLUSIVE)
+    ngx_reorder_accept_events(ls);
+#endif
 }
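For readers who have not worked with SO_REUSEPORT BPF steering, here is a
minimal, self-contained sketch (illustrative only, not part of the patch and
not nginx code) of the userspace side of the scheme described in the commit
message: per-worker QUIC sockets are stored in the sockmap under the 64-bit
key that the BPF program later recomputes from the packet DCID (in this
patch, the socket cookie), while the shared socket for new connections is
stored under key 0, which the program falls back to on a lookup miss.  The
function name, the fd/value layout and the assumption of 8-byte keys are
mine, not the patch's.

/*
 * Illustrative sketch: populate a BPF sockmap/sockhash consumed by an
 * SK_REUSEPORT program such as the helper in this patch.  Assumes libbpf
 * and a map created with 8-byte keys and 4-byte (socket fd) values.
 */

#include <stdint.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>        /* bpf_map_update_elem() */

int
register_quic_sockets(int map_fd, const int *worker_fds,
                      const uint64_t *cookies, int nworkers, int new_conn_fd)
{
    int       i;
    uint64_t  key;

    /*
     * Per-worker sockets: keyed by the value the BPF program will later
     * derive from the packet DCID (the socket cookie in this patch).
     */
    for (i = 0; i < nworkers; i++) {
        key = cookies[i];
        if (bpf_map_update_elem(map_fd, &key, &worker_fds[i], BPF_ANY) < 0) {
            return -1;
        }
    }

    /*
     * The shared "new connections" socket: stored under key 0, which the
     * BPF program retries after a DCID lookup miss before dropping the
     * packet.
     */
    key = 0;
    if (bpf_map_update_elem(map_fd, &key, &new_conn_fd, BPF_ANY) < 0) {
        return -1;
    }

    return 0;
}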