On Mon, 2009-10-19 at 00:00 +0200, Stefan Fritsch wrote: > Randomly killing possibly legitimate connections is some kind of DoS, > too.
For sure. Anything we do, including reducing connection timeout on a heavily loaded server is DoS. We just pick the type of DoS we want, instead of the one attacker wants :-) > But it's probably better than the current behaviour. It could > even improve the situation in the case where many processes are stuck > waiting for a broken ldap/backend/etc. server. Yep, that's the deal. > In any case, you should try to kill workers with SERVER_BUSY_KEEPALIVE > before randomly killing processes. True. I meant the patch more as a proof of concept than a real solution (that SIGINT is a poke in the eye and I haven't touched worker at all). I've attached what you probably meant anyway. -- Bojan
--- httpd-2.2.14-v/server/mpm/prefork/prefork.c 2009-02-01 07:54:55.000000000 +1100 +++ httpd-2.2.14/server/mpm/prefork/prefork.c 2009-10-19 09:19:45.031674761 +1100 @@ -48,6 +48,7 @@ #include "ap_listen.h" #include "ap_mmn.h" #include "apr_poll.h" +#include "apr_md5.h" #ifdef HAVE_BSTRING_H #include <bstring.h> /* for IRIX, FD_SET calls bzero() */ @@ -336,6 +337,17 @@ die_now = 1; } +static int volatile client_socket = -1; + +static void close_client_socket(int sig) +{ + if (client_socket != -1) { + close(client_socket); + } + + client_socket = -1; +} + /* volatile just in case */ static int volatile shutdown_pending; static int volatile restart_pending; @@ -659,8 +671,12 @@ current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd, my_child_num, sbh, bucket_alloc); if (current_conn) { + apr_os_sock_get((apr_os_sock_t *)&client_socket, csd); + ap_process_connection(current_conn, csd); ap_lingering_close(current_conn); + + client_socket = -1; } /* Check the pod and the generation number after processing a @@ -755,6 +771,7 @@ * The pod is used for signalling the graceful restart. 
*/ apr_signal(AP_SIG_GRACEFUL, stop_listening); + apr_signal(SIGINT, close_client_socket); child_main(slot); } @@ -803,6 +820,8 @@ int free_slots[MAX_SPAWN_RATE]; int last_non_dead; int total_non_dead; + int status; + static apr_time_t maxed_out = 0; /* initialize the free_list */ free_length = 0; @@ -813,8 +832,6 @@ total_non_dead = 0; for (i = 0; i < ap_daemons_limit; ++i) { - int status; - if (i >= ap_max_daemons_limit && free_length == idle_spawn_rate) break; ws = &ap_scoreboard_image->servers[i][0]; @@ -856,12 +873,17 @@ */ ap_mpm_pod_signal(pod); idle_spawn_rate = 1; + maxed_out = 0; } else if (idle_count < ap_daemons_min_free) { /* terminate the free list */ if (free_length == 0) { /* only report this condition once */ static int reported = 0; + static unsigned char sb_digest[APR_MD5_DIGESTSIZE]; + apr_time_t now = apr_time_now(); + apr_md5_ctx_t ctx; + pid_t pid; if (!reported) { ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, @@ -870,6 +892,83 @@ reported = 1; } idle_spawn_rate = 1; + + /* If after one maintenance interval we still see the same + * situation on the scoreboard, close all client sockets in + * read state and at least 10% of all client sockets. + * Crude, but seems to clear things out. + */ + if (maxed_out) { + apr_time_t diff = now - maxed_out; + + if (diff >= SCOREBOARD_MAINTENANCE_INTERVAL) { + unsigned char cur_digest[APR_MD5_DIGESTSIZE]; + + /* Current digest of the scoreboard. + */ + apr_md5_init(&ctx); + for (i = 0; i < ap_daemons_limit; ++i) { + pid = ap_scoreboard_image->parent[i].pid; + apr_md5_update(&ctx, &pid, sizeof(pid)); + + status = ap_scoreboard_image->servers[i][0].status; + apr_md5_update(&ctx, &status, sizeof(status)); + } + apr_md5_final(cur_digest, &ctx); + + /* If we haven't had a change for one maintenance + * interval, we need to make room. 
+ */ + if (memcmp(sb_digest, cur_digest, APR_MD5_DIGESTSIZE)) { + maxed_out = 0; + } + else { + int rdrs = 0, cull = ap_daemons_limit / 10; + + /* Disconnect all readers (includes keep alive). + */ + for (i = 0; i < ap_daemons_limit; ++i) { + pid = ap_scoreboard_image->parent[i].pid; + status = ap_scoreboard_image->servers[i][0].status; + + if (status == SERVER_BUSY_READ || + status == SERVER_BUSY_KEEPALIVE) { + ap_mpm_safe_kill(pid, SIGINT); + rdrs++; + } + } + + /* Make up to 10% of all sockets, if required. + */ + for (i = 0; i < ap_daemons_limit && cull > rdrs; ++i) { + pid = ap_scoreboard_image->parent[i].pid; + status = ap_scoreboard_image->servers[i][0].status; + + if (status != SERVER_BUSY_READ && + status != SERVER_BUSY_KEEPALIVE) { + ap_mpm_safe_kill(pid, SIGINT); + cull--; + } + } + } + } + } + else { + /* Create digest of the scoreboard, see if things + * change next time around. + */ + apr_md5_init(&ctx); + for (i = 0; i < ap_daemons_limit; ++i) { + pid = ap_scoreboard_image->parent[i].pid; + apr_md5_update(&ctx, &pid, sizeof(pid)); + + status = ap_scoreboard_image->servers[i][0].status; + apr_md5_update(&ctx, &status, sizeof(status)); + } + apr_md5_final(sb_digest, &ctx); + + maxed_out = now; + } } else { if (idle_spawn_rate >= 8) { @@ -902,10 +1001,13 @@ else if (idle_spawn_rate < MAX_SPAWN_RATE) { idle_spawn_rate *= 2; } + + maxed_out = 0; } } else { idle_spawn_rate = 1; + maxed_out = 0; } }