Hi Stefan,

On Sun, Feb 5, 2017 at 7:51 PM, Stefan Priebe - Profihost AG
<s.pri...@profihost.ag> wrote:
>
> tested your patch against mod_ssl. I haven't seen any pool crashes again
> so it seems to fix this issue.
>
> But two new ones:

A possibly promising fix was suggested (in another thread) by yet
another talented Stefan :)
He spotted a possible issue in mpm_event which could mainly affect
mod_http2.
I applied his suggestion in the attached patch, and extrapolated it to
similar code in mod_h2 (so any remaining errors are mine...).
Would you please test this one?
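
For context, here is my understanding of the issue, as a simplified
sketch (illustrative names, not the exact httpd code; error checking
omitted):

    #include <apr_pools.h>
    #include <apr_allocator.h>

    /* Listener thread: ptrans gets a dedicated allocator, but no
     * mutex is set on it. */
    static apr_pool_t *create_ptrans_unsafe(apr_pool_t *pconf)
    {
        apr_allocator_t *allocator;
        apr_pool_t *ptrans = NULL;

        apr_allocator_create(&allocator);
        apr_pool_create_ex(&ptrans, pconf, NULL, allocator);
        apr_allocator_owner_set(allocator, ptrans);
        return ptrans;
    }

    /* Worker thread (e.g. mod_http2 setting up a slave connection):
     * creating/destroying a subpool manipulates the parent pool's
     * child list under the parent allocator's mutex; with no mutex
     * set, concurrent calls can corrupt the pool internals. */
    static apr_pool_t *create_subpool_racy(apr_pool_t *ptrans)
    {
        apr_pool_t *subpool = NULL;
        apr_pool_create(&subpool, ptrans); /* races with the listener */
        return subpool;
    }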

@icing: I changed the parent pool of the slave connections from the
mplx pool to the master connection's pool (ptrans), just to simplify
the allocators in place for now.
I don't see that this was needed from a concurrency POV, but if you
want to avoid locking when creating the slaves' pools, we could keep
the dedicated allocator after all: it would reduce possible contention
on ptrans' mutex, and since mplx doesn't seem to have other subpools,
the dedicated allocator would need no mutex of its own. That's a
trade-off with memory consumption, though.
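
FWIW, the pattern the patch moves to, as a minimal sketch (error
checking omitted; make_trans_pool is an illustrative name only):

    #include <apr_pools.h>
    #include <apr_allocator.h>
    #include <apr_thread_mutex.h>

    /* Create a transaction pool with its own allocator, then give the
     * allocator a mutex so that subpools (e.g. h2 slave connection
     * pools) can be created/destroyed from other threads safely. */
    static apr_pool_t *make_trans_pool(apr_pool_t *pconf,
                                       apr_size_t max_free)
    {
        apr_allocator_t *allocator;
        apr_thread_mutex_t *mutex = NULL;
        apr_pool_t *ptrans = NULL;

        apr_allocator_create(&allocator);
        apr_allocator_max_free_set(allocator, max_free);
        apr_pool_create_ex(&ptrans, pconf, NULL, allocator);
        apr_allocator_owner_set(allocator, ptrans);

        /* The mutex lives in ptrans itself and protects the allocator
         * (and thus subpool creation/destruction) across threads. */
        apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, ptrans);
        apr_allocator_mutex_set(allocator, mutex);

        return ptrans;
    }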


Regards,
Yann.
Index: server/mpm/event/event.c
===================================================================
--- server/mpm/event/event.c	(revision 1781789)
+++ server/mpm/event/event.c	(working copy)
@@ -1749,6 +1749,7 @@ static void * APR_THREAD_FUNC listener_thread(apr_
                     if (ptrans == NULL) {
                         /* create a new transaction pool for each accepted socket */
                         apr_allocator_t *allocator;
+                        apr_thread_mutex_t *mutex = NULL;
 
                         apr_allocator_create(&allocator);
                         apr_allocator_max_free_set(allocator,
@@ -1762,6 +1763,8 @@ static void * APR_THREAD_FUNC listener_thread(apr_
                             signal_threads(ST_GRACEFUL);
                             return NULL;
                         }
+                        apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, ptrans);
+                        apr_allocator_mutex_set(allocator, mutex);
                     }
                     apr_pool_tag(ptrans, "transaction");
 
Index: modules/http2/h2_conn.c
===================================================================
--- modules/http2/h2_conn.c	(revision 1781789)
+++ modules/http2/h2_conn.c	(working copy)
@@ -247,9 +247,10 @@ apr_status_t h2_conn_pre_close(struct h2_ctx *ctx,
     return status;
 }
 
-conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent)
+conn_rec *h2_slave_create(conn_rec *master, int slave_id)
 {
-    apr_allocator_t *allocator;
+    apr_allocator_t *allocator = NULL;
+    apr_thread_mutex_t *mutex = NULL;
     apr_pool_t *pool;
     conn_rec *c;
     void *cfg;
@@ -264,9 +265,11 @@ apr_status_t h2_conn_pre_close(struct h2_ctx *ctx,
      * another thread.
      */
     apr_allocator_create(&allocator);
-    apr_pool_create_ex(&pool, parent, NULL, allocator);
+    apr_pool_create_ex(&pool, master->pool, NULL, allocator);
     apr_pool_tag(pool, "h2_slave_conn");
     apr_allocator_owner_set(allocator, pool);
+    apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool);
+    apr_allocator_mutex_set(allocator, mutex);
 
     c = (conn_rec *) apr_palloc(pool, sizeof(conn_rec));
     if (c == NULL) {
Index: modules/http2/h2_conn.h
===================================================================
--- modules/http2/h2_conn.h	(revision 1781789)
+++ modules/http2/h2_conn.h	(working copy)
@@ -66,7 +66,7 @@ typedef enum {
 h2_mpm_type_t h2_conn_mpm_type(void);
 
 
-conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent);
+conn_rec *h2_slave_create(conn_rec *master, int slave_id);
 void h2_slave_destroy(conn_rec *slave);
 
 apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd);
Index: modules/http2/h2_mplx.c
===================================================================
--- modules/http2/h2_mplx.c	(revision 1781789)
+++ modules/http2/h2_mplx.c	(working copy)
@@ -33,6 +33,7 @@
 #include "h2_private.h"
 #include "h2_bucket_beam.h"
 #include "h2_config.h"
+#include "h2_session.h"
 #include "h2_conn.h"
 #include "h2_ctx.h"
 #include "h2_h2.h"
@@ -245,7 +246,7 @@ static void h2_mplx_destroy(h2_mplx *m)
                   m->id, (int)h2_ihash_count(m->tasks));
     check_tx_free(m);
     /* pool will be destroyed as child of h2_session->pool,
-       slave connection pools are children of m->pool */
+       slave connection pools are children of m->c->pool */
 }
 
 /**
@@ -259,32 +260,27 @@ static void h2_mplx_destroy(h2_mplx *m)
  *   their HTTP/1 cousins, the separate allocator seems to work better
  *   than protecting a shared h2_session one with an own lock.
  */
-h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, 
-                        const h2_config *conf, 
-                        apr_interval_time_t stream_timeout,
-                        h2_workers *workers)
+h2_mplx *h2_mplx_create(h2_session *session, h2_workers *workers)
 {
     apr_status_t status = APR_SUCCESS;
-    apr_allocator_t *allocator = NULL;
+    apr_pool_t *parent = session->pool;
+    const h2_config *conf = session->config;
     h2_mplx *m;
     ap_assert(conf);
     
-    status = apr_allocator_create(&allocator);
-    if (status != APR_SUCCESS) {
-        return NULL;
-    }
-
     m = apr_pcalloc(parent, sizeof(h2_mplx));
     if (m) {
+        conn_rec *c = session->c;
+
         m->id = c->id;
         APR_RING_ELEM_INIT(m, link);
         m->c = c;
-        apr_pool_create_ex(&m->pool, parent, NULL, allocator);
+
+        apr_pool_create(&m->pool, parent);
         if (!m->pool) {
             return NULL;
         }
         apr_pool_tag(m->pool, "h2_mplx");
-        apr_allocator_owner_set(allocator, m->pool);
         
         status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT,
                                          m->pool);
@@ -311,7 +307,7 @@ static void h2_mplx_destroy(h2_mplx *m)
         m->tasks = h2_ihash_create(m->pool, offsetof(h2_task,stream_id));
         m->redo_tasks = h2_ihash_create(m->pool, offsetof(h2_task, stream_id));
 
-        m->stream_timeout = stream_timeout;
+        m->stream_timeout = session->s->timeout;
         m->workers = workers;
         m->workers_max = workers->max_workers;
         m->workers_limit = 6; /* the original h1 max parallel connections */
@@ -891,7 +887,7 @@ static h2_task *next_stream_task(h2_mplx *m)
                 slave = *pslave;
             }
             else {
-                slave = h2_slave_create(m->c, stream->id, m->pool);
+                slave = h2_slave_create(m->c, stream->id);
                 new_conn = 1;
             }
             
Index: modules/http2/h2_mplx.h
===================================================================
--- modules/http2/h2_mplx.h	(revision 1781789)
+++ modules/http2/h2_mplx.h	(working copy)
@@ -38,7 +38,7 @@ struct apr_pool_t;
 struct apr_thread_mutex_t;
 struct apr_thread_cond_t;
 struct h2_bucket_beam;
-struct h2_config;
+struct h2_session;
 struct h2_ihash_t;
 struct h2_task;
 struct h2_stream;
@@ -124,9 +124,7 @@ apr_status_t h2_mplx_child_init(apr_pool_t *pool,
  * Create the multiplexer for the given HTTP2 session. 
  * Implicitly has reference count 1.
  */
-h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *master, 
-                        const struct h2_config *conf, 
-                        apr_interval_time_t stream_timeout,
+h2_mplx *h2_mplx_create(struct h2_session *session,
                         struct h2_workers *workers);
 
 /**
Index: modules/http2/h2_session.c
===================================================================
--- modules/http2/h2_session.c	(revision 1781789)
+++ modules/http2/h2_session.c	(working copy)
@@ -920,8 +920,7 @@ static h2_session *h2_session_create_int(conn_rec
             return NULL;
         }
         
-        session->mplx = h2_mplx_create(c, session->pool, session->config, 
-                                       session->s->timeout, workers);
+        session->mplx = h2_mplx_create(session, workers);
         
         h2_mplx_set_consumed_cb(session->mplx, update_window, session);
         
