On Mon, Aug 1, 2022 at 8:15 PM Ruediger Pluem <rpl...@apache.org> wrote:
>
> On 8/1/22 5:05 PM, Ivan Zhakov wrote:
> >
> > My overall concern is that with the current solution as of r1902858,
> > there are valid and reasonable allocation patterns that will cause
> > unbounded memory usage.
> >
> > For example, nothing prevents current or future PCRE versions from doing 
> > this:
> >
> >     for (int i = 0; i < N; i++)
> >     {
> >         void *p = private_malloc();
> >         private_free(p);
> >     }
> >
> > which is going to cause unbounded memory usage, because private_free()
> > is a no-op and everything that was allocated is retained.
>
> This is true, but these kinds of allocation patterns would also cause a
> lot of problems with a malloc/free backend, so I think they are unlikely
> and would need to be addressed within PCRE. But even if they do happen
> and cannot be fixed for our users by adjusting PCRE, e.g. because they
> are stuck with distribution versions, we could still tackle this with an
> apr_bucket_alloc / apr_bucket_free approach, or in the worst case even
> with a malloc / free approach for particular "bad" PCRE versions. For
> now, though, the current code seems to work well with the current PCRE
> allocation pattern.
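
(For reference, the apr_bucket_alloc / apr_bucket_free fallback mentioned
above could look roughly like the sketch below: freed blocks go back to
the bucket allocator's freelist instead of being retained. This is only
an illustration, not a proposal; the pool handling and sizes are made up:)

    #include <apr_general.h>
    #include <apr_pools.h>
    #include <apr_buckets.h>

    int main(void)
    {
        apr_pool_t *pool;
        apr_bucket_alloc_t *ba;
        int i;

        apr_initialize();
        apr_pool_create(&pool, NULL);
        ba = apr_bucket_alloc_create(pool); /* per-thread in httpd;
                                             * destroyed with the pool */

        for (i = 0; i < 1000000; i++) {
            void *p = apr_bucket_alloc(256, ba); /* private_malloc() */
            apr_bucket_free(p);                  /* private_free() */
        }

        apr_pool_destroy(pool);
        apr_terminate();
        return 0;
    }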

We could switch to an apr_allocator+apr_memnode_t pattern right now, and
trade the 8K used by the current (sub)pool for an overhead of only
sizeof(apr_memnode_t)+8 per allocation, with private_free() simply
calling apr_allocator_free().
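
For illustration, the core trick is to save the apr_memnode_t pointer in
front of the block handed out, so that private_free() can recover the
node and give it back to the allocator. A minimal standalone sketch (the
my_malloc/my_free names and the 64K max_free value are made up):

    #include <string.h>
    #include <apr_general.h>
    #include <apr_allocator.h>

    /* Aligned size of the pointer saved in front of each allocation */
    #define SIZEOF_VOIDP_ALIGNED APR_ALIGN_DEFAULT(APR_SIZEOF_VOIDP)

    static void *my_malloc(apr_allocator_t *ta, apr_size_t size)
    {
        apr_memnode_t *node;

        node = apr_allocator_alloc(ta, SIZEOF_VOIDP_ALIGNED + size);
        if (node == NULL)
            return NULL;
        *(void **)node->first_avail = node;  /* saved for my_free() */
        return node->first_avail + SIZEOF_VOIDP_ALIGNED;
    }

    static void my_free(apr_allocator_t *ta, void *block)
    {
        apr_memnode_t *node = *(void **)((char *)block - SIZEOF_VOIDP_ALIGNED);
        apr_allocator_free(ta, node);  /* recycled or freed per max_free */
    }

    int main(void)
    {
        apr_allocator_t *ta;
        int i;

        apr_initialize();
        if (apr_allocator_create(&ta) != APR_SUCCESS)
            return 1;
        apr_allocator_max_free_set(ta, 64 * 1024);

        /* Even the malloc/free-in-a-loop pattern above stays bounded,
         * since each my_free() returns the node for reuse.
         */
        for (i = 0; i < 1000000; i++) {
            void *p = my_malloc(ta, 256);
            memset(p, 0, 256);
            my_free(ta, p);
        }

        apr_allocator_destroy(ta);
        apr_terminate();
        return 0;
    }

With apr_allocator_max_free_set() the allocator keeps at most that many
bytes of free nodes around for reuse, so memory usage stays bounded even
for allocation patterns that a never-cleared thread pool would grow on.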

Something like the attached patch?


Regards;
Yann.
Index: server/util_pcre.c
===================================================================
--- server/util_pcre.c	(revision 1902909)
+++ server/util_pcre.c	(working copy)
@@ -294,7 +294,17 @@ typedef int*        match_vector_pt;
 #endif
 
 #if APREG_USE_THREAD_LOCAL
-static AP_THREAD_LOCAL apr_pool_t *thread_pool;
+#include "mpm_common.h" /* for ap_max_mem_free */
+
+static AP_THREAD_LOCAL apr_allocator_t *thread_alloc;
+static apr_status_t destroy_thread_alloc(void *alloc)
+{
+    apr_allocator_destroy(alloc);
+    return APR_SUCCESS;
+}
+
+/* Aligned size of the apr_memnode_t pointer saved in front of each allocation */
+#define SIZEOF_VOIDP_ALIGNED APR_ALIGN_DEFAULT(APR_SIZEOF_VOIDP)
 #endif
 
 struct match_data_state {
@@ -303,8 +313,8 @@ struct match_data_state {
     apr_size_t buf_used;
 
 #if APREG_USE_THREAD_LOCAL
-    apr_thread_t *thd;
-    apr_pool_t *pool;
+    apr_pool_t *tp;
+    apr_allocator_t *ta;
 #endif
 
 #ifdef HAVE_PCRE2
@@ -326,17 +336,36 @@ static void * private_malloc(size_t size, void *ct
     }
 
 #if APREG_USE_THREAD_LOCAL
-    if (state->thd) {
-        apr_pool_t *pool = state->pool;
-        if (pool == NULL) {
-            pool = thread_pool;
-            if (pool == NULL) {
-                apr_pool_create(&pool, apr_thread_pool_get(state->thd));
-                thread_pool = pool;
+    if (state->tp) {
+        apr_allocator_t *ta = state->ta;
+        apr_memnode_t *node;
+
+        if (ta == NULL) {
+            ta = thread_alloc;
+            if (ta == NULL) {
+                if (apr_allocator_create(&ta) != APR_SUCCESS) {
+                    apr_abortfunc_t abort_fn = apr_pool_abort_get(state->tp);
+                    if (abort_fn)
+                        abort_fn(APR_ENOMEM);
+                    return NULL;
+                }
+                apr_allocator_max_free_set(ta, ap_max_mem_free);
+                apr_pool_cleanup_register(state->tp, ta, destroy_thread_alloc,
+                                          apr_pool_cleanup_null);
+                thread_alloc = ta;
             }
-            state->pool = pool;
+            state->ta = ta;
         }
-        return apr_palloc(pool, size);
+
+        node = apr_allocator_alloc(ta, SIZEOF_VOIDP_ALIGNED + size);
+        if (node == NULL) {
+            apr_abortfunc_t abort_fn = apr_pool_abort_get(state->tp);
+            if (abort_fn)
+                abort_fn(APR_ENOMEM);
+            return NULL;
+        }
+        *(void **)node->first_avail = node;
+        return node->first_avail + SIZEOF_VOIDP_ALIGNED;
     }
 #endif
 
@@ -354,8 +383,10 @@ static void private_free(void *block, void *ctx)
     }
 
 #if APREG_USE_THREAD_LOCAL
-    if (state->thd) {
-        /* Freed in cleanup_state() eventually. */
+    if (state->ta) {
+        /* This node was allocated from the thread allocator; recycle it. */
+        apr_memnode_t *node = *(void **)((char *)block - SIZEOF_VOIDP_ALIGNED);
+        apr_allocator_free(state->ta, node);
         return;
     }
 #endif
@@ -369,8 +400,11 @@ int setup_state(struct match_data_state *state, ap
     state->buf_used = 0;
 
 #if APREG_USE_THREAD_LOCAL
-    state->thd = ap_thread_current();
-    state->pool = NULL;
+    {
+        apr_thread_t *thd = ap_thread_current();
+        state->tp = (thd) ? apr_thread_pool_get(thd) : NULL;
+        state->ta = NULL;
+    }
 #endif
 
 #ifdef HAVE_PCRE2
@@ -412,15 +446,6 @@ void cleanup_state(struct match_data_state *state)
         private_free(state->match_data, state);
     }
 #endif
-
-#if APREG_USE_THREAD_LOCAL
-    if (state->pool) {
-        /* Let the thread's pool allocator recycle or free according
-         * to its max_free setting.
-         */
-        apr_pool_clear(state->pool);
-    }
-#endif
 }
 
 AP_DECLARE(int) ap_regexec(const ap_regex_t *preg, const char *string,
