Author: rhuijben
Date: Sun Nov 15 12:52:43 2015
New Revision: 1714449

URL: http://svn.apache.org/viewvc?rev=1714449&view=rev
Log:
Start replacing a whole lot of return APR_ENOTIMPL in the serf listening
and server-side code with actual connection setup code, to prepare for some
actual http2 testing in our testsuite.

With http/2 the server and client side can re-use a lot of code, and I don't
feel like writing a dedicated test server that would need its own testing.

I will try to prepare the server for allowing http/1 too, but that is not
a priority for me right now.

* context.c
  (check_dirty_pollsets): Update incoming connections like we do outgoing.
  (serf_context_create_ex): Init new list.

* incoming.c
  (client_detect_eof): New function.
  (client_connected): New function.
  (serf__process_client): Call client_connected at initial connect.
  (serf__process_listener): Properly handle EINPROGRESS.
  (incoming_cleanup): New function.
  (serf_incoming_create): Rename to...
  (serf_incoming_create2): ... this and add essential arguments to allow
    supporting things like https, etc.
  (serf_incoming_create): Reimplement as a wrapper around
    serf_incoming_create2.
  (serf_listener_create): Handle error scenarios. Remove setting
    APR_SO_REUSEADDR (see r1711233 for reasons).
  (serf__incoming_update_pollset): New function.

* serf.h
  (serf_incoming_closed_t): New typedef.
  (serf_incoming_create): Add ### comment.
  (serf_incoming_create2): New function.
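
  A minimal usage sketch, assuming application-supplied my_* callbacks and
  baton (compare client_acceptor in the new test/test_server.c):

      static apr_status_t accept_cb(serf_context_t *ctx,
                                    serf_listener_t *l,
                                    void *accept_baton,
                                    apr_socket_t *insock,
                                    apr_pool_t *pool)
      {
          serf_incoming_t *incoming;
          my_baton_t *b = accept_baton;   /* application-defined baton */

          /* Wrap the accepted socket in a serf_incoming_t; the setup,
             closed and request callbacks are supplied by the caller. */
          return serf_incoming_create2(&incoming, ctx, insock,
                                       my_setup, b,
                                       my_closed, b,
                                       my_request_cb, b,
                                       pool);
      }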

* serf_private.h
  (serf_context_t): Use bool for dirty pollset. Add incomings array.
  (serf_incoming_t): Add many variables, similar to their outgoing counterparts.
  (serf_connection_t): Use bool for dirty_conn.
  (serf__incoming_update_pollset): New function.

* test/test_all.c
  (tests): Register the new server test suite.

* test/test_serf.h
  (test_server): New function.

* test/test_server.c
  New file.

* test/test_util.c
  (setup_test_client_context): Don't overwrite the context if one is
    already set.

Added:
    serf/trunk/test/test_server.c   (with props)
Modified:
    serf/trunk/context.c
    serf/trunk/incoming.c
    serf/trunk/serf.h
    serf/trunk/serf_private.h
    serf/trunk/test/test_all.c
    serf/trunk/test/test_serf.h
    serf/trunk/test/test_util.c

Modified: serf/trunk/context.c
URL: 
http://svn.apache.org/viewvc/serf/trunk/context.c?rev=1714449&r1=1714448&r2=1714449&view=diff
==============================================================================
--- serf/trunk/context.c (original)
+++ serf/trunk/context.c Sun Nov 15 12:52:43 2015
@@ -75,6 +75,20 @@ static apr_status_t check_dirty_pollsets
             return status;
     }
 
+    for (i = ctx->incomings->nelts; i--; ) {
+        serf_incoming_t *incoming = GET_INCOMING(ctx, i);
+        apr_status_t status;
+
+        if (!incoming->dirty_conn) {
+            continue;
+        }
+
+        incoming->dirty_conn = false;
+
+        if ((status = serf__incoming_update_pollset(incoming)) != APR_SUCCESS)
+            return status;
+    }
+
     /* reset our context flag now */
     ctx->dirty_pollset = 0;
 
@@ -168,6 +182,9 @@ serf_context_t *serf_context_create_ex(
     /* default to a single connection since that is the typical case */
     ctx->conns = apr_array_make(pool, 1, sizeof(serf_connection_t *));
 
+    /* and we typically have no servers */
+    ctx->incomings = apr_array_make(pool, 0, sizeof(serf_incoming_t *));
+
     /* Initialize progress status */
     ctx->progress_read = 0;
     ctx->progress_written = 0;

Modified: serf/trunk/incoming.c
URL: 
http://svn.apache.org/viewvc/serf/trunk/incoming.c?rev=1714449&r1=1714448&r2=1714449&view=diff
==============================================================================
--- serf/trunk/incoming.c (original)
+++ serf/trunk/incoming.c Sun Nov 15 12:52:43 2015
@@ -27,6 +27,68 @@
 
 #include "serf_private.h"
 
+static apr_status_t client_detect_eof(void *baton,
+                                      serf_bucket_t *aggregator)
+{
+    serf_incoming_t *client = baton;
+    client->hit_eof = true;
+    return APR_EAGAIN;
+}
+
+static apr_status_t client_connected(serf_incoming_t *client)
+{
+    /* serf_context_t *ctx = client->ctx; */
+    apr_status_t status;
+    serf_bucket_t *ostream;
+
+    /* ### TODO: Store ip address in config for logging */
+
+    serf__log(LOGLVL_DEBUG, LOGCOMP_CONN, __FILE__, client->config,
+              "socket for client 0x%x connected\n", client);
+
+    /* ### Connection does auth setup here */
+
+    if (client->ostream_head == NULL) {
+        client->ostream_head = serf_bucket_aggregate_create(client->allocator);
+    }
+
+    if (client->ostream_tail == NULL) {
+        client->ostream_tail = serf_bucket_aggregate_create(client->allocator);
+
+        serf_bucket_aggregate_hold_open(client->ostream_tail,
+                                        client_detect_eof, client);
+    }
+
+    ostream = client->ostream_tail;
+
+    status = client->setup(client->skt,
+                           &client->stream,
+                           &ostream,
+                           client->setup_baton, client->pool);
+
+    if (status) {
+        /* extra destroy here since it wasn't added to the head bucket yet. */
+        serf_bucket_destroy(client->ostream_tail);
+        /* ### Cleanup! (serf__connection_pre_cleanup) */
+        return status;
+    }
+
+    /* Share the configuration with all the buckets in the newly created output
+    chain (see PLAIN or ENCRYPTED scenarios), including the request buckets
+    created by the application (ostream_tail will handle this for us). */
+    serf_bucket_set_config(client->ostream_head, client->config);
+
+    /* Share the configuration with the ssl_decrypt and socket buckets. The
+    response buckets wrapping the ssl_decrypt/socket buckets won't get the
+    config automatically because they are upstream. */
+    serf_bucket_set_config(client->stream, client->config);
+
+    serf_bucket_aggregate_append(client->ostream_head,
+                                 ostream);
+
+    return status;
+}
+
 static apr_status_t read_from_client(serf_incoming_t *client)
 {
     return APR_ENOTIMPL;
@@ -40,6 +102,15 @@ static apr_status_t write_to_client(serf
 apr_status_t serf__process_client(serf_incoming_t *client, apr_int16_t events)
 {
     apr_status_t rv;
+
+    if (client->wait_for_connect && (events & (APR_POLLIN | APR_POLLOUT))) {
+        rv = client_connected(client);
+        client->wait_for_connect = FALSE;
+        if (rv) {
+            return rv;
+        }
+    }
+
     if ((events & APR_POLLIN) != 0) {
         rv = read_from_client(client);
         if (rv) {
@@ -67,58 +138,163 @@ apr_status_t serf__process_client(serf_i
 
 apr_status_t serf__process_listener(serf_listener_t *l)
 {
-    apr_status_t rv;
+    apr_status_t status;
     apr_socket_t *in;
     apr_pool_t *p;
     /* THIS IS NOT OPTIMAL */
     apr_pool_create(&p, l->pool);
 
-    rv = apr_socket_accept(&in, l->skt, p);
+    status = apr_socket_accept(&in, l->skt, p);
+
+    if (status != APR_SUCCESS
+        && !APR_STATUS_IS_EINPROGRESS(status)) {
 
-    if (rv) {
         apr_pool_destroy(p);
-        return rv;
+        return status;
     }
 
-    rv = l->accept_func(l->ctx, l, l->accept_baton, in, p);
+    status = l->accept_func(l->ctx, l, l->accept_baton, in, p);
 
-    if (rv) {
+    if (status) {
         apr_pool_destroy(p);
-        return rv;
     }
 
-    return rv;
+    return status;
 }
 
+static apr_status_t incoming_cleanup(void *baton)
+{
+    serf_incoming_t *incoming = baton;
 
-apr_status_t serf_incoming_create(
+    apr_socket_close(incoming->skt);
+
+
+    return APR_SUCCESS;
+}
+
+apr_status_t serf_incoming_create2(
     serf_incoming_t **client,
     serf_context_t *ctx,
     apr_socket_t *insock,
-    void *request_baton,
+    serf_connection_setup_t setup,
+    void *setup_baton,
+    serf_incoming_closed_t closed,
+    void *closed_baton,
     serf_incoming_request_cb_t request,
+    void *request_baton,
     apr_pool_t *pool)
 {
     apr_status_t rv;
-    serf_incoming_t *ic = apr_palloc(pool, sizeof(*ic));
+    apr_pool_t *ic_pool;
+
+    apr_pool_create(&ic_pool, pool);
+
+    serf_incoming_t *ic = apr_palloc(ic_pool, sizeof(*ic));
 
     ic->ctx = ctx;
+    ic->pool = ic_pool;
+    ic->allocator = serf_bucket_allocator_create(ic_pool, NULL, NULL);
     ic->baton.type = SERF_IO_CLIENT;
     ic->baton.u.client = ic;
     ic->request_baton =  request_baton;
     ic->request = request;
     ic->skt = insock;
+
+    ic->dirty_conn = false;
+    ic->wait_for_connect = true;
+
+    ic->setup = setup;
+    ic->setup_baton = setup_baton;
+    ic->closed = closed;
+    ic->closed_baton = closed_baton;
+
+    /* A bucket wrapped around our socket (for reading responses). */
+    ic->stream = NULL;
+    ic->ostream_head = NULL;
+    ic->ostream_tail = NULL;
+    ic->ssltunnel_ostream = NULL;
+
     ic->desc.desc_type = APR_POLL_SOCKET;
     ic->desc.desc.s = ic->skt;
-    ic->desc.reqevents = APR_POLLIN;
+    ic->desc.reqevents = APR_POLLIN | APR_POLLERR | APR_POLLHUP;
+
+    /* Store the connection specific info in the configuration store */
+    /* ### Doesn't work... Doesn't support listeners yet*/
+    /*rv = serf__config_store_get_config(ctx, ic, &config, pool);
+    if (rv) {
+    apr_pool_destroy(l->pool);
+    return rv;
+    }
+    ic->config = config;*/
+    ic->config = NULL; /* FIX!! */
 
     rv = ctx->pollset_add(ctx->pollset_baton,
                          &ic->desc, &ic->baton);
-    *client = ic;
+
+    if (!rv) {
+        apr_pool_cleanup_register(ic->pool, ic, incoming_cleanup,
+                                  apr_pool_cleanup_null);
+        *client = ic;
+    }
+    else {
+        apr_pool_destroy(ic_pool);
+        /* Let caller handle the socket */
+    }
 
     return rv;
 }
 
+typedef struct ic_setup_baton_t
+{
+  serf_incoming_t *incoming;
+} ic_setup_baton_t;
+
+static apr_status_t dummy_setup(apr_socket_t *skt,
+                                serf_bucket_t **read_bkt,
+                                serf_bucket_t **write_bkt,
+                                void *setup_baton,
+                                apr_pool_t *pool)
+{
+  ic_setup_baton_t *isb = setup_baton;
+
+  *read_bkt = serf_bucket_socket_create(skt, isb->incoming->allocator);
+
+  return APR_SUCCESS;
+}
+
+static apr_status_t dummy_closed(serf_incoming_t *incoming,
+                                 void *closed_baton,
+                                 apr_status_t why,
+                                 apr_pool_t *pool)
+{
+  return APR_SUCCESS;
+}
+
+apr_status_t serf_incoming_create(
+    serf_incoming_t **client,
+    serf_context_t *ctx,
+    apr_socket_t *insock,
+    void *request_baton,
+    serf_incoming_request_cb_t request,
+    apr_pool_t *pool)
+{
+  ic_setup_baton_t *isb;
+  apr_status_t status;
+
+  /* Allocate baton to hand over created listener
+     (to get access to its allocator) */
+  isb = apr_pcalloc(pool, sizeof(*isb));
+
+  status = serf_incoming_create2(client, ctx, insock,
+                                 dummy_setup, isb,
+                                 dummy_closed, isb,
+                                 request, request_baton, pool);
+
+  if (!status)
+    isb->incoming = *client;
+
+  return status;
+}
 
 apr_status_t serf_listener_create(
     serf_listener_t **listener,
@@ -142,8 +318,10 @@ apr_status_t serf_listener_create(
     apr_pool_create(&l->pool, pool);
 
     rv = apr_sockaddr_info_get(&sa, host, APR_UNSPEC, port, 0, l->pool);
-    if (rv)
+    if (rv) {
+        apr_pool_destroy(l->pool);
         return rv;
+    }
 
     rv = apr_socket_create(&l->skt, sa->family,
                            SOCK_STREAM,
@@ -154,17 +332,17 @@ apr_status_t serf_listener_create(
     if (rv)
         return rv;
 
-    rv = apr_socket_opt_set(l->skt, APR_SO_REUSEADDR, 1);
-    if (rv)
-        return rv;
-
     rv = apr_socket_bind(l->skt, sa);
-    if (rv)
-        return rv;
+    if (rv) {
+      apr_pool_destroy(l->pool);
+      return rv;
+    }
 
     rv = apr_socket_listen(l->skt, 5);
-    if (rv)
+    if (rv) {
+        apr_pool_destroy(l->pool);
         return rv;
+    }
 
     l->desc.desc_type = APR_POLL_SOCKET;
     l->desc.desc.s = l->skt;
@@ -172,10 +350,100 @@ apr_status_t serf_listener_create(
 
     rv = ctx->pollset_add(ctx->pollset_baton,
                             &l->desc, &l->baton);
-    if (rv)
+    if (rv) {
+        apr_pool_destroy(l->pool);
         return rv;
+    }
 
     *listener = l;
 
     return APR_SUCCESS;
 }
+
+apr_status_t serf__incoming_update_pollset(serf_incoming_t *incoming)
+{
+    serf_context_t *ctx = incoming->ctx;
+    apr_status_t status;
+    apr_pollfd_t desc = { 0 };
+    bool data_waiting;
+
+    if (!incoming->skt) {
+        return APR_SUCCESS;
+    }
+
+    /* Remove the socket from the poll set. */
+    desc.desc_type = APR_POLL_SOCKET;
+    desc.desc.s = incoming->skt;
+    desc.reqevents = incoming->reqevents;
+
+    status = ctx->pollset_rm(ctx->pollset_baton,
+                             &desc, &incoming->baton);
+    if (status && !APR_STATUS_IS_NOTFOUND(status))
+        return status;
+
+    /* Now put it back in with the correct read/write values. */
+    desc.reqevents = APR_POLLIN | APR_POLLHUP | APR_POLLERR;
+
+    /* If we are not connected yet, we just want to know when we are */
+    if (incoming->wait_for_connect) {
+        data_waiting = true;
+        desc.reqevents |= APR_POLLOUT;
+    }
+    else {
+        /* Directly look at the connection data. While this may look
+           more expensive than the cheap checks later this peek is
+           just checking a bit of ram.
+
+           But it also has the nice side effect of removing references
+           from the aggregate to requests that are done.
+         */
+        if (incoming->vec_len) {
+            /* We still have vecs in the connection, which lifetime is
+               managed by buckets inside conn->ostream_head.
+
+               Don't touch ostream as that might destroy the vecs */
+
+            data_waiting = true;
+        }
+        else {
+            serf_bucket_t *ostream;
+
+            ostream = incoming->ostream_head;
+
+            if (!ostream)
+              ostream = incoming->ssltunnel_ostream;
+
+            if (ostream) {
+                const char *dummy_data;
+                apr_size_t len;
+
+                status = serf_bucket_peek(ostream, &dummy_data, &len);
+
+                if (SERF_BUCKET_READ_ERROR(status) || len > 0) {
+                    /* DATA or error waiting */
+                    data_waiting = TRUE; /* Error waiting */
+                }
+                else if (! status || APR_STATUS_IS_EOF(status)) {
+                    data_waiting = FALSE;
+                }
+                else
+                    data_waiting = FALSE; /* EAGAIN / EOF / WAIT_CONN */
+            }
+            else
+                data_waiting = FALSE;
+        }
+
+        if (data_waiting) {
+            desc.reqevents |= APR_POLLOUT;
+        }
+    }
+
+    /* save our reqevents, so we can pass it in to remove later. */
+    incoming->reqevents = desc.reqevents;
+
+    /* Note: even if we don't want to read/write this socket, we still
+     * want to poll it for hangups and errors.
+     */
+    return ctx->pollset_add(ctx->pollset_baton,
+                            &desc, &incoming->baton);
+}

Modified: serf/trunk/serf.h
URL: 
http://svn.apache.org/viewvc/serf/trunk/serf.h?rev=1714449&r1=1714448&r2=1714449&view=diff
==============================================================================
--- serf/trunk/serf.h (original)
+++ serf/trunk/serf.h Sun Nov 15 12:52:43 2015
@@ -377,6 +377,15 @@ typedef void (*serf_connection_closed_t)
     apr_pool_t *pool);
 
 /**
+ * Like serf_connection_closed_t, but applies to incoming connections.
+ */
+typedef void(*serf_incoming_closed_t)(
+    serf_incoming_t *incoming,
+    void *closed_baton,
+    apr_status_t why,
+    apr_pool_t *pool);
+
+/**
  * Response data has arrived and should be processed.
  *
  * Whenever response data for @a request arrives (initially, or continued data
@@ -512,6 +521,7 @@ typedef apr_status_t (*serf_incoming_req
     void *request_baton,
     apr_pool_t *pool);
 
+/* ### Arguments in bad order. Doesn't support SSL */
 apr_status_t serf_incoming_create(
     serf_incoming_t **client,
     serf_context_t *ctx,
@@ -520,6 +530,17 @@ apr_status_t serf_incoming_create(
     serf_incoming_request_cb_t request,
     apr_pool_t *pool);
 
+apr_status_t serf_incoming_create2(
+    serf_incoming_t **client,
+    serf_context_t *ctx,
+    apr_socket_t *insock,
+    serf_connection_setup_t setup,
+    void *setup_baton,
+    serf_incoming_closed_t closed,
+    void *closed_baton,
+    serf_incoming_request_cb_t request,
+    void *request_baton,
+    apr_pool_t *pool);
 
 
 

Modified: serf/trunk/serf_private.h
URL: 
http://svn.apache.org/viewvc/serf/trunk/serf_private.h?rev=1714449&r1=1714448&r2=1714449&view=diff
==============================================================================
--- serf/trunk/serf_private.h (original)
+++ serf/trunk/serf_private.h Sun Nov 15 12:52:43 2015
@@ -279,12 +279,15 @@ struct serf_context_t {
     serf_socket_remove_t pollset_rm;
 
     /* one of our connections has a dirty pollset state. */
-    int dirty_pollset;
+    bool dirty_pollset;
 
     /* the list of active connections */
     apr_array_header_t *conns;
 #define GET_CONN(ctx, i) (((serf_connection_t **)(ctx)->conns->elts)[i])
 
+    apr_array_header_t *incomings;
+#define GET_INCOMING(ctx, i) (((serf_incoming_t **)(ctx)->incomings->elts)[i])
+
     /* Proxy server address */
     apr_sockaddr_t *proxy_address;
 
@@ -328,8 +331,40 @@ struct serf_incoming_t {
     serf_io_baton_t baton;
     void *request_baton;
     serf_incoming_request_cb_t request;
-    apr_socket_t *skt;
+
+    apr_socket_t *skt; /* Lives in parent of POOL */
+    apr_pool_t *pool; 
+    serf_bucket_alloc_t *allocator;
+
     apr_pollfd_t desc;
+
+    /* the last reqevents we gave to pollset_add */
+    apr_int16_t reqevents;
+
+    struct iovec vec[IOV_MAX];
+    int vec_len;
+
+    serf_connection_setup_t setup;
+    void *setup_baton;
+    serf_incoming_closed_t closed;
+    void *closed_baton;
+
+    bool dirty_conn;
+    bool wait_for_connect;
+    bool hit_eof;
+
+    /* A bucket wrapped around our socket (for reading responses). */
+    serf_bucket_t *stream;
+    /* A reference to the aggregate bucket that provides the boundary between
+    * request level buckets and connection level buckets.
+    */
+    serf_bucket_t *ostream_head;
+    serf_bucket_t *ostream_tail;
+
+    /* Aggregate bucket used to send the CONNECT request. */
+    serf_bucket_t *ssltunnel_ostream;
+
+    serf_config_t *config;
 };
 
 /* States for the different stages in the lifecyle of a connection. */
@@ -362,7 +397,7 @@ struct serf_connection_t {
     apr_int16_t seen_in_pollset;
 
     /* are we a dirty connection that needs its poll status updated? */
-    int dirty_conn;
+    bool dirty_conn;
 
     /* number of completed requests we've sent */
     unsigned int completed_requests;
@@ -565,6 +600,7 @@ void serf__context_progress_delta(void *
 /* from incoming.c */
 apr_status_t serf__process_client(serf_incoming_t *l, apr_int16_t events);
 apr_status_t serf__process_listener(serf_listener_t *l);
+apr_status_t serf__incoming_update_pollset(serf_incoming_t *incoming);
 
 /* from outgoing.c */
 apr_status_t serf__open_connections(serf_context_t *ctx);

Modified: serf/trunk/test/test_all.c
URL: 
http://svn.apache.org/viewvc/serf/trunk/test/test_all.c?rev=1714449&r1=1714448&r2=1714449&view=diff
==============================================================================
--- serf/trunk/test/test_all.c (original)
+++ serf/trunk/test/test_all.c Sun Nov 15 12:52:43 2015
@@ -29,11 +29,12 @@ static const struct testlist {
     const char *testname;
     CuSuite *(*func)(void);
 } tests[] = {
-    {"context", test_context},
-    {"buckets", test_buckets},
-    {"ssl",     test_ssl},
-    {"auth",    test_auth},
-    {"internal", test_internal},
+    {"context",     test_context},
+    {"buckets",     test_buckets},
+    {"ssl",         test_ssl},
+    {"auth",        test_auth},
+    {"internal",    test_internal},
+    {"server",      test_server},
 #if 0
     /* internal test for the mock bucket. */
     {"mock",    test_mock_bucket},

Modified: serf/trunk/test/test_serf.h
URL: 
http://svn.apache.org/viewvc/serf/trunk/test/test_serf.h?rev=1714449&r1=1714448&r2=1714449&view=diff
==============================================================================
--- serf/trunk/test/test_serf.h (original)
+++ serf/trunk/test/test_serf.h Sun Nov 15 12:52:43 2015
@@ -60,6 +60,7 @@ CuSuite *test_buckets(void);
 CuSuite *test_ssl(void);
 CuSuite *test_auth(void);
 CuSuite *test_internal(void);
+CuSuite *test_server(void);
 CuSuite *test_mock_bucket(void);
 
 /* Test setup declarations */

Added: serf/trunk/test/test_server.c
URL: 
http://svn.apache.org/viewvc/serf/trunk/test/test_server.c?rev=1714449&view=auto
==============================================================================
--- serf/trunk/test/test_server.c (added)
+++ serf/trunk/test/test_server.c Sun Nov 15 12:52:43 2015
@@ -0,0 +1,171 @@
+/* ====================================================================
+ *    Licensed to the Apache Software Foundation (ASF) under one
+ *    or more contributor license agreements.  See the NOTICE file
+ *    distributed with this work for additional information
+ *    regarding copyright ownership.  The ASF licenses this file
+ *    to you under the Apache License, Version 2.0 (the
+ *    "License"); you may not use this file except in compliance
+ *    with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing,
+ *    software distributed under the License is distributed on an
+ *    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ *    KIND, either express or implied.  See the License for the
+ *    specific language governing permissions and limitations
+ *    under the License.
+ * ====================================================================
+ */
+
+#include <stdlib.h>
+
+#include <apr.h>
+#include <apr_pools.h>
+#include <apr_strings.h>
+#include <apr_version.h>
+
+#include "serf.h"
+
+#include "test_serf.h"
+
+static apr_status_t client_setup(apr_socket_t *skt,
+                                 serf_bucket_t **read_bkt,
+                                 serf_bucket_t **write_bkt,
+                                 void *setup_baton,
+                                 apr_pool_t *pool)
+{
+  test_baton_t *tb = setup_baton;
+
+  *read_bkt = serf_bucket_socket_create(skt, tb->bkt_alloc);
+  return APR_SUCCESS;
+}
+
+static apr_status_t client_closed(serf_incoming_t *client,
+                                  void *closed_baton,
+                                  apr_status_t why,
+                                  apr_pool_t *pool)
+{
+  return APR_ENOTIMPL;
+}
+
+static apr_status_t client_request_acceptor(serf_context_t *ctx,
+                                            serf_incoming_request_t *req,
+                                            void *request_baton,
+                                            apr_pool_t *pool)
+{
+  return APR_ENOTIMPL;
+}
+
+static apr_status_t client_acceptor(serf_context_t *ctx,
+                                    serf_listener_t *l,
+                                    void *accept_baton,
+                                    apr_socket_t *insock,
+                                    apr_pool_t *pool)
+{
+  serf_incoming_t *incoming;
+  test_baton_t *tb = accept_baton;
+
+  return serf_incoming_create2(&incoming, ctx, insock,
+                               client_setup, tb,
+                               client_closed, tb,
+                               client_request_acceptor, tb,
+                               pool);
+}
+
+void setup_test_server(test_baton_t *tb)
+{
+  serf_listener_t *listener;
+  apr_status_t status;
+  apr_port_t listen_port = 47080;
+
+  if (!tb->mh)    /* TODO: move this to test_setup */
+    tb->mh = mhInit();
+
+  tb->context = serf_context_create(tb->pool);
+
+  while ((status = serf_listener_create(&listener, tb->context,
+                                        "localhost", listen_port,
+                                        tb, client_acceptor,
+                                        tb->pool)) != APR_SUCCESS)
+    {
+      listen_port++;
+    }
+
+  tb->serv_port = listen_port;
+  tb->serv_host = apr_psprintf(tb->pool, "%s:%d", "localhost", tb->serv_port);
+  tb->serv_url = apr_psprintf(tb->pool, "http://%s", tb->serv_host);
+}
+
+static apr_status_t
+run_client_server_loop(test_baton_t *tb,
+                       int num_requests,
+                       handler_baton_t handler_ctx[],
+                       apr_pool_t *pool)
+{
+  apr_pool_t *iter_pool;
+  int i, done = 0;
+  apr_status_t status;
+  apr_time_t finish_time = apr_time_now() + apr_time_from_sec(15);
+
+  apr_pool_create(&iter_pool, pool);
+
+  while (!done)
+  {
+    apr_pool_clear(iter_pool);
+
+
+    /* Even if the mock server returned an error, it may have written
+    something to the client. So process that data first, handle the error
+    later. */
+
+    /* run client event loop */
+    status = serf_context_run(tb->context, 0, iter_pool);
+    if (!APR_STATUS_IS_TIMEUP(status) &&
+        SERF_BUCKET_READ_ERROR(status))
+      return status;
+
+    done = 1;
+    for (i = 0; i < num_requests; i++)
+      done &= handler_ctx[i].done;
+
+    if (!done && (apr_time_now() > finish_time))
+      return APR_ETIMEDOUT;
+  }
+  apr_pool_destroy(iter_pool);
+
+  return APR_SUCCESS;
+}
+
+void test_listen_http(CuTest *tc)
+{
+  test_baton_t *tb = tc->testBaton;
+  apr_status_t status;
+  handler_baton_t handler_ctx[2];
+  const int num_requests = sizeof(handler_ctx) / sizeof(handler_ctx[0]);
+
+  setup_test_server(tb);
+
+  status = setup_test_client_context(tb, NULL, tb->pool);
+  CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+  create_new_request(tb, &handler_ctx[0], "GET", "/", 1);
+  create_new_request(tb, &handler_ctx[1], "GET", "/", 2);
+
+  status = run_client_server_loop(tb, num_requests,
+                                  handler_ctx, tb->pool);
+  CuAssertIntEquals(tc, APR_ENOTIMPL, status);
+}
+
+
+/*****************************************************************************/
+CuSuite *test_server(void)
+{
+  CuSuite *suite = CuSuiteNew();
+
+  CuSuiteSetSetupTeardownCallbacks(suite, test_setup, test_teardown);
+
+  SUITE_ADD_TEST(suite, test_listen_http);
+
+  return suite;
+}

Propchange: serf/trunk/test/test_server.c
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: serf/trunk/test/test_util.c
URL: 
http://svn.apache.org/viewvc/serf/trunk/test/test_util.c?rev=1714449&r1=1714448&r2=1714449&view=diff
==============================================================================
--- serf/trunk/test/test_util.c (original)
+++ serf/trunk/test/test_util.c Sun Nov 15 12:52:43 2015
@@ -420,7 +420,9 @@ setup_test_client_context(test_baton_t *
 {
     apr_status_t status;
 
-    tb->context = serf_context_create(pool);
+    if (!tb->context)
+        tb->context = serf_context_create(pool);
+
     tb->conn_setup = conn_setup ? conn_setup :
                                   default_http_conn_setup;
     status = use_new_connection(tb, pool);

