The branch, master has been updated
       via  c2ca9e0 s3:vfs_default: optimize vfswrap_asys_finished() and read as much as we can
       via  719595b lib/tsocket: optimize syscalls in tstream_readv_pdu_send()
       via  e42889f lib/tsocket: disable the syscall optimization for recvfrom/readv by default
      from  71e1c08 libads: Always free the talloc_stackframe() on error path

http://gitweb.samba.org/?p=samba.git;a=shortlog;h=master


- Log -----------------------------------------------------------------
commit c2ca9e02106108c024b0daf27325e8eba35437f2
Author: Stefan Metzmacher <[email protected]>
Date:   Fri Nov 2 12:52:51 2012 +0100

    s3:vfs_default: optimize vfswrap_asys_finished() and read as much as we can
    
    Signed-off-by: Stefan Metzmacher <[email protected]>
    
    Autobuild-User(master): Volker Lendecke <[email protected]>
    Autobuild-Date(master): Mon Nov  5 19:01:13 CET 2012 on sn-devel-104
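
    The loop added by this commit keeps calling asys_result() until the
    now non-blocking signal fd is drained. As a rough, hedged sketch of
    that drain pattern (generic C, not Samba code; "drain_signal_fd" and
    the plain read() stand in for the asys_signalfd()/asys_result() pair):

        #include <errno.h>
        #include <stdio.h>
        #include <unistd.h>

        static void drain_signal_fd(int fd)
        {
                char buf[4096];

                for (;;) {
                        ssize_t n = read(fd, buf, sizeof(buf));

                        if (n == -1 && errno == EINTR) {
                                return; /* interrupted; the event loop fires again */
                        }
                        if (n == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
                                return; /* drained for now, wait for the next event */
                        }
                        if (n <= 0) {
                                return; /* real error or EOF */
                        }
                        /* one chunk of completion data read; process it ... */
                        printf("consumed %zd bytes\n", n);
                }
        }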

commit 719595b6f7f8745f2608dddb2b86476b9cc2f598
Author: Stefan Metzmacher <[email protected]>
Date:   Fri Nov 2 13:56:53 2012 +0100

    lib/tsocket: optimize syscalls in tstream_readv_pdu_send()
    
    Once we've got the first part of a pdu, we try to optimize
    readv calls for the rest of the pdu.
    
    Signed-off-by: Stefan Metzmacher <[email protected]>
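
    Concretely, a hedged sketch of that pattern (the helper name
    "read_rest_of_pdu" is made up for illustration; the toggle calls are
    the ones added in the next commit below, and the save/restore dance
    mirrors the tsocket_helpers.c hunk in this changeset):

        #include <sys/uio.h>
        #include <talloc.h>
        #include <tevent.h>
        #include "lib/tsocket/tsocket.h"

        static struct tevent_req *read_rest_of_pdu(TALLOC_CTX *mem_ctx,
                                                   struct tevent_context *ev,
                                                   struct tstream_context *stream,
                                                   struct iovec *vector,
                                                   size_t count)
        {
                struct tevent_req *subreq;
                bool save_optimize;

                /* a no-op returning false if "stream" is not a bsd socket */
                save_optimize = tstream_bsd_optimize_readv(stream, true);
                subreq = tstream_readv_send(mem_ctx, ev, stream,
                                            vector, count);
                tstream_bsd_optimize_readv(stream, save_optimize);

                return subreq;
        }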

commit e42889f83f261e2ac34014649476fae638a6e1f2
Author: Stefan Metzmacher <[email protected]>
Date:   Fri Nov 2 13:45:49 2012 +0100

    lib/tsocket: disable the syscall optimization for recvfrom/readv by default
    
    We only do the optimization on recvfrom/readv if the caller asked for it.
    
    This is needed because in most cases we prefer to flush send
    buffers before receiving incoming requests.
    
    Signed-off-by: Stefan Metzmacher <[email protected]>
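
    A hedged usage sketch (caller-side code, not part of the patch):
    the fast path now has to be requested explicitly, and the toggle
    hands back the previous setting:

        #include <stdbool.h>
        #include "lib/tsocket/tsocket.h"

        static void enable_fast_recvfrom(struct tdgram_context *dgram)
        {
                bool old;

                /* off by default; does nothing and returns false
                 * if "dgram" is not a bsd socket */
                old = tdgram_bsd_optimize_recvfrom(dgram, true);
                (void)old; /* previous setting, kept in case we restore it */
        }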

-----------------------------------------------------------------------

Summary of changes:
 lib/tsocket/tsocket.h         |   42 +++++++++++++++++++++++
 lib/tsocket/tsocket_bsd.c     |   73 +++++++++++++++++++++++++++++++++++++---
 lib/tsocket/tsocket_helpers.c |   29 ++++++++++++++++
 source3/modules/vfs_default.c |   49 ++++++++++++++++++---------
 4 files changed, 171 insertions(+), 22 deletions(-)


Changeset truncated at 500 lines:

diff --git a/lib/tsocket/tsocket.h b/lib/tsocket/tsocket.h
index 3aca536..98f864e 100644
--- a/lib/tsocket/tsocket.h
+++ b/lib/tsocket/tsocket.h
@@ -627,6 +627,27 @@ int _tsocket_address_unix_from_path(TALLOC_CTX *mem_ctx,
 char *tsocket_address_unix_path(const struct tsocket_address *addr,
                                TALLOC_CTX *mem_ctx);
 
+/**
+ * @brief Request a syscall optimization for tdgram_recvfrom_send()
+ *
+ * This function is only used to reduce the number of syscalls and
+ * improve performance. You should only use this if you know
+ * what you're doing.
+ *
+ * The optimization is off by default.
+ *
+ * @param[in]  dgram    The tdgram_context of a bsd socket; if this
+ *                      is not a bsd socket the function does nothing.
+ *
+ * @param[in]  on       The boolean value to turn the optimization on or off.
+ *
+ * @return              The old boolean value.
+ *
+ * @see tdgram_recvfrom_send()
+ */
+bool tdgram_bsd_optimize_recvfrom(struct tdgram_context *dgram,
+                                 bool on);
+
 #ifdef DOXYGEN
 /**
  * @brief Create a tdgram_context for a ipv4 or ipv6 UDP communication.
@@ -689,6 +710,27 @@ int _tdgram_unix_socket(const struct tsocket_address *local,
 #endif
 
 /**
+ * @brief Request a syscall optimization for tstream_readv_send()
+ *
+ * This function is only used to reduce the number of syscalls and
+ * improve performance. You should only use this if you know
+ * what you're doing.
+ *
+ * The optimization is off by default.
+ *
+ * @param[in]  stream   The tstream_context of a bsd socket; if this
+ *                      is not a bsd socket the function does nothing.
+ *
+ * @param[in]  on       The boolean value to turn the optimization on or off.
+ *
+ * @return              The old boolean value.
+ *
+ * @see tstream_readv_send()
+ */
+bool tstream_bsd_optimize_readv(struct tstream_context *stream,
+                               bool on);
+
+/**
  * @brief Connect async to a TCP endpoint and create a tstream_context for the
  * stream based communication.
  *
diff --git a/lib/tsocket/tsocket_bsd.c b/lib/tsocket/tsocket_bsd.c
index 135fd02..56dff68 100644
--- a/lib/tsocket/tsocket_bsd.c
+++ b/lib/tsocket/tsocket_bsd.c
@@ -654,6 +654,7 @@ struct tdgram_bsd {
 
        void *event_ptr;
        struct tevent_fd *fde;
+       bool optimize_recvfrom;
 
        void *readable_private;
        void (*readable_handler)(void *private_data);
@@ -661,6 +662,25 @@ struct tdgram_bsd {
        void (*writeable_handler)(void *private_data);
 };
 
+bool tdgram_bsd_optimize_recvfrom(struct tdgram_context *dgram,
+                                 bool on)
+{
+       struct tdgram_bsd *bsds =
+               talloc_get_type(_tdgram_context_data(dgram),
+               struct tdgram_bsd);
+       bool old;
+
+       if (bsds == NULL) {
+               /* not a bsd socket */
+               return false;
+       }
+
+       old = bsds->optimize_recvfrom;
+       bsds->optimize_recvfrom = on;
+
+       return old;
+}
+
 static void tdgram_bsd_fde_handler(struct tevent_context *ev,
                                   struct tevent_fd *fde,
                                   uint16_t flags,
@@ -838,14 +858,25 @@ static struct tevent_req *tdgram_bsd_recvfrom_send(TALLOC_CTX *mem_ctx,
                goto post;
        }
 
+
        /*
         * this is a fast path, not waiting for the
         * socket to become explicitly readable gains
         * about 10%-20% performance in benchmark tests.
         */
-       tdgram_bsd_recvfrom_handler(req);
-       if (!tevent_req_is_in_progress(req)) {
-               goto post;
+       if (bsds->optimize_recvfrom) {
+               /*
+                * We only do the optimization on
+                * recvfrom if the caller asked for it.
+                *
+                * This is needed because in most cases
+                * we prefer to flush send buffers before
+                * receiving incoming requests.
+                */
+               tdgram_bsd_recvfrom_handler(req);
+               if (!tevent_req_is_in_progress(req)) {
+                       goto post;
+               }
        }
 
        ret = tdgram_bsd_set_readable_handler(bsds, ev,
@@ -1405,6 +1436,7 @@ struct tstream_bsd {
 
        void *event_ptr;
        struct tevent_fd *fde;
+       bool optimize_readv;
 
        void *readable_private;
        void (*readable_handler)(void *private_data);
@@ -1412,6 +1444,25 @@ struct tstream_bsd {
        void (*writeable_handler)(void *private_data);
 };
 
+bool tstream_bsd_optimize_readv(struct tstream_context *stream,
+                               bool on)
+{
+       struct tstream_bsd *bsds =
+               talloc_get_type(_tstream_context_data(stream),
+               struct tstream_bsd);
+       bool old;
+
+       if (bsds == NULL) {
+               /* not a bsd socket */
+               return false;
+       }
+
+       old = bsds->optimize_readv;
+       bsds->optimize_readv = on;
+
+       return old;
+}
+
 static void tstream_bsd_fde_handler(struct tevent_context *ev,
                                    struct tevent_fd *fde,
                                    uint16_t flags,
@@ -1624,9 +1675,19 @@ static struct tevent_req *tstream_bsd_readv_send(TALLOC_CTX *mem_ctx,
         * socket to become explicit readable gains
         * about 10%-20% performance in benchmark tests.
         */
-       tstream_bsd_readv_handler(req);
-       if (!tevent_req_is_in_progress(req)) {
-               goto post;
+       if (bsds->optimize_readv) {
+               /*
+                * We only do the optimization on
+                * readv if the caller asked for it.
+                *
+                * This is needed because in most cases
+                * we prefer to flush send buffers before
+                * receiving incoming requests.
+                */
+               tstream_bsd_readv_handler(req);
+               if (!tevent_req_is_in_progress(req)) {
+                       goto post;
+               }
        }
 
        ret = tstream_bsd_set_readable_handler(bsds, ev,
diff --git a/lib/tsocket/tsocket_helpers.c b/lib/tsocket/tsocket_helpers.c
index 1b92b9f..49c6840 100644
--- a/lib/tsocket/tsocket_helpers.c
+++ b/lib/tsocket/tsocket_helpers.c
@@ -215,6 +215,20 @@ static void tstream_readv_pdu_ask_for_next_vector(struct tevent_req *req)
        size_t to_read = 0;
        size_t i;
        struct tevent_req *subreq;
+       bool optimize = false;
+       bool save_optimize = false;
+
+       if (state->count > 0) {
+               /*
+                * This is not the first time we asked for a vector,
+                * which means parts of the pdu already arrived.
+                *
+                * In this case it makes sense to enable
+                * a syscall/performance optimization if the
+                * low level tstream implementation supports it.
+                */
+               optimize = true;
+       }
 
        TALLOC_FREE(state->vector);
        state->count = 0;
@@ -258,11 +272,26 @@ static void tstream_readv_pdu_ask_for_next_vector(struct tevent_req *req)
                return;
        }
 
+       if (optimize) {
+               /*
+                * If the low level stream is a bsd socket
+                * we will get syscall optimization.
+                *
+                * If it is not a bsd socket
+                * tstream_bsd_optimize_readv() just returns.
+                */
+               save_optimize = tstream_bsd_optimize_readv(state->caller.stream,
+                                                          true);
+       }
        subreq = tstream_readv_send(state,
                                    state->caller.ev,
                                    state->caller.stream,
                                    state->vector,
                                    state->count);
+       if (optimize) {
+               tstream_bsd_optimize_readv(state->caller.stream,
+                                          save_optimize);
+       }
        if (tevent_req_nomem(subreq, req)) {
                return;
        }
diff --git a/source3/modules/vfs_default.c b/source3/modules/vfs_default.c
index 8e980e0..0f651dc 100644
--- a/source3/modules/vfs_default.c
+++ b/source3/modules/vfs_default.c
@@ -637,6 +637,7 @@ static void vfswrap_asys_finished(struct tevent_context *ev,
 static bool vfswrap_init_asys_ctx(struct smbXsrv_connection *conn)
 {
        int ret;
+       int fd;
 
        if (conn->asys_ctx != NULL) {
                return true;
@@ -646,8 +647,12 @@ static bool vfswrap_init_asys_ctx(struct smbXsrv_connection *conn)
                DEBUG(1, ("asys_context_init failed: %s\n", strerror(ret)));
                return false;
        }
-       conn->asys_fde = tevent_add_fd(conn->ev_ctx, conn,
-                                      asys_signalfd(conn->asys_ctx),
+
+       fd = asys_signalfd(conn->asys_ctx);
+
+       set_blocking(fd, false);
+
+       conn->asys_fde = tevent_add_fd(conn->ev_ctx, conn, fd,
                                       TEVENT_FD_READ,
                                       vfswrap_asys_finished,
                                       conn->asys_ctx);
@@ -783,24 +788,36 @@ static void vfswrap_asys_finished(struct tevent_context *ev,
                return;
        }
 
-       res = asys_result(asys_ctx, &ret, &err, &private_data);
-       if (res == ECANCELED) {
-               return;
-       }
+       while (true) {
+               res = asys_result(asys_ctx, &ret, &err, &private_data);
+               if (res == EINTR || res == EAGAIN) {
+                       return;
+               }
+#ifdef EWOULDBLOCK
+               if (res == EWOULDBLOCK) {
+                       return;
+               }
+#endif
 
-       if (res != 0) {
-               DEBUG(1, ("asys_result returned %s\n", strerror(res)));
-               return;
-       }
+               if (res == ECANCELED) {
+                       return;
+               }
 
-       req = talloc_get_type_abort(private_data, struct tevent_req);
-       state = tevent_req_data(req, struct vfswrap_asys_state);
+               if (res != 0) {
+                       DEBUG(1, ("asys_result returned %s\n", strerror(res)));
+                       return;
+               }
+
+               req = talloc_get_type_abort(private_data, struct tevent_req);
+               state = tevent_req_data(req, struct vfswrap_asys_state);
 
-       talloc_set_destructor(state, NULL);
+               talloc_set_destructor(state, NULL);
 
-       state->ret = ret;
-       state->err = err;
-       tevent_req_done(req);
+               state->ret = ret;
+               state->err = err;
+               tevent_req_defer_callback(req, ev);
+               tevent_req_done(req);
+       }
 }
 
 static ssize_t vfswrap_asys_ssize_t_recv(struct tevent_req *req, int *err)


-- 
Samba Shared Repository
