The branch, master has been updated
       via  7c71520 auth/gensec_gssapi: gss_krb5_lucid_context_v1_t is not shared with the gse code anymore
       via  ebcfa61 s4:librpc/rpc: avoid an unused talloc_reference() from dcerpc_request_send()
       via  bb75905 s4:librpc/rpc: pass a mem_ctx to dcerpc_request_send()
       via  cd1d5a2 s4:librpc/rpc: use tevent_req_defer_callback() in dcerpc_alter_context_*
       via  6d631e5 s4:librpc/rpc: use tevent_req_defer_callback() in dcerpc_bind_*
       via  198c5ac s4:librpc/rpc: convert dcerpc_alter_context_send/recv to tevent_req
       via  6b81d71 s4:librpc/rpc: convert dcerpc_bind_send/recv to tevent_req
       via  946eca4 s4:librpc/rpc: also notify requests which are not shipped about a dead connection
       via  080549f s4:librpc/rpc: ship requests via an immediate event
       via  553a892 s4:librpc/rpc: if the connection is dead we're done
       via  6949a79 s4:librpc/rpc: if the connection is dead we don't want to be called recursively.
      from  e64b118 s3: Update waf build to include missed dependency on Lion.

http://gitweb.samba.org/?p=samba.git;a=shortlog;h=master


- Log -----------------------------------------------------------------
commit 7c715207ad123db53b496444a148fb56438dd7b1
Author: Stefan Metzmacher <[email protected]>
Date:   Fri Mar 2 22:01:48 2012 +0100

    auth/gensec_gssapi: gss_krb5_lucid_context_v1_t is not shared with the gse code anymore
    
    metze
    
    Autobuild-User: Stefan Metzmacher <[email protected]>
    Autobuild-Date: Thu Mar 15 09:16:16 CET 2012 on sn-devel-104

commit ebcfa61d9f712db8400acd722dfc43c07021c9b0
Author: Stefan Metzmacher <[email protected]>
Date:   Tue Mar 13 15:18:54 2012 +0100

    s4:librpc/rpc: avoid an unused talloc_reference() from dcerpc_request_send()
    
    metze

commit bb75905b6a6ef48a45039826a7efa0d90e4edbc8
Author: Stefan Metzmacher <[email protected]>
Date:   Tue Mar 13 15:18:08 2012 +0100

    s4:librpc/rpc: pass a mem_ctx to dcerpc_request_send()
    
    metze
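
Both patches above are about talloc ownership: talloc_reference() gives
the stub data a second parent, which makes its lifetime ambiguous, while
an explicit mem_ctx parameter lets the caller decide when the request
state is freed. A minimal sketch of the idiom, with invented names
(struct request, request_send) rather than the actual Samba code:

    #include <stddef.h>
    #include <stdint.h>
    #include <talloc.h>

    struct request {
            uint8_t *data;          /* borrowed pointer, no extra parent */
            size_t length;
    };

    /* The caller owns mem_ctx and guarantees that data outlives the
     * request, so no talloc_reference() is needed. */
    static struct request *request_send(TALLOC_CTX *mem_ctx,
                                        uint8_t *data, size_t length)
    {
            struct request *req = talloc_zero(mem_ctx, struct request);

            if (req == NULL) {
                    return NULL;
            }
            req->data = data;       /* was: talloc_reference(req, data) */
            req->length = length;
            return req;
    }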

commit cd1d5a29d4cf810acbdc79f8824c85e608eae129
Author: Stefan Metzmacher <[email protected]>
Date:   Tue Mar 13 15:41:16 2012 +0100

    s4:librpc/rpc: use tevent_req_defer_callback() in dcerpc_alter_context_*
    
    metze

commit 6d631e52cf53bb1b66e8a1f4ed7f5091014aa359
Author: Stefan Metzmacher <[email protected]>
Date:   Tue Mar 13 15:14:33 2012 +0100

    s4:librpc/rpc: use tevent_req_defer_callback() in dcerpc_bind_*
    
    metze
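
Both patches above apply the same pattern: when a single incoming event
(e.g. a dead connection) can complete several requests from one loop,
invoking the callers' callbacks immediately may free or re-enter state
the loop still uses. tevent_req_defer_callback() arranges for the next
tevent_req_done()/tevent_req_nterror() to fire the callback from a fresh
event loop iteration instead. A sketch, assuming a hypothetical
example_state that remembers its tevent context (tevent_req_nterror()
comes from lib/util/tevent_ntstatus.h, which dcerpc.c already includes):

    #include <tevent.h>
    #include "lib/util/tevent_ntstatus.h"   /* tevent_req_nterror() */

    struct example_state {
            struct tevent_context *ev;
    };

    static void example_done(struct tevent_req *req, NTSTATUS status)
    {
            struct example_state *state =
                    tevent_req_data(req, struct example_state);

            /* From here on, completing the request schedules the
             * caller's callback for the next event loop iteration
             * instead of invoking it from inside this handler. */
            tevent_req_defer_callback(req, state->ev);

            if (tevent_req_nterror(req, status)) {
                    return;
            }
            tevent_req_done(req);
    }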

commit 198c5ace6f9a7d1b1f99473168b0b5b1d191f355
Author: Stefan Metzmacher <[email protected]>
Date:   Tue Mar 13 15:37:49 2012 +0100

    s4:librpc/rpc: convert dcerpc_alter_context_send/recv to tevent_req
    
    Many thanks to Andrew Bartlett <[email protected]> for the
    debugging, which led to the following line:
    
           talloc_steal(state, raw_packet->data);
    
    metze

commit 6b81d71f3e0ae89bf1352ea469446a902613de01
Author: Stefan Metzmacher <[email protected]>
Date:   Tue Mar 13 14:52:40 2012 +0100

    s4:librpc/rpc: convert dcerpc_bind_send/recv to tevent_req
    
    Many thanks to Andrew Bartlett <[email protected]> for the
    debugging, which led to the following line:
    
           talloc_steal(state, raw_packet->data);
    
    metze
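
The talloc_steal() line matters because the parsed ncacn_packet is
allocated under raw_packet->data, which in turn is a child of the
rpc_request being freed; stealing the buffer onto the tevent_req state
keeps the packet alive while the handler still reads from it. For
reference, the skeleton of such a composite-to-tevent_req conversion
looks roughly like this (example_bind_* are placeholder names, not the
real functions):

    #include <tevent.h>
    #include "lib/util/tevent_ntstatus.h"

    struct example_bind_state {
            struct tevent_context *ev;
    };

    struct tevent_req *example_bind_send(TALLOC_CTX *mem_ctx,
                                         struct tevent_context *ev)
    {
            struct tevent_req *req;
            struct example_bind_state *state;

            req = tevent_req_create(mem_ctx, &state,
                                    struct example_bind_state);
            if (req == NULL) {
                    return NULL;
            }
            state->ev = ev;

            /* build and ship the bind PDU here; on failure:
             *     tevent_req_nterror(req, status);
             *     return tevent_req_post(req, ev);
             */
            return req;
    }

    NTSTATUS example_bind_recv(struct tevent_req *req)
    {
            return tevent_req_simple_recv_ntstatus(req);
    }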

commit 946eca438d0e0beb34e66f031f64acd44afea074
Author: Stefan Metzmacher <[email protected]>
Date:   Wed Mar 14 14:59:45 2012 +0100

    s4:librpc/rpc: also notify requests which are not shipped about a dead connection
    
    metze

commit 080549f4675484d0de16c5bfae162513f13fcab6
Author: Stefan Metzmacher <[email protected]>
Date:   Wed Mar 14 14:57:32 2012 +0100

    s4:librpc/rpc: ship requests via an immediate event
    
    Deep inside dcerpc_ship_next_request(), some code paths can trigger
    dcerpc_connection_dead(), which means it's not safe to do any further
    processing after calling dcerpc_ship_next_request().
    
    metze
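
Condensed from the hunks below (the tail of dcerpc_schedule_io_trigger()
falls past the 500-line truncation, so the pending-flag handling here is
partly an assumption): the connection allocates a tevent_immediate up
front and request shipping is funneled through it, so the shipping code
only ever runs from the top of the event loop, never beneath a caller
that dcerpc_connection_dead() could free. Simplified names (struct
connection, ship_next_request) stand in for the real ones:

    #include <stdbool.h>
    #include <talloc.h>
    #include <tevent.h>

    struct connection {
            struct tevent_context *event_ctx;
            /* created with tevent_create_immediate(c) at setup,
             * as in the dcerpc_connection_init() hunk below */
            struct tevent_immediate *io_trigger;
            bool io_trigger_pending;
            bool dead;
    };

    static void ship_next_request(struct connection *c)
    {
            /* write the next queued PDU to the transport ... */
    }

    static void io_trigger(struct tevent_context *ev,
                           struct tevent_immediate *im,
                           void *private_data)
    {
            struct connection *c =
                    talloc_get_type_abort(private_data, struct connection);

            c->io_trigger_pending = false;
            ship_next_request(c);   /* safe: no rpc caller above us */
    }

    static void schedule_io_trigger(struct connection *c)
    {
            if (c->dead || c->io_trigger_pending) {
                    return;
            }
            tevent_schedule_immediate(c->io_trigger, c->event_ctx,
                                      io_trigger, c);
            c->io_trigger_pending = true;
    }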

commit 553a8921a453d839b87b941719b1ad44c694904f
Author: Stefan Metzmacher <[email protected]>
Date:   Wed Mar 14 14:25:48 2012 +0100

    s4:librpc/rpc: if the connection is dead we're done
    
    There's no point in doing any more processing;
    it can only lead to crashes.
    
    metze

commit 6949a7908729bbbac50a5b59e404224264b5f184
Author: Stefan Metzmacher <[email protected]>
Date:   Wed Mar 14 14:24:54 2012 +0100

    s4:librpc/rpc: if the connection is dead we don't want to be called recursively.
    
    metze

-----------------------------------------------------------------------

Summary of changes:
 auth/gensec/gensec_gssapi.h      |    2 +-
 source4/librpc/rpc/dcerpc.c      |  629 +++++++++++++++++++++++++-------------
 source4/librpc/rpc/dcerpc.h      |    3 +
 source4/librpc/rpc/dcerpc_auth.c |   74 +++--
 4 files changed, 467 insertions(+), 241 deletions(-)


Changeset truncated at 500 lines:

diff --git a/auth/gensec/gensec_gssapi.h b/auth/gensec/gensec_gssapi.h
index 96389b2..694ca6b 100644
--- a/auth/gensec/gensec_gssapi.h
+++ b/auth/gensec/gensec_gssapi.h
@@ -43,9 +43,9 @@ struct gensec_gssapi_state {
 
        gss_cred_id_t delegated_cred_handle;
 
-       gss_krb5_lucid_context_v1_t *lucid;
 
        /* gensec_gssapi only */
+       gss_krb5_lucid_context_v1_t *lucid;
        gss_OID gss_oid;
 
        struct gss_channel_bindings_struct *input_chan_bindings;
diff --git a/source4/librpc/rpc/dcerpc.c b/source4/librpc/rpc/dcerpc.c
index 599ad78..ebf6f33 100644
--- a/source4/librpc/rpc/dcerpc.c
+++ b/source4/librpc/rpc/dcerpc.c
@@ -27,7 +27,6 @@
 #include "librpc/rpc/dcerpc_proto.h"
 #include "librpc/gen_ndr/ndr_misc.h"
 #include "librpc/gen_ndr/ndr_dcerpc.h"
-#include "libcli/composite/composite.h"
 #include "auth/gensec/gensec.h"
 #include "param/param.h"
 #include "lib/util/tevent_ntstatus.h"
@@ -82,9 +81,10 @@ _PUBLIC_ NTSTATUS dcerpc_init(void)
 }
 
 static void dcerpc_connection_dead(struct dcecli_connection *conn, NTSTATUS status);
-static void dcerpc_ship_next_request(struct dcecli_connection *c);
+static void dcerpc_schedule_io_trigger(struct dcecli_connection *c);
 
-static struct rpc_request *dcerpc_request_send(struct dcerpc_pipe *p,
+static struct rpc_request *dcerpc_request_send(TALLOC_CTX *mem_ctx,
+                                              struct dcerpc_pipe *p,
                                               const struct GUID *object,
                                               uint16_t opnum,
                                               DATA_BLOB *stub_data);
@@ -147,6 +147,12 @@ static struct dcecli_connection *dcerpc_connection_init(TALLOC_CTX *mem_ctx,
        c->srv_max_recv_frag = 0;
        c->pending = NULL;
 
+       c->io_trigger = tevent_create_immediate(c);
+       if (c->io_trigger == NULL) {
+               talloc_free(c);
+               return NULL;
+       }
+
        talloc_set_destructor(c, dcerpc_connection_destructor);
 
        return c;
@@ -235,7 +241,8 @@ static struct tevent_req *dcerpc_bh_raw_call_send(TALLOC_CTX *mem_ctx,
                return tevent_req_post(req, ev);
        }
 
-       subreq = dcerpc_request_send(hs->p,
+       subreq = dcerpc_request_send(state,
+                                    hs->p,
                                     object,
                                     opnum,
                                     &state->in_data);
@@ -952,16 +959,6 @@ static NTSTATUS dcerpc_map_reason(uint16_t reason)
 }
 
 /*
-  a bind or alter context has failed
-*/
-static void dcerpc_composite_fail(struct rpc_request *req)
-{
-       struct composite_context *c = talloc_get_type(req->async.private_data, 
-                                                     struct composite_context);
-       composite_error(c, req->status);
-}
-
-/*
   remove requests from the pending or queued queues
  */
 static int dcerpc_req_dequeue(struct rpc_request *req)
@@ -989,6 +986,11 @@ static void dcerpc_connection_dead(struct dcecli_connection *conn, NTSTATUS stat
 
        conn->dead = true;
 
+       TALLOC_FREE(conn->io_trigger);
+       conn->io_trigger_pending = false;
+
+       conn->transport.recv_data = NULL;
+
        if (conn->transport.shutdown_pipe) {
                conn->transport.shutdown_pipe(conn, status);
        }
@@ -1004,6 +1006,17 @@ static void dcerpc_connection_dead(struct dcecli_connection *conn, NTSTATUS stat
                }
        }       
 
+       /* all requests, which are not shipped */
+       while (conn->request_queue) {
+               struct rpc_request *req = conn->request_queue;
+               dcerpc_req_dequeue(req);
+               req->state = RPC_REQUEST_DONE;
+               req->status = status;
+               if (req->async.callback) {
+                       req->async.callback(req);
+               }
+       }
+
        talloc_set_destructor(conn, NULL);
        if (conn->free_skipped) {
                talloc_free(conn);
@@ -1043,68 +1056,10 @@ static void dcerpc_recv_data(struct dcecli_connection *conn, DATA_BLOB *blob, NT
        if (!NT_STATUS_IS_OK(status)) {
                data_blob_free(blob);
                dcerpc_connection_dead(conn, status);
-       }
-
-       dcerpc_request_recv_data(conn, blob, &pkt);
-}
-
-/*
-  Receive a bind reply from the transport
-*/
-static void dcerpc_bind_recv_handler(struct rpc_request *req, 
-                                    DATA_BLOB *raw_packet, struct ncacn_packet *pkt)
-{
-       struct composite_context *c;
-       struct dcecli_connection *conn;
-
-       c = talloc_get_type(req->async.private_data, struct composite_context);
-
-       if (pkt->ptype == DCERPC_PKT_BIND_NAK) {
-               DEBUG(2,("dcerpc: bind_nak reason %d\n",
-                        pkt->u.bind_nak.reject_reason));
-               composite_error(c, dcerpc_map_reason(pkt->u.bind_nak.
-                                                    reject_reason));
-               return;
-       }
-
-       if ((pkt->ptype != DCERPC_PKT_BIND_ACK) ||
-           (pkt->u.bind_ack.num_results == 0) ||
-           (pkt->u.bind_ack.ctx_list[0].result != 0)) {
-               req->p->last_fault_code = DCERPC_NCA_S_PROTO_ERROR;
-               composite_error(c, NT_STATUS_NET_WRITE_FAULT);
                return;
        }
 
-       conn = req->p->conn;
-
-       conn->srv_max_xmit_frag = pkt->u.bind_ack.max_xmit_frag;
-       conn->srv_max_recv_frag = pkt->u.bind_ack.max_recv_frag;
-
-       if ((req->p->binding->flags & DCERPC_CONCURRENT_MULTIPLEX) &&
-           (pkt->pfc_flags & DCERPC_PFC_FLAG_CONC_MPX)) {
-               conn->flags |= DCERPC_CONCURRENT_MULTIPLEX;
-       }
-
-       if ((req->p->binding->flags & DCERPC_HEADER_SIGNING) &&
-           (pkt->pfc_flags & DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN)) {
-               conn->flags |= DCERPC_HEADER_SIGNING;
-       }
-
-       /* the bind_ack might contain a reply set of credentials */
-       if (conn->security_state.auth_info && pkt->u.bind_ack.auth_info.length) {
-               NTSTATUS status;
-               uint32_t auth_length;
-               status = dcerpc_pull_auth_trailer(pkt, conn, &pkt->u.bind_ack.auth_info,
-                                                 conn->security_state.auth_info, &auth_length, true);
-               if (!NT_STATUS_IS_OK(status)) {
-                       composite_error(c, status);
-                       return;
-               }
-       }
-
-       req->p->assoc_group_id = pkt->u.bind_ack.assoc_group_id;
-
-       composite_done(c);
+       dcerpc_request_recv_data(conn, blob, &pkt);
 }
 
 /*
@@ -1128,23 +1083,37 @@ static void dcerpc_timeout_handler(struct tevent_context *ev, struct tevent_time
        dcerpc_connection_dead(req->p->conn, NT_STATUS_IO_TIMEOUT);
 }
 
-/*
-  send a async dcerpc bind request
-*/
-struct composite_context *dcerpc_bind_send(struct dcerpc_pipe *p,
-                                          TALLOC_CTX *mem_ctx,
-                                          const struct ndr_syntax_id *syntax,
-                                          const struct ndr_syntax_id *transfer_syntax)
+struct dcerpc_bind_state {
+       struct tevent_context *ev;
+       struct dcerpc_pipe *p;
+};
+
+static void dcerpc_bind_fail_handler(struct rpc_request *subreq);
+static void dcerpc_bind_recv_handler(struct rpc_request *subreq,
+                                    DATA_BLOB *raw_packet,
+                                    struct ncacn_packet *pkt);
+
+struct tevent_req *dcerpc_bind_send(TALLOC_CTX *mem_ctx,
+                                   struct tevent_context *ev,
+                                   struct dcerpc_pipe *p,
+                                   const struct ndr_syntax_id *syntax,
+                                   const struct ndr_syntax_id *transfer_syntax)
 {
-       struct composite_context *c;
+       struct tevent_req *req;
+       struct dcerpc_bind_state *state;
        struct ncacn_packet pkt;
        DATA_BLOB blob;
-       struct rpc_request *req;
+       NTSTATUS status;
+       struct rpc_request *subreq;
 
-       c = composite_create(mem_ctx,p->conn->event_ctx);
-       if (c == NULL) return NULL;
+       req = tevent_req_create(mem_ctx, &state,
+                               struct dcerpc_bind_state);
+       if (req == NULL) {
+               return NULL;
+       }
 
-       c->private_data = p;
+       state->ev = ev;
+       state->p = p;
 
        p->syntax = *syntax;
        p->transfer_syntax = *transfer_syntax;
@@ -1169,7 +1138,9 @@ struct composite_context *dcerpc_bind_send(struct dcerpc_pipe *p,
        pkt.u.bind.assoc_group_id = p->binding->assoc_group_id;
        pkt.u.bind.num_contexts = 1;
        pkt.u.bind.ctx_list = talloc_array(mem_ctx, struct dcerpc_ctx_list, 1);
-       if (composite_nomem(pkt.u.bind.ctx_list, c)) return c;
+       if (tevent_req_nomem(pkt.u.bind.ctx_list, req)) {
+               return tevent_req_post(req, ev);
+       }
        pkt.u.bind.ctx_list[0].context_id = p->context_id;
        pkt.u.bind.ctx_list[0].num_transfer_syntaxes = 1;
        pkt.u.bind.ctx_list[0].abstract_syntax = p->syntax;
@@ -1177,9 +1148,11 @@ struct composite_context *dcerpc_bind_send(struct dcerpc_pipe *p,
        pkt.u.bind.auth_info = data_blob(NULL, 0);
 
        /* construct the NDR form of the packet */
-       c->status = ncacn_push_auth(&blob, c, &pkt,
-                                   p->conn->security_state.auth_info);
-       if (!composite_is_ok(c)) return c;
+       status = ncacn_push_auth(&blob, state, &pkt,
+                                p->conn->security_state.auth_info);
+       if (tevent_req_nterror(req, status)) {
+               return tevent_req_post(req, ev);
+       }
 
        p->conn->transport.recv_data = dcerpc_recv_data;
 
@@ -1187,37 +1160,141 @@ struct composite_context *dcerpc_bind_send(struct dcerpc_pipe *p,
         * we allocate a dcerpc_request so we can be in the same
         * request queue as normal requests
         */
-       req = talloc_zero(c, struct rpc_request);
-       if (composite_nomem(req, c)) return c;
+       subreq = talloc_zero(state, struct rpc_request);
+       if (tevent_req_nomem(subreq, req)) {
+               return tevent_req_post(req, ev);
+       }
 
-       req->state = RPC_REQUEST_PENDING;
-       req->call_id = pkt.call_id;
-       req->async.private_data = c;
-       req->async.callback = dcerpc_composite_fail;
-       req->p = p;
-       req->recv_handler = dcerpc_bind_recv_handler;
-       DLIST_ADD_END(p->conn->pending, req, struct rpc_request *);
-       talloc_set_destructor(req, dcerpc_req_dequeue);
+       subreq->state = RPC_REQUEST_PENDING;
+       subreq->call_id = pkt.call_id;
+       subreq->async.private_data = req;
+       subreq->async.callback = dcerpc_bind_fail_handler;
+       subreq->p = p;
+       subreq->recv_handler = dcerpc_bind_recv_handler;
+       DLIST_ADD_END(p->conn->pending, subreq, struct rpc_request *);
+       talloc_set_destructor(subreq, dcerpc_req_dequeue);
+
+       status = p->conn->transport.send_request(p->conn, &blob, true);
+       if (tevent_req_nterror(req, status)) {
+               return tevent_req_post(req, ev);
+       }
 
-       c->status = p->conn->transport.send_request(p->conn, &blob,
-                                                   true);
-       if (!composite_is_ok(c)) return c;
+       tevent_add_timer(ev, subreq,
+                        timeval_current_ofs(DCERPC_REQUEST_TIMEOUT, 0),
+                        dcerpc_timeout_handler, subreq);
 
-       tevent_add_timer(c->event_ctx, req,
-                       timeval_current_ofs(DCERPC_REQUEST_TIMEOUT, 0),
-                       dcerpc_timeout_handler, req);
+       return req;
+}
 
-       return c;
+static void dcerpc_bind_fail_handler(struct rpc_request *subreq)
+{
+       struct tevent_req *req =
+               talloc_get_type_abort(subreq->async.private_data,
+               struct tevent_req);
+       struct dcerpc_bind_state *state =
+               tevent_req_data(req,
+               struct dcerpc_bind_state);
+       NTSTATUS status = subreq->status;
+
+       TALLOC_FREE(subreq);
+
+       /*
+        * We trigger the callback in the next event run
+        * because the code in this file might trigger
+        * multiple request callbacks from within a single
+        * while loop.
+        *
+        * In order to avoid segfaults from within
+        * dcerpc_connection_dead() we call
+        * tevent_req_defer_callback().
+        */
+       tevent_req_defer_callback(req, state->ev);
+
+       tevent_req_nterror(req, status);
 }
 
-/*
-  recv side of async dcerpc bind request
-*/
-NTSTATUS dcerpc_bind_recv(struct composite_context *ctx)
+static void dcerpc_bind_recv_handler(struct rpc_request *subreq,
+                                    DATA_BLOB *raw_packet,
+                                    struct ncacn_packet *pkt)
 {
-       NTSTATUS result = composite_wait(ctx);
-       talloc_free(ctx);
-       return result;
+       struct tevent_req *req =
+               talloc_get_type_abort(subreq->async.private_data,
+               struct tevent_req);
+       struct dcerpc_bind_state *state =
+               tevent_req_data(req,
+               struct dcerpc_bind_state);
+       struct dcecli_connection *conn = state->p->conn;
+       NTSTATUS status;
+
+       /*
+        * Note that pkt is allocated under raw_packet->data,
+        * while raw_packet->data is a child of subreq.
+        */
+       talloc_steal(state, raw_packet->data);
+       TALLOC_FREE(subreq);
+
+       /*
+        * We trigger the callback in the next event run
+        * because the code in this file might trigger
+        * multiple request callbacks from within a single
+        * while loop.
+        *
+        * In order to avoid segfaults from within
+        * dcerpc_connection_dead() we call
+        * tevent_req_defer_callback().
+        */
+       tevent_req_defer_callback(req, state->ev);
+
+       if (pkt->ptype == DCERPC_PKT_BIND_NAK) {
+               status = dcerpc_map_reason(pkt->u.bind_nak.reject_reason);
+
+               DEBUG(2,("dcerpc: bind_nak reason %d - %s\n",
+                        pkt->u.bind_nak.reject_reason, nt_errstr(status)));
+
+               tevent_req_nterror(req, status);
+               return;
+       }
+
+       if ((pkt->ptype != DCERPC_PKT_BIND_ACK) ||
+           (pkt->u.bind_ack.num_results == 0) ||
+           (pkt->u.bind_ack.ctx_list[0].result != 0)) {
+               state->p->last_fault_code = DCERPC_NCA_S_PROTO_ERROR;
+               tevent_req_nterror(req, NT_STATUS_NET_WRITE_FAULT);
+               return;
+       }
+
+       conn->srv_max_xmit_frag = pkt->u.bind_ack.max_xmit_frag;
+       conn->srv_max_recv_frag = pkt->u.bind_ack.max_recv_frag;
+
+       if ((state->p->binding->flags & DCERPC_CONCURRENT_MULTIPLEX) &&
+           (pkt->pfc_flags & DCERPC_PFC_FLAG_CONC_MPX)) {
+               conn->flags |= DCERPC_CONCURRENT_MULTIPLEX;
+       }
+
+       if ((state->p->binding->flags & DCERPC_HEADER_SIGNING) &&
+           (pkt->pfc_flags & DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN)) {
+               conn->flags |= DCERPC_HEADER_SIGNING;
+       }
+
+       /* the bind_ack might contain a reply set of credentials */
+       if (conn->security_state.auth_info && pkt->u.bind_ack.auth_info.length) {
+               uint32_t auth_length;
+
+               status = dcerpc_pull_auth_trailer(pkt, conn, &pkt->u.bind_ack.auth_info,
+                                                 conn->security_state.auth_info, &auth_length, true);
+               if (tevent_req_nterror(req, status)) {
+                       return;
+               }
+       }
+
+       state->p->assoc_group_id = pkt->u.bind_ack.assoc_group_id;
+
+       tevent_req_done(req);
+}
+
+NTSTATUS dcerpc_bind_recv(struct tevent_req *req)
+{
+       return tevent_req_simple_recv_ntstatus(req);
 }
 
 /* 
@@ -1373,11 +1450,11 @@ req_done:
        req->state = RPC_REQUEST_DONE;
        DLIST_REMOVE(c->pending, req);
 
-       if (c->request_queue != NULL) {
-               /* We have to look at shipping further requests before calling
-                * the async function, that one might close the pipe */
-               dcerpc_ship_next_request(c);
-       }
+       /*
+        * We have to look at shipping further requests before calling
+        * the async function, that one might close the pipe
+        */
+       dcerpc_schedule_io_trigger(c);
 
        if (req->async.callback) {
                req->async.callback(req);
@@ -1387,7 +1464,8 @@ req_done:
 /*
   perform the send side of a async dcerpc request
 */
-static struct rpc_request *dcerpc_request_send(struct dcerpc_pipe *p, 
+static struct rpc_request *dcerpc_request_send(TALLOC_CTX *mem_ctx,
+                                              struct dcerpc_pipe *p,
                                               const struct GUID *object,
                                               uint16_t opnum,
                                               DATA_BLOB *stub_data)
@@ -1396,7 +1474,7 @@ static struct rpc_request *dcerpc_request_send(struct dcerpc_pipe *p,
 
        p->conn->transport.recv_data = dcerpc_recv_data;
 
-       req = talloc(p, struct rpc_request);
+       req = talloc(mem_ctx, struct rpc_request);
        if (req == NULL) {
                return NULL;
        }
@@ -1425,15 +1503,12 @@ static struct rpc_request *dcerpc_request_send(struct dcerpc_pipe *p,
 
        req->opnum = opnum;
        req->request_data.length = stub_data->length;
-       req->request_data.data = talloc_reference(req, stub_data->data);
-       if (req->request_data.length && req->request_data.data == NULL) {
-               return NULL;
-       }
+       req->request_data.data = stub_data->data;
 
        DLIST_ADD_END(p->conn->request_queue, req, struct rpc_request *);
        talloc_set_destructor(req, dcerpc_req_dequeue);
 
-       dcerpc_ship_next_request(p->conn);
+       dcerpc_schedule_io_trigger(p->conn);
 
        if (p->request_timeout) {
                tevent_add_timer(dcerpc_event_context(p), req,
@@ -1561,6 +1636,43 @@ static void dcerpc_ship_next_request(struct dcecli_connection *c)
        }
 }
 
+static void dcerpc_io_trigger(struct tevent_context *ctx,
+                             struct tevent_immediate *im,
+                             void *private_data)
+{
+       struct dcecli_connection *c =
+               talloc_get_type_abort(private_data,
+               struct dcecli_connection);
+
+       c->io_trigger_pending = false;
+
+       dcerpc_schedule_io_trigger(c);
+
+       dcerpc_ship_next_request(c);
+}
+
+static void dcerpc_schedule_io_trigger(struct dcecli_connection *c)
+{
+       if (c->dead) {
+               return;
+       }


-- 
Samba Shared Repository
