On Thu, 24 Feb 2011, Nils Carlson wrote:

A finer-grained scheme could be implemented using System V semaphores
and could look like this:

thread executing tracectl command:
      lock(fork_sem_mutex)
      increment(fork_sem)
      unlock(fork_sem_mutex)
      execute_command()
      decrement(fork_sem)

thread executing fork:
      lock(fork_sem_mutex) // now no new commands can be issued
      wait(fork_sem == 0) // wait for all commands to finish
      fork()
      unlock(fork_sem_mutex) // let commands start again.

I think this would work well, but System V semaphores are somewhat
heavyweight. So maybe somebody can come up with something prettier?

This could probably also be done more cleanly with an rwlock or a cond_wait;
I think either alternative would be simpler than System V semaphores.

/Nils


This is a little bit of a RFC patch, so please say what you think.

Signed-off-by: Nils Carlson <[email protected]>
---
libust/tracectl.c |   16 +++++++++++++---
1 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/libust/tracectl.c b/libust/tracectl.c
index f720244..fd69da6 100644
--- a/libust/tracectl.c
+++ b/libust/tracectl.c
@@ -71,6 +71,9 @@ static struct cds_list_head open_buffers_list = 
CDS_LIST_HEAD_INIT(open_buffers_

static struct cds_list_head ust_socks = CDS_LIST_HEAD_INIT(ust_socks);

+/* Fork protection mechanism */
+static pthread_mutex_t fork_mutex = PTHREAD_MUTEX_INITIALIZER;
+
/* volatile because shared between the listener and the main thread */
int buffers_to_export = 0;

@@ -1080,6 +1083,7 @@ void *listener_main(void *p)
                }

                for (i = 0; i < nfds; i++) {
+                       pthread_mutex_lock(&fork_mutex);
                        epoll_sock = (struct ustcomm_sock *)events[i].data.ptr;
                        if (epoll_sock == listen_sock) {
                                addr_size = sizeof(struct sockaddr);
@@ -1088,6 +1092,7 @@ void *listener_main(void *p)
                                                   (socklen_t *)&addr_size);
                                if (accept_fd == -1) {
                                        PERROR("listener_main: accept failed");
+                                       pthread_mutex_unlock(&fork_mutex);
                                        continue;
                                }
                                ustcomm_init_sock(accept_fd, epoll_fd,
@@ -1108,6 +1113,7 @@ void *listener_main(void *p)
                                                           epoll_sock->fd);
                                }
                        }
+                       pthread_mutex_unlock(&fork_mutex);
                }
        }

@@ -1578,9 +1584,6 @@ static void ust_fork(void)
        /* Get the pid of the new process */
        processpid = getpid();

-       /* break lock if necessary */
-       ltt_unlock_traces();
-
        /*
         * FIXME: This could be prettier, we loop over the list twice and
         * following good locking practice should lock around the loop
@@ -1657,6 +1660,11 @@ void ust_before_fork(ust_fork_info_t *fork_info)
                - only do this if tracing is active
        */

+       /* Take the fork lock to make sure we are not in the middle of
+        * something in the listener thread.
+        */
+       pthread_mutex_lock(&fork_mutex);
+
        /* Disable signals */
        sigfillset(&all_sigs);
        result = sigprocmask(SIG_BLOCK, &all_sigs, &fork_info->orig_sigs);
@@ -1677,6 +1685,8 @@ static void ust_after_fork_common(ust_fork_info_t 
*fork_info)
                PERROR("sigprocmask");
                return;
        }
+
+       pthread_mutex_unlock(&fork_mutex);
}

void ust_after_fork_parent(ust_fork_info_t *fork_info)
--
1.7.1



_______________________________________________
ltt-dev mailing list
[email protected]
http://lists.casi.polymtl.ca/cgi-bin/mailman/listinfo/ltt-dev

Reply via email to