This is an unmodified resend of my previously posted patch to make
/proc/xenomai/{sched,stat} O(1) with respect to nklock holding time. It also
solves a potential race during thread cleanup when accessing the dying
thread's name.

Jan


---
 include/nucleus/pod.h |    1 
 ksrc/nucleus/module.c |   94 +++++++++++++++++++++++++++++++++-----------------
 ksrc/nucleus/pod.c    |    2 +
 3 files changed, 66 insertions(+), 31 deletions(-)

Index: xenomai/include/nucleus/pod.h
===================================================================
--- xenomai.orig/include/nucleus/pod.h
+++ xenomai/include/nucleus/pod.h
@@ -195,6 +195,7 @@ struct xnpod {
     xnsched_t sched[XNARCH_NR_CPUS]; /*!< Per-cpu scheduler slots. */
 
     xnqueue_t threadq;          /*!< All existing threads. */
+    int threadq_rev;            /*!< Modification counter of threadq. */
 
     volatile u_long schedlck;  /*!< Scheduler lock count. */
 
Index: xenomai/ksrc/nucleus/module.c
===================================================================
--- xenomai.orig/ksrc/nucleus/module.c
+++ xenomai/ksrc/nucleus/module.c
@@ -99,7 +99,7 @@ struct sched_seq_iterator {
        struct sched_seq_info {
                int cpu;
                pid_t pid;
-               const char *name;
+               char name[XNOBJECT_NAME_LEN];
                int cprio;
                xnticks_t timeout;
                xnflags_t status;
@@ -177,17 +177,26 @@ static struct seq_operations sched_op = 
 
 static int sched_seq_open(struct inode *inode, struct file *file)
 {
-       struct sched_seq_iterator *iter;
+       struct sched_seq_iterator *iter = NULL;
        struct seq_file *seq;
        xnholder_t *holder;
-       int err, count;
+       int err, count, rev;
        spl_t s;
 
        if (!nkpod)
                return -ESRCH;
 
+      restart:
+       xnlock_get_irqsave(&nklock, s);
+
+       rev = nkpod->threadq_rev;
        count = countq(&nkpod->threadq);        /* Cannot be empty (ROOT) */
+       holder = getheadq(&nkpod->threadq);
+
+       xnlock_put_irqrestore(&nklock, s);
 
+       if (iter)
+               kfree(iter);
        iter = kmalloc(sizeof(*iter)
                       + (count - 1) * sizeof(struct sched_seq_info),
                       GFP_KERNEL);
@@ -202,31 +211,37 @@ static int sched_seq_open(struct inode *
        }
 
        iter->nentries = 0;
+       iter->start_time = xntimer_get_jiffies();
 
-       /* Take a snapshot and release the nucleus lock immediately after,
-          so that dumping /proc/xenomai/sched with lots of entries won't
-          cause massive jittery. */
+       /* Take a snapshot element-wise, restart if something changes
+          underneath us. */
 
-       xnlock_get_irqsave(&nklock, s);
+       while (holder) {
+               xnthread_t *thread;
+               int n;
 
-       iter->start_time = xntimer_get_jiffies();
+               xnlock_get_irqsave(&nklock, s);
+
+               if (nkpod->threadq_rev != rev)
+                       goto restart;
+               rev = nkpod->threadq_rev;
 
-       for (holder = getheadq(&nkpod->threadq);
-            holder && count > 0;
-            holder = nextq(&nkpod->threadq, holder), count--) {
-               xnthread_t *thread = link2thread(holder, glink);
-               int n = iter->nentries++;
+               thread = link2thread(holder, glink);
+               n = iter->nentries++;
 
                iter->sched_info[n].cpu = xnsched_cpu(thread->sched);
                iter->sched_info[n].pid = xnthread_user_pid(thread);
-               iter->sched_info[n].name = thread->name;
+               memcpy(iter->sched_info[n].name, thread->name,
+                      sizeof(iter->sched_info[n].name));
                iter->sched_info[n].cprio = thread->cprio;
                iter->sched_info[n].timeout =
                    xnthread_get_timeout(thread, iter->start_time);
                iter->sched_info[n].status = thread->status;
-       }
 
-       xnlock_put_irqrestore(&nklock, s);
+               holder = nextq(&nkpod->threadq, holder);
+
+               xnlock_put_irqrestore(&nklock, s);
+       }
 
        seq = (struct seq_file *)file->private_data;
        seq->private = iter;
@@ -250,7 +265,7 @@ struct stat_seq_iterator {
                int cpu;
                pid_t pid;
                xnflags_t status;
-               const char *name;
+               char name[XNOBJECT_NAME_LEN];
                unsigned long ssw;
                unsigned long csw;
                unsigned long pf;
@@ -315,17 +330,26 @@ static struct seq_operations stat_op = {
 
 static int stat_seq_open(struct inode *inode, struct file *file)
 {
-       struct stat_seq_iterator *iter;
+       struct stat_seq_iterator *iter = NULL;
        struct seq_file *seq;
        xnholder_t *holder;
-       int err, count;
+       int err, count, rev;
        spl_t s;
 
        if (!nkpod)
                return -ESRCH;
 
+      restart:
+       xnlock_get_irqsave(&nklock, s);
+
+       rev = nkpod->threadq_rev;
        count = countq(&nkpod->threadq);        /* Cannot be empty (ROOT) */
+       holder = getheadq(&nkpod->threadq);
+
+       xnlock_put_irqrestore(&nklock, s);
 
+       if (iter)
+               kfree(iter);
        iter = kmalloc(sizeof(*iter)
                       + (count - 1) * sizeof(struct stat_seq_info),
                       GFP_KERNEL);
@@ -341,27 +365,35 @@ static int stat_seq_open(struct inode *i
 
        iter->nentries = 0;
 
-       /* Take a snapshot and release the nucleus lock immediately after,
-          so that dumping /proc/xenomai/stat with lots of entries won't
-          cause massive jittery. */
+       /* Take a snapshot element-wise, restart if something changes
+          underneath us. */
 
-       xnlock_get_irqsave(&nklock, s);
+       while (holder) {
+               xnthread_t *thread;
+               int n;
+
+               xnlock_get_irqsave(&nklock, s);
+
+               if (nkpod->threadq_rev != rev)
+                       goto restart;
+               rev = nkpod->threadq_rev;
+
+               thread = link2thread(holder, glink);
+               n = iter->nentries++;
 
-       for (holder = getheadq(&nkpod->threadq);
-            holder && count > 0;
-            holder = nextq(&nkpod->threadq, holder), count--) {
-               xnthread_t *thread = link2thread(holder, glink);
-               int n = iter->nentries++;
                iter->stat_info[n].cpu = xnsched_cpu(thread->sched);
                iter->stat_info[n].pid = xnthread_user_pid(thread);
-               iter->stat_info[n].name = thread->name;
+               memcpy(iter->stat_info[n].name, thread->name,
+                      sizeof(iter->stat_info[n].name));
                iter->stat_info[n].status = thread->status;
                iter->stat_info[n].ssw = thread->stat.ssw;
                iter->stat_info[n].csw = thread->stat.csw;
                iter->stat_info[n].pf = thread->stat.pf;
-       }
 
-       xnlock_put_irqrestore(&nklock, s);
+               holder = nextq(&nkpod->threadq, holder);
+
+               xnlock_put_irqrestore(&nklock, s);
+       }
 
        seq = (struct seq_file *)file->private_data;
        seq->private = iter;
Index: xenomai/ksrc/nucleus/pod.c
===================================================================
--- xenomai.orig/ksrc/nucleus/pod.c
+++ xenomai/ksrc/nucleus/pod.c
@@ -815,6 +815,7 @@ int xnpod_init_thread(xnthread_t *thread
        xnlock_get_irqsave(&nklock, s);
        thread->sched = xnpod_current_sched();
        appendq(&nkpod->threadq, &thread->glink);
+       nkpod->threadq_rev++;
        xnpod_suspend_thread(thread, XNDORMANT | (flags & XNSUSP), XN_INFINITE,
                             NULL);
        xnlock_put_irqrestore(&nklock, s);
@@ -1225,6 +1226,7 @@ void xnpod_delete_thread(xnthread_t *thr
        sched = thread->sched;
 
        removeq(&nkpod->threadq, &thread->glink);
+       nkpod->threadq_rev++;
 
        if (!testbits(thread->status, XNTHREAD_BLOCK_BITS)) {
                if (testbits(thread->status, XNREADY)) {





Attachment: signature.asc
Description: OpenPGP digital signature

_______________________________________________
Xenomai-core mailing list
Xenomai-core@gna.org
https://mail.gna.org/listinfo/xenomai-core

Reply via email to