Jan Kiszka wrote:

> Jan Kiszka wrote:
>   
>> Philippe Gerum wrote:
>>     
>>> On Thu, 2006-07-06 at 17:09 +0200, Jan Kiszka wrote:
>>>       
>>>>> We could do that from the current loop below, given that the
>>>>> accumulation routine is changed to use thread->sched implicitly.
>>>>>           
>>>> The idea is to avoid adding even more load to the nklock-protected loop.
>>>> And we only update the current thread, not each and every one.
>>>>
>>>>         
>>> Yes, but why? I mean, accumulated time so far remains significant even
>>> for non-running threads.
>>>       
>> Update must only happen for the currently running thread on each cpu,
>> otherwise you skew the stats up.
>>
>>
>> But looking at the whole code in stat_seq_open() again, I now wonder if
>> that whole routine isn't
>>
>> a) fragile on task removal and
>> b) still scaling poorly.
>>
>> The thread name is only copied by reference; a disappearing instance may
>> cause trouble when printing it. And instead of holding the lock the whole
>> time, wouldn't it be better to
>>
>> 1. pick some element from the queue and mark it somehow
>>    as in-use under the lock
>> 2. print or copy the entry
>> 3. reacquire the lock to proceed to the next one - after checking if
>>    that element happened to be removed from the queue meanwhile (in that
>>    case we could abort the output or try to resync)
>>     
>
> Not feasible (threads cannot always be prevented from being deleted),
> but what about this:
>
> - maintain a modification counter for nkpod->threadq
> - in our /proc loops (e.g. sched and latency):
>   1. take nklock
>   2. get current counter
>   3. compare with the previous value, restart the whole loop if it doesn't match
>   4. copy current element (including the name...)
>   5. advance the element pointer
>   6. release nklock
>   7. goto 1 if not end-of-list
>
> As modifications on threadq should be fairly rare, I don't see a risk of
> looping endlessly here.
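
In code, the loop pattern would look roughly like this (a minimal sketch only,
not the actual patch; it reuses the names from the patch below, and buffer
allocation and error handling are omitted):

  restart:
        xnlock_get_irqsave(&nklock, s);
        rev = nkpod->threadq_rev;
        count = countq(&nkpod->threadq);
        holder = getheadq(&nkpod->threadq);
        xnlock_put_irqrestore(&nklock, s);

        /* (re)allocate a snapshot buffer for 'count' entries here */

        while (holder) {
                xnlock_get_irqsave(&nklock, s);
                if (nkpod->threadq_rev != rev) {
                        /* threadq changed underneath us, redo the snapshot */
                        xnlock_put_irqrestore(&nklock, s);
                        goto restart;
                }
                /* copy the current element by value, name included */
                holder = nextq(&nkpod->threadq, holder);
                xnlock_put_irqrestore(&nklock, s);
        }
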
Here is such a patch plus a reworked version of the exec-time stats. It
compiles and boots, and doesn't cause obvious trouble when run in a loop while
threads are created and destroyed in parallel. Nevertheless, it is a quick hack
which may contain bugs.

Jan

---
 include/nucleus/pod.h |    1 
 ksrc/nucleus/module.c |   94 +++++++++++++++++++++++++++++++++-----------------
 ksrc/nucleus/pod.c    |    2 +
 3 files changed, 66 insertions(+), 31 deletions(-)

Index: xenomai/include/nucleus/pod.h
===================================================================
--- xenomai.orig/include/nucleus/pod.h
+++ xenomai/include/nucleus/pod.h
@@ -195,6 +195,7 @@ struct xnpod {
     xnsched_t sched[XNARCH_NR_CPUS]; /*!< Per-cpu scheduler slots. */
 
     xnqueue_t threadq;          /*!< All existing threads. */
+    int threadq_rev;            /*!< Modification counter of threadq. */
 
     volatile u_long schedlck;  /*!< Scheduler lock count. */
 
Index: xenomai/ksrc/nucleus/module.c
===================================================================
--- xenomai.orig/ksrc/nucleus/module.c
+++ xenomai/ksrc/nucleus/module.c
@@ -99,7 +99,7 @@ struct sched_seq_iterator {
        struct sched_seq_info {
                int cpu;
                pid_t pid;
-               const char *name;
+               char name[XNOBJECT_NAME_LEN];
                int cprio;
                xnticks_t timeout;
                xnflags_t status;
@@ -177,17 +177,26 @@ static struct seq_operations sched_op = 
 
 static int sched_seq_open(struct inode *inode, struct file *file)
 {
-       struct sched_seq_iterator *iter;
+       struct sched_seq_iterator *iter = NULL;
        struct seq_file *seq;
        xnholder_t *holder;
-       int err, count;
+       int err, count, rev;
        spl_t s;
 
        if (!nkpod)
                return -ESRCH;
 
+      restart:
+       xnlock_get_irqsave(&nklock, s);
+
+       rev = nkpod->threadq_rev;
        count = countq(&nkpod->threadq);        /* Cannot be empty (ROOT) */
+       holder = getheadq(&nkpod->threadq);
+
+       xnlock_put_irqrestore(&nklock, s);
 
+       if (iter)
+               kfree(iter);
        iter = kmalloc(sizeof(*iter)
                       + (count - 1) * sizeof(struct sched_seq_info),
                       GFP_KERNEL);
@@ -202,31 +211,37 @@ static int sched_seq_open(struct inode *
        }
 
        iter->nentries = 0;
+       iter->start_time = xntimer_get_jiffies();
 
-       /* Take a snapshot and release the nucleus lock immediately after,
-          so that dumping /proc/xenomai/sched with lots of entries won't
-          cause massive jittery. */
+       /* Take a snapshot element-wise, restart if something changes
+          underneath us. */
 
-       xnlock_get_irqsave(&nklock, s);
+       while (holder) {
+               xnthread_t *thread;
+               int n;
 
-       iter->start_time = xntimer_get_jiffies();
+               xnlock_get_irqsave(&nklock, s);
+
+               if (nkpod->threadq_rev != rev)
+                       goto restart;
+               rev = nkpod->threadq_rev;
 
-       for (holder = getheadq(&nkpod->threadq);
-            holder && count > 0;
-            holder = nextq(&nkpod->threadq, holder), count--) {
-               xnthread_t *thread = link2thread(holder, glink);
-               int n = iter->nentries++;
+               thread = link2thread(holder, glink);
+               n = iter->nentries++;
 
                iter->sched_info[n].cpu = xnsched_cpu(thread->sched);
                iter->sched_info[n].pid = xnthread_user_pid(thread);
-               iter->sched_info[n].name = thread->name;
+               memcpy(iter->sched_info[n].name, thread->name,
+                      sizeof(iter->sched_info[n].name));
                iter->sched_info[n].cprio = thread->cprio;
                iter->sched_info[n].timeout =
                    xnthread_get_timeout(thread, iter->start_time);
                iter->sched_info[n].status = thread->status;
-       }
 
-       xnlock_put_irqrestore(&nklock, s);
+               holder = nextq(&nkpod->threadq, holder);
+
+               xnlock_put_irqrestore(&nklock, s);
+       }
 
        seq = (struct seq_file *)file->private_data;
        seq->private = iter;
@@ -250,7 +265,7 @@ struct stat_seq_iterator {
                int cpu;
                pid_t pid;
                xnflags_t status;
-               const char *name;
+               char name[XNOBJECT_NAME_LEN];
                unsigned long ssw;
                unsigned long csw;
                unsigned long pf;
@@ -315,17 +330,26 @@ static struct seq_operations stat_op = {
 
 static int stat_seq_open(struct inode *inode, struct file *file)
 {
-       struct stat_seq_iterator *iter;
+       struct stat_seq_iterator *iter = NULL;
        struct seq_file *seq;
        xnholder_t *holder;
-       int err, count;
+       int err, count, rev;
        spl_t s;
 
        if (!nkpod)
                return -ESRCH;
 
+      restart:
+       xnlock_get_irqsave(&nklock, s);
+
+       rev = nkpod->threadq_rev;
        count = countq(&nkpod->threadq);        /* Cannot be empty (ROOT) */
+       holder = getheadq(&nkpod->threadq);
+
+       xnlock_put_irqrestore(&nklock, s);
 
+       if (iter)
+               kfree(iter);
        iter = kmalloc(sizeof(*iter)
                       + (count - 1) * sizeof(struct stat_seq_info),
                       GFP_KERNEL);
@@ -341,27 +365,35 @@ static int stat_seq_open(struct inode *i
 
        iter->nentries = 0;
 
-       /* Take a snapshot and release the nucleus lock immediately after,
-          so that dumping /proc/xenomai/stat with lots of entries won't
-          cause massive jittery. */
+       /* Take a snapshot element-wise, restart if something changes
+          underneath us. */
 
-       xnlock_get_irqsave(&nklock, s);
+       while (holder) {
+               xnthread_t *thread;
+               int n;
+
+               xnlock_get_irqsave(&nklock, s);
+
+               if (nkpod->threadq_rev != rev)
+                       goto restart;
+               rev = nkpod->threadq_rev;
+
+               thread = link2thread(holder, glink);
+               n = iter->nentries++;
 
-       for (holder = getheadq(&nkpod->threadq);
-            holder && count > 0;
-            holder = nextq(&nkpod->threadq, holder), count--) {
-               xnthread_t *thread = link2thread(holder, glink);
-               int n = iter->nentries++;
                iter->stat_info[n].cpu = xnsched_cpu(thread->sched);
                iter->stat_info[n].pid = xnthread_user_pid(thread);
-               iter->stat_info[n].name = thread->name;
+               memcpy(iter->stat_info[n].name, thread->name,
+                      sizeof(iter->stat_info[n].name));
                iter->stat_info[n].status = thread->status;
                iter->stat_info[n].ssw = thread->stat.ssw;
                iter->stat_info[n].csw = thread->stat.csw;
                iter->stat_info[n].pf = thread->stat.pf;
-       }
 
-       xnlock_put_irqrestore(&nklock, s);
+               holder = nextq(&nkpod->threadq, holder);
+
+               xnlock_put_irqrestore(&nklock, s);
+       }
 
        seq = (struct seq_file *)file->private_data;
        seq->private = iter;
Index: xenomai/ksrc/nucleus/pod.c
===================================================================
--- xenomai.orig/ksrc/nucleus/pod.c
+++ xenomai/ksrc/nucleus/pod.c
@@ -815,6 +815,7 @@ int xnpod_init_thread(xnthread_t *thread
        xnlock_get_irqsave(&nklock, s);
        thread->sched = xnpod_current_sched();
        appendq(&nkpod->threadq, &thread->glink);
+       nkpod->threadq_rev++;
        xnpod_suspend_thread(thread, XNDORMANT | (flags & XNSUSP), XN_INFINITE,
                             NULL);
        xnlock_put_irqrestore(&nklock, s);
@@ -1225,6 +1226,7 @@ void xnpod_delete_thread(xnthread_t *thr
        sched = thread->sched;
 
        removeq(&nkpod->threadq, &thread->glink);
+       nkpod->threadq_rev++;
 
        if (!testbits(thread->status, XNTHREAD_BLOCK_BITS)) {
                if (testbits(thread->status, XNREADY)) {


---
 include/nucleus/pod.h    |   28 ++++++++++++++++++++++++++++
 include/nucleus/thread.h |    1 +
 ksrc/nucleus/module.c    |   26 +++++++++++++++++++++-----
 ksrc/nucleus/pod.c       |    5 +++++
 ksrc/nucleus/thread.c    |    1 +
 5 files changed, 56 insertions(+), 5 deletions(-)

Index: include/nucleus/thread.h
===================================================================
--- include/nucleus/thread.h.orig
+++ include/nucleus/thread.h
@@ -152,6 +152,7 @@ typedef struct xnthread {
        unsigned long csw;      /* Context switches (includes
                                   secondary -> primary switches) */
        unsigned long pf;       /* Number of page faults */
+       xnticks_t exec_time;    /* Accumulated execution time (ticks) */
     } stat;
 #endif /* CONFIG_XENO_OPT_STATS */
 
Index: include/nucleus/pod.h
===================================================================
--- include/nucleus/pod.h.orig
+++ include/nucleus/pod.h
@@ -145,6 +145,10 @@ typedef struct xnsched {
 
     xnthread_t rootcb;          /*!< Root thread control block. */
 
+#ifdef CONFIG_XENO_OPT_STATS
+    xnticks_t last_csw;         /*!< Last context switch (ticks). */
+#endif /* CONFIG_XENO_OPT_STATS */
+
 } xnsched_t;
 
 #ifdef CONFIG_SMP
@@ -545,6 +549,30 @@ static inline void xnpod_delete_self (vo
     xnpod_delete_thread(xnpod_current_thread());
 }
 
+#ifdef CONFIG_XENO_OPT_STATS
+static inline void xnpod_acc_exec_time(xnthread_t *thread)
+{
+    xnsched_t *sched = thread->sched;
+    xnticks_t now = xntimer_get_rawclock();
+
+    thread->stat.exec_time += now - sched->last_csw;
+    sched->last_csw = now;
+}
+
+static inline void xnpod_update_csw_date(xnsched_t *sched)
+{
+    sched->last_csw = xntimer_get_rawclock();
+}
+#else /* !CONFIG_XENO_OPT_STATS */
+static inline void xnpod_acc_exec_time(xnthread_t *thread)
+{
+}
+
+static inline void xnpod_update_csw_date(xnsched_t *sched)
+{
+}
+#endif /* CONFIG_XENO_OPT_STATS */
+
 #ifdef __cplusplus
 }
 #endif
Index: ksrc/nucleus/thread.c
===================================================================
--- ksrc/nucleus/thread.c.orig
+++ ksrc/nucleus/thread.c
@@ -90,6 +90,7 @@ int xnthread_init(xnthread_t *thread,
        thread->stat.ssw = 0;
        thread->stat.csw = 0;
        thread->stat.pf = 0;
+       thread->stat.exec_time = 0;
 #endif /* CONFIG_XENO_OPT_STATS */
 
        /* These will be filled by xnpod_start_thread() */
Index: ksrc/nucleus/pod.c
===================================================================
--- ksrc/nucleus/pod.c.orig
+++ ksrc/nucleus/pod.c
@@ -669,6 +669,9 @@ static inline void xnpod_switch_zombie(x
 
        xnthread_cleanup_tcb(threadout);
 
+       /* no need to update stats of dying thread */
+       xnpod_update_csw_date(sched);
+
        xnarch_finalize_and_switch(xnthread_archtcb(threadout),
                                   xnthread_archtcb(threadin));
 
@@ -2433,6 +2436,7 @@ void xnpod_schedule(void)
                xnarch_enter_root(xnthread_archtcb(threadin));
        }
 
+       xnpod_acc_exec_time(threadout);
        xnthread_inc_csw(threadin);
 
        xnarch_switch_to(xnthread_archtcb(threadout),
@@ -2604,6 +2608,7 @@ void xnpod_schedule_runnable(xnthread_t 
                nkpod->schedhook(runthread, XNREADY);
 #endif /* __XENO_SIM__ */
 
+       xnpod_acc_exec_time(runthread);
        xnthread_inc_csw(threadin);
 
        xnarch_switch_to(xnthread_archtcb(runthread),
Index: ksrc/nucleus/module.c
===================================================================
--- ksrc/nucleus/module.c.orig
+++ ksrc/nucleus/module.c
@@ -269,6 +269,7 @@ struct stat_seq_iterator {
                unsigned long ssw;
                unsigned long csw;
                unsigned long pf;
+               xnticks_t exec_time;
        } stat_info[1];
 };
 
@@ -309,13 +310,17 @@ static void stat_seq_stop(struct seq_fil
 static int stat_seq_show(struct seq_file *seq, void *v)
 {
        if (v == SEQ_START_TOKEN)
-               seq_printf(seq, "%-3s  %-6s %-10s %-10s %-4s  %-8s  %s\n",
-                          "CPU", "PID", "MSW", "CSW", "PF", "STAT", "NAME");
+               seq_printf(seq, "%-3s  %-6s %-10s %-10s %-4s  %-8s  %12s"
+                          "  %s\n",
+                          "CPU", "PID", "MSW", "CSW", "PF", "STAT", "TIME",
+                          "NAME");
        else {
                struct stat_seq_info *p = (struct stat_seq_info *)v;
-               seq_printf(seq, "%3u  %-6d %-10lu %-10lu %-4lu  %.8lx  %s\n",
+               unsigned long long exec_time = xnpod_ticks2ns(p->exec_time);
+               seq_printf(seq, "%3u  %-6d %-10lu %-10lu %-4lu  %.8lx  %12llu"
+                          "  %s\n",
                           p->cpu, p->pid, p->ssw, p->csw, p->pf, p->status,
-                          p->name);
+                          xnarch_ulldiv(exec_time, 1000, NULL), p->name);
        }
 
        return 0;
@@ -333,7 +338,7 @@ static int stat_seq_open(struct inode *i
        struct stat_seq_iterator *iter = NULL;
        struct seq_file *seq;
        xnholder_t *holder;
-       int err, count, rev;
+       int err, count, rev, cpu;
        spl_t s;
 
        if (!nkpod)
@@ -365,6 +370,16 @@ static int stat_seq_open(struct inode *i
 
        iter->nentries = 0;
 
+       /* update exec-time stats of currently running threads */
+       for_each_online_cpu(cpu) {
+               xnsched_t *sched;
+
+               xnlock_get_irqsave(&nklock, s);
+               sched = xnpod_sched_slot(cpu);
+               xnpod_acc_exec_time(sched->runthread);
+               xnlock_put_irqrestore(&nklock, s);
+       }
+
        /* Take a snapshot element-wise, restart if something changes
           underneath us. */
 
@@ -389,6 +404,7 @@ static int stat_seq_open(struct inode *i
                iter->stat_info[n].ssw = thread->stat.ssw;
                iter->stat_info[n].csw = thread->stat.csw;
                iter->stat_info[n].pf = thread->stat.pf;
+               iter->stat_info[n].exec_time = thread->stat.exec_time;
 
                holder = nextq(&nkpod->threadq, holder);
 


