Jan Kiszka wrote:
> This is version 2 of my patch, now applying the global mask idea to
> IRQ affinity as well. I think this simple mechanism can solve quite
> a few basic SMP setup issues by forcing all user threads, driver threads,
> and IRQs that are created/attached during a certain period onto a specific
> CPU set.
> 
> That's the theory. In practice, things don't look that clear ATM. I
> noticed some oddity on a 4-way qemu box running the latency test and
> hacked the attached enhancement for /proc/xenomai/sched. This is what I
> get on that box:
> 
>> [EMAIL PROTECTED] :/root# cat /proc/xenomai/sched 
>> CPU  AFFINITY PID    PRI      PERIOD   TIMEOUT    STAT       NAME
>>   0  00000001 0       -1      0        0          R          ROOT/0
>>   1  00000002 0       -1      0        0          R          ROOT/1
>>   2  00000004 0       -1      0        0          R          ROOT/2
>>   3  00000008 0       -1      0        0          R          ROOT/3
>>   0  0000000c 917      0      0        0          W          display-916
>>   2  0000000c 918     99      5000000  1          D          sampling-916
>> [EMAIL PROTECTED] :/root# cat /proc/xenomai/affinity 
>> 0x0000000c
> 

[I should really stop believing that heavy issues can be solved with
lightweight patches...]

OK, after reading a bit more of the code I was hacking on, I'm starting
to understand the CPU selection mechanism for shadow threads. Here comes
version 3 of the patch. It now actually forces all threads onto the
desired CPUs.

The reason why I failed with my previous approaches may even be an SMP
bug in the original code: the Linux affinity of a shadow thread was only
set if no u_completion had been passed to xnshadow_map - which affected
native threads, for instance. So I reordered the code, and now it works.
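
To illustrate, here is a simplified sketch of the old control flow in
xnshadow_map() (not the literal code; see the shadow.c hunk below for
the actual change):

    if (u_completion) {
            /* native threads took this early exit ... */
            xnshadow_signal_completion(u_completion, 0);
            return 0;
    }

    /* ... so they never reached the CPU pinning down here */
    affinity = xnarch_cpumask_of_cpu(rthal_processor_id());
    set_cpus_allowed(current, affinity);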

I also changed the policy of /proc/xenomai/affinity: it now only
overrides the mapping if the user did not provide an explicit mask. I
think this provides more flexibility.
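
For example (a hypothetical session on the 4-way box above):

    # echo 0x0c > /proc/xenomai/affinity
    # cat /proc/xenomai/affinity
    0x0000000c

All Xenomai threads and IRQs created/attached afterwards without an
explicit affinity mask should then end up on CPUs 2 and 3 only, while
threads started with an explicit mask keep their requested placement.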

Comments?

Jan
---
 include/nucleus/pod.h |    2 +
 ksrc/nucleus/intr.c   |    3 ++
 ksrc/nucleus/module.c |   57 ++++++++++++++++++++++++++++++++++++++++++++++++++
 ksrc/nucleus/pod.c    |    4 ++-
 ksrc/nucleus/shadow.c |   15 +++++--------
 5 files changed, 71 insertions(+), 10 deletions(-)

Index: xenomai/include/nucleus/pod.h
===================================================================
--- xenomai.orig/include/nucleus/pod.h
+++ xenomai/include/nucleus/pod.h
@@ -253,6 +253,8 @@ extern u_long nktickdef;
 
 extern char *nkmsgbuf;
 
+extern xnarch_cpumask_t nkaffinity;
+
 #define xnprintf(fmt,args...)  xnarch_printf(fmt , ##args)
 #define xnloginfo(fmt,args...) xnarch_loginfo(fmt , ##args)
 #define xnlogwarn(fmt,args...) xnarch_logwarn(fmt , ##args)
Index: xenomai/ksrc/nucleus/intr.c
===================================================================
--- xenomai.orig/ksrc/nucleus/intr.c
+++ xenomai/ksrc/nucleus/intr.c
@@ -626,6 +626,9 @@ int xnintr_attach(xnintr_t *intr, void *
 
        xnlock_get_irqsave(&intrlock, s);
 
+#ifdef CONFIG_SMP
+       xnarch_set_irq_affinity(intr->irq, nkaffinity);
+#endif /* CONFIG_SMP */
        err = xnintr_irq_attach(intr);
 
        xnlock_put_irqrestore(&intrlock, s);
Index: xenomai/ksrc/nucleus/module.c
===================================================================
--- xenomai.orig/ksrc/nucleus/module.c
+++ xenomai/ksrc/nucleus/module.c
@@ -688,6 +688,59 @@ static int heap_read_proc(char *page,
        return len;
 }
 
+static int affinity_read_proc(char *page,
+                             char **start,
+                             off_t off, int count, int *eof, void *data)
+{
+       unsigned long val = 0;
+       int len, cpu;
+
+       for (cpu = 0; cpu < sizeof(val) * 8; cpu++)
+               if (xnarch_cpu_isset(cpu, nkaffinity))
+                       val |= (1 << cpu);
+
+       len = sprintf(page, "%08lx\n", val);
+       if (len <= off + count)
+               *eof = 1;
+       *start = page + off;
+       len -= off;
+       if (len > count)
+               len = count;
+       if (len < 0)
+               len = 0;
+
+       return len;
+}
+
+static int affinity_write_proc(struct file *file,
+                              const char __user * buffer,
+                              unsigned long count, void *data)
+{
+       char *end, buf[16];
+       unsigned long val;
+       xnarch_cpumask_t new_affinity;
+       int n, cpu;
+
+       n = count > sizeof(buf) - 1 ? sizeof(buf) - 1 : count;
+
+       if (copy_from_user(buf, buffer, n))
+               return -EFAULT;
+
+       buf[n] = '\0';
+       val = simple_strtol(buf, &end, 0);
+
+       if (*end != '\0' && !isspace(*end))
+               return -EINVAL;
+
+       xnarch_cpus_clear(new_affinity);
+       for (cpu = 0; cpu < sizeof(val) * 8; cpu++, val >>= 1)
+               if (val & 1)
+                       xnarch_cpu_set(cpu, new_affinity);
+       nkaffinity = new_affinity;
+
+       return count;
+}
+
 static struct proc_dir_entry *add_proc_leaf(const char *name,
                                            read_proc_t rdproc,
                                            write_proc_t wrproc,
@@ -760,6 +813,9 @@ void xnpod_init_proc(void)
 
        add_proc_leaf("heap", &heap_read_proc, NULL, NULL, rthal_proc_root);
 
+       add_proc_leaf("affinity", &affinity_read_proc, &affinity_write_proc,
+                     NULL, rthal_proc_root);
+
 #ifdef CONFIG_XENO_OPT_PERVASIVE
        iface_proc_root =
            create_proc_entry("interfaces", S_IFDIR, rthal_proc_root);
@@ -778,6 +834,7 @@ void xnpod_delete_proc(void)
 
        remove_proc_entry("interfaces", rthal_proc_root);
 #endif /* CONFIG_XENO_OPT_PERVASIVE */
+       remove_proc_entry("affinity", rthal_proc_root);
        remove_proc_entry("heap", rthal_proc_root);
        remove_proc_entry("irq", rthal_proc_root);
        remove_proc_entry("timer", rthal_proc_root);
Index: xenomai/ksrc/nucleus/pod.c
===================================================================
--- xenomai.orig/ksrc/nucleus/pod.c
+++ xenomai/ksrc/nucleus/pod.c
@@ -70,6 +70,8 @@ MODULE_PARM_DESC(tick_arg, "Fixed clock 
 
 char *nkmsgbuf = NULL;
 
+xnarch_cpumask_t nkaffinity = XNPOD_ALL_CPUS;
+
 const char *xnpod_fatal_helper(const char *format, ...)
 {
        const unsigned nr_cpus = xnarch_num_online_cpus();
@@ -910,7 +912,7 @@ int xnpod_start_thread(xnthread_t *threa
                return -EBUSY;
 
        if (xnarch_cpus_empty(affinity))
-               affinity = XNARCH_CPU_MASK_ALL;
+               affinity = nkaffinity;
 
        xnlock_get_irqsave(&nklock, s);
 
Index: xenomai/ksrc/nucleus/shadow.c
===================================================================
--- xenomai.orig/ksrc/nucleus/shadow.c
+++ xenomai/ksrc/nucleus/shadow.c
@@ -853,20 +853,17 @@ int xnshadow_map(xnthread_t *thread, xnc
        xnshadow_thrptd(current) = thread;
        xnpod_suspend_thread(thread, XNRELAX, XN_INFINITE, NULL);
 
+       affinity = thread->affinity;
+       if (xnarch_cpus_empty(affinity))
+               affinity = nkaffinity;
+       set_cpus_allowed(current, affinity);
+
        if (u_completion) {
                xnshadow_signal_completion(u_completion, 0);
                return 0;
        }
 
-       /* Nobody waits for us, so we may start the shadow immediately
-          after having forced the CPU affinity to the current
-          processor. Note that we don't use smp_processor_id() to prevent
-          kernel debug stuff to yell at us for calling it in a preemptible
-          section of code. */
-
-       affinity = xnarch_cpumask_of_cpu(rthal_processor_id());
-       set_cpus_allowed(current, affinity);
-
+       /* Nobody waits for us, so we may start the shadow immediately. */
        mode = thread->rrperiod != XN_INFINITE ? XNRRB : 0;
        xnpod_start_thread(thread, mode, 0, affinity, NULL, NULL);
 
