Re: [Xenomai-core] [RFC][PATCH 2/2] global CPU affinity control
Version 5 is basically a rebase over the first patch of this series. It
also enforces that cpus_allowed of a shadow thread only contains a
single CPU. Only compile-tested.

Jan

---
 include/nucleus/pod.h    |    2 +
 ksrc/nucleus/intr.c      |    3 ++
 ksrc/nucleus/module.c    |   57 +++++++++++++++++++++++++++++++++++++++
 ksrc/nucleus/pod.c       |    6 +++-
 ksrc/nucleus/shadow.c    |   12 ++++---
 ksrc/skins/native/task.c |    3 ++
 6 files changed, 77 insertions(+), 6 deletions(-)

Index: xenomai/include/nucleus/pod.h
===================================================================
--- xenomai.orig/include/nucleus/pod.h
+++ xenomai/include/nucleus/pod.h
@@ -253,6 +253,8 @@ extern u_long nktickdef;
 
 extern char *nkmsgbuf;
 
+extern xnarch_cpumask_t nkaffinity;
+
 #define xnprintf(fmt,args...)  xnarch_printf(fmt , ##args)
 #define xnloginfo(fmt,args...) xnarch_loginfo(fmt , ##args)
 #define xnlogwarn(fmt,args...) xnarch_logwarn(fmt , ##args)
Index: xenomai/ksrc/nucleus/module.c
===================================================================
--- xenomai.orig/ksrc/nucleus/module.c
+++ xenomai/ksrc/nucleus/module.c
@@ -688,6 +688,59 @@ static int heap_read_proc(char *page,
 	return len;
 }
 
+static int affinity_read_proc(char *page,
+			      char **start,
+			      off_t off, int count, int *eof, void *data)
+{
+	unsigned long val = 0;
+	int len, cpu;
+
+	for (cpu = 0; cpu < sizeof(val) * 8; cpu++)
+		if (xnarch_cpu_isset(cpu, nkaffinity))
+			val |= (1 << cpu);
+
+	len = sprintf(page, "%08lx\n", val);
+	len -= off;
+	if (len <= off + count)
+		*eof = 1;
+	*start = page + off;
+	if (len > count)
+		len = count;
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+
+static int affinity_write_proc(struct file *file,
+			       const char __user * buffer,
+			       unsigned long count, void *data)
+{
+	char *end, buf[16];
+	unsigned long val;
+	xnarch_cpumask_t new_affinity;
+	int n, cpu;
+
+	n = count > sizeof(buf) - 1 ? sizeof(buf) - 1 : count;
+
+	if (copy_from_user(buf, buffer, n))
+		return -EFAULT;
+
+	buf[n] = '\0';
+	val = simple_strtol(buf, &end, 0);
+
+	if (*end != '\0' && !isspace(*end))
+		return -EINVAL;
+
+	xnarch_cpus_clear(new_affinity);
+	for (cpu = 0; cpu < sizeof(val) * 8; cpu++, val >>= 1)
+		if (val & 1)
+			xnarch_cpu_set(cpu, new_affinity);
+	nkaffinity = new_affinity;
+
+	return count;
+}
+
 static struct proc_dir_entry *add_proc_leaf(const char *name,
 					    read_proc_t rdproc,
 					    write_proc_t wrproc,
@@ -760,6 +813,9 @@ void xnpod_init_proc(void)
 	add_proc_leaf("heap", heap_read_proc, NULL, NULL, rthal_proc_root);
 
+	add_proc_leaf("affinity", affinity_read_proc, affinity_write_proc,
+		      NULL, rthal_proc_root);
+
 #ifdef CONFIG_XENO_OPT_PERVASIVE
 	iface_proc_root =
 	    create_proc_entry("interfaces", S_IFDIR, rthal_proc_root);
@@ -778,6 +834,7 @@ void xnpod_delete_proc(void)
 	remove_proc_entry("interfaces", rthal_proc_root);
 #endif /* CONFIG_XENO_OPT_PERVASIVE */
 
+	remove_proc_entry("affinity", rthal_proc_root);
 	remove_proc_entry("heap", rthal_proc_root);
 	remove_proc_entry("irq", rthal_proc_root);
 	remove_proc_entry("timer", rthal_proc_root);
Index: xenomai/ksrc/nucleus/pod.c
===================================================================
--- xenomai.orig/ksrc/nucleus/pod.c
+++ xenomai/ksrc/nucleus/pod.c
@@ -70,6 +70,8 @@ MODULE_PARM_DESC(tick_arg, "Fixed clock
 
 char *nkmsgbuf = NULL;
 
+xnarch_cpumask_t nkaffinity = XNPOD_ALL_CPUS;
+
 const char *xnpod_fatal_helper(const char *format, ...)
 {
 	const unsigned nr_cpus = xnarch_num_online_cpus();
@@ -909,8 +911,8 @@ int xnpod_start_thread(xnthread_t *threa
 	if (!testbits(thread->status, XNDORMANT))
 		return -EBUSY;
 
-	if (xnarch_cpus_empty(affinity))
-		affinity = XNARCH_CPU_MASK_ALL;
+	if (xnarch_cpus_equal(affinity, XNPOD_ALL_CPUS))
+		affinity = nkaffinity;
 
 	xnlock_get_irqsave(&nklock, s);
Index: xenomai/ksrc/skins/native/task.c
===================================================================
--- xenomai.orig/ksrc/skins/native/task.c
+++ xenomai/ksrc/skins/native/task.c
@@ -255,6 +255,9 @@ int rt_task_create(RT_TASK *task,
 		if (cpumask & 1)
 			xnarch_cpu_set(cpu, task->affinity);
 
+	if (xnarch_cpus_empty(task->affinity))
+		task->affinity = XNPOD_ALL_CPUS;
+
 #ifdef CONFIG_XENO_OPT_NATIVE_MPS
 	xnsynch_init(&task->mrecv, XNSYNCH_FIFO);
 	xnsynch_init(&task->msendq, XNSYNCH_PRIO | XNSYNCH_PIP);
[Xenomai-core] [RFC][PATCH 2/2] global CPU affinity control
This is version 4 of the /proc/xenomai/affinity feature. It has been
rebased on the reworked xnshadow_map code, now applying the default
affinity mask in a (hopefully) correct way also on shadow threads.

What has been said for the previous patch also applies here: test and
comment! Mathias, you asked for this feature, please let us know if it
fulfils your requirements. Philippe, you were looking for documentation
of this new behaviour. Please let me know where you would like to see
this. Surely in the nucleus API, I guess. Anywhere else?

Jan

---
 include/nucleus/pod.h    |    2 +
 ksrc/nucleus/intr.c      |    3 ++
 ksrc/nucleus/module.c    |   57 +++++++++++++++++++++++++++++++++++++++
 ksrc/nucleus/pod.c       |    6 +++-
 ksrc/nucleus/shadow.c    |    3 ++
 ksrc/skins/native/task.c |    3 ++
 6 files changed, 72 insertions(+), 2 deletions(-)

Index: xenomai/include/nucleus/pod.h
===================================================================
--- xenomai.orig/include/nucleus/pod.h
+++ xenomai/include/nucleus/pod.h
@@ -256,6 +256,8 @@ extern u_long nktickdef;
 
 extern char *nkmsgbuf;
 
+extern xnarch_cpumask_t nkaffinity;
+
 #define xnprintf(fmt,args...)  xnarch_printf(fmt , ##args)
 #define xnloginfo(fmt,args...) xnarch_loginfo(fmt , ##args)
 #define xnlogwarn(fmt,args...) xnarch_logwarn(fmt , ##args)
Index: xenomai/ksrc/nucleus/module.c
===================================================================
--- xenomai.orig/ksrc/nucleus/module.c
+++ xenomai/ksrc/nucleus/module.c
@@ -688,6 +688,59 @@ static int heap_read_proc(char *page,
 	return len;
 }
 
+static int affinity_read_proc(char *page,
+			      char **start,
+			      off_t off, int count, int *eof, void *data)
+{
+	unsigned long val = 0;
+	int len, cpu;
+
+	for (cpu = 0; cpu < sizeof(val) * 8; cpu++)
+		if (xnarch_cpu_isset(cpu, nkaffinity))
+			val |= (1 << cpu);
+
+	len = sprintf(page, "%08lx\n", val);
+	len -= off;
+	if (len <= off + count)
+		*eof = 1;
+	*start = page + off;
+	if (len > count)
+		len = count;
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+
+static int affinity_write_proc(struct file *file,
+			       const char __user * buffer,
+			       unsigned long count, void *data)
+{
+	char *end, buf[16];
+	unsigned long val;
+	xnarch_cpumask_t new_affinity;
+	int n, cpu;
+
+	n = count > sizeof(buf) - 1 ? sizeof(buf) - 1 : count;
+
+	if (copy_from_user(buf, buffer, n))
+		return -EFAULT;
+
+	buf[n] = '\0';
+	val = simple_strtol(buf, &end, 0);
+
+	if (*end != '\0' && !isspace(*end))
+		return -EINVAL;
+
+	xnarch_cpus_clear(new_affinity);
+	for (cpu = 0; cpu < sizeof(val) * 8; cpu++, val >>= 1)
+		if (val & 1)
+			xnarch_cpu_set(cpu, new_affinity);
+	nkaffinity = new_affinity;
+
+	return count;
+}
+
 static struct proc_dir_entry *add_proc_leaf(const char *name,
 					    read_proc_t rdproc,
 					    write_proc_t wrproc,
@@ -760,6 +813,9 @@ void xnpod_init_proc(void)
 	add_proc_leaf("heap", heap_read_proc, NULL, NULL, rthal_proc_root);
 
+	add_proc_leaf("affinity", affinity_read_proc, affinity_write_proc,
+		      NULL, rthal_proc_root);
+
 #ifdef CONFIG_XENO_OPT_PERVASIVE
 	iface_proc_root =
 	    create_proc_entry("interfaces", S_IFDIR, rthal_proc_root);
@@ -778,6 +834,7 @@ void xnpod_delete_proc(void)
 	remove_proc_entry("interfaces", rthal_proc_root);
 #endif /* CONFIG_XENO_OPT_PERVASIVE */
 
+	remove_proc_entry("affinity", rthal_proc_root);
 	remove_proc_entry("heap", rthal_proc_root);
 	remove_proc_entry("irq", rthal_proc_root);
 	remove_proc_entry("timer", rthal_proc_root);
Index: xenomai/ksrc/nucleus/pod.c
===================================================================
--- xenomai.orig/ksrc/nucleus/pod.c
+++ xenomai/ksrc/nucleus/pod.c
@@ -70,6 +70,8 @@ MODULE_PARM_DESC(tick_arg, "Fixed clock
 
 char *nkmsgbuf = NULL;
 
+xnarch_cpumask_t nkaffinity = XNPOD_ALL_CPUS;
+
 const char *xnpod_fatal_helper(const char *format, ...)
 {
 	const unsigned nr_cpus = xnarch_num_online_cpus();
@@ -909,8 +911,8 @@ int xnpod_start_thread(xnthread_t *threa
 	if (!testbits(thread->status, XNDORMANT))
 		return -EBUSY;
 
-	if (xnarch_cpus_empty(affinity))
-		affinity = XNARCH_CPU_MASK_ALL;
+	if (xnarch_cpus_equal(affinity, XNPOD_ALL_CPUS))
+		affinity = nkaffinity;
 
 	xnlock_get_irqsave(&nklock, s);
Index: xenomai/ksrc/nucleus/shadow.c
===================================================================
--- xenomai.orig/ksrc/nucleus/shadow.c
+++ xenomai/ksrc/nucleus/shadow.c
@@ -847,6 +847,9 @@ int