[Xenomai-core] [PATCH] blackfin: build xenomai in FLAT format
This patch fixes xenomai-2.4.x branch build scripts, to build xenomai as FLAT on Blackfin. 1. -mfdpic option is by default set by bfin-linux-uclibc-gcc. It is not required and it can cause error for bfin-uclinux-gcc. 2. libpthread_rt.la should not depend on lpthread. -Yi diff -Nurp --exclude=.svn xenomai-2.4.x-clean/configure xenomai-2.4.x/configure --- xenomai-2.4.x-clean/configure 2008-12-04 19:21:40.0 +0800 +++ xenomai-2.4.x/configure 2009-02-23 17:12:14.0 +0800 @@ -22569,10 +22569,10 @@ case $XENO_TARGET_ARCH in ;; blackfin) # Produce libraries in FDPIC format. - XENO_USER_CFLAGS=$XENO_USER_CFLAGS -Wall -pipe -mfdpic - XENO_USER_LDFLAGS=$XENO_USER_CFLAGS -mfdpic - XENO_USER_APP_CFLAGS=$XENO_USER_APP_CFLAGS -mfdpic - XENO_USER_APP_LDFLAGS=$XENO_USER_APP_LDFLAGS -mfdpic + XENO_USER_CFLAGS=$XENO_USER_CFLAGS -Wall -pipe + XENO_USER_LDFLAGS=$XENO_USER_CFLAGS + XENO_USER_APP_CFLAGS=$XENO_USER_APP_CFLAGS + XENO_USER_APP_LDFLAGS=$XENO_USER_APP_LDFLAGS ;; arm) XENO_USER_CFLAGS=$XENO_USER_CFLAGS -Wall -pipe -march=armv$CONFIG_XENO_ARM_ARCH diff -Nurp --exclude=.svn xenomai-2.4.x-clean/configure.in xenomai-2.4.x/configure.in --- xenomai-2.4.x-clean/configure.in2008-12-04 19:21:40.0 +0800 +++ xenomai-2.4.x/configure.in 2009-02-23 17:12:14.0 +0800 @@ -648,10 +648,10 @@ case $XENO_TARGET_ARCH in ;; blackfin) # Produce libraries in FDPIC format. 
- XENO_USER_CFLAGS=$XENO_USER_CFLAGS -Wall -pipe -mfdpic - XENO_USER_LDFLAGS=$XENO_USER_CFLAGS -mfdpic - XENO_USER_APP_CFLAGS=$XENO_USER_APP_CFLAGS -mfdpic - XENO_USER_APP_LDFLAGS=$XENO_USER_APP_LDFLAGS -mfdpic + XENO_USER_CFLAGS=$XENO_USER_CFLAGS -Wall -pipe + XENO_USER_LDFLAGS=$XENO_USER_CFLAGS + XENO_USER_APP_CFLAGS=$XENO_USER_APP_CFLAGS + XENO_USER_APP_LDFLAGS=$XENO_USER_APP_LDFLAGS ;; arm) XENO_USER_CFLAGS=$XENO_USER_CFLAGS -Wall -pipe -march=armv$CONFIG_XENO_ARM_ARCH diff -Nurp --exclude=.svn xenomai-2.4.x-clean/src/skins/posix/Makefile.am xenomai-2.4.x/src/skins/posix/Makefile.am --- xenomai-2.4.x-clean/src/skins/posix/Makefile.am 2008-03-10 06:33:59.0 +0800 +++ xenomai-2.4.x/src/skins/posix/Makefile.am 2009-02-23 17:12:14.0 +0800 @@ -2,7 +2,7 @@ includedir = $(prefix)/include/posix lib_LTLIBRARIES = libpthread_rt.la -libpthread_rt_la_LDFLAGS = -version-info 1:0:0 -lpthread +libpthread_rt_la_LDFLAGS = -version-info 1:0:0 libpthread_rt_la_SOURCES = \ init.c \ diff -Nurp --exclude=.svn xenomai-2.4.x-clean/src/skins/posix/Makefile.in xenomai-2.4.x/src/skins/posix/Makefile.in --- xenomai-2.4.x-clean/src/skins/posix/Makefile.in 2008-06-02 05:32:28.0 +0800 +++ xenomai-2.4.x/src/skins/posix/Makefile.in 2009-02-23 17:12:14.0 +0800 @@ -239,7 +239,7 @@ target_vendor = @target_vendor@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ lib_LTLIBRARIES = libpthread_rt.la -libpthread_rt_la_LDFLAGS = -version-info 1:0:0 -lpthread +libpthread_rt_la_LDFLAGS = -version-info 1:0:0 libpthread_rt_la_SOURCES = \ init.c \ thread.c \ diff -Nurp --exclude=.svn xenomai-2.4.x-clean/src/testsuite/clocktest/Makefile.am xenomai-2.4.x/src/testsuite/clocktest/Makefile.am --- xenomai-2.4.x-clean/src/testsuite/clocktest/Makefile.am 2008-03-01 04:41:56.0 +0800 +++ xenomai-2.4.x/src/testsuite/clocktest/Makefile.am 2009-02-23 17:12:14.0 +0800 @@ -9,7 +9,7 @@ clocktest_CPPFLAGS = -I$(top_srcdir)/inc clocktest_LDFLAGS = $(XENO_POSIX_WRAPPERS) $(XENO_USER_LDFLAGS) clocktest_LDADD = \ - 
../../skins/posix/libpthread_rt.la -lpthread -lrt + -lpthread -lrt ../../skins/posix/libpthread_rt.la install-data-local: $(mkinstalldirs) $(DESTDIR)$(testdir) diff -Nurp --exclude=.svn xenomai-2.4.x-clean/src/testsuite/clocktest/Makefile.in xenomai-2.4.x/src/testsuite/clocktest/Makefile.in --- xenomai-2.4.x-clean/src/testsuite/clocktest/Makefile.in 2008-06-02 05:32:28.0 +0800 +++ xenomai-2.4.x/src/testsuite/clocktest/Makefile.in 2009-02-23 17:12:14.0 +0800 @@ -232,7 +232,7 @@ clocktest_SOURCES = clocktest.c clocktest_CPPFLAGS = -I$(top_srcdir)/include/posix $(XENO_USER_CFLAGS) -I$(top_srcdir)/include clocktest_LDFLAGS = $(XENO_POSIX_WRAPPERS) $(XENO_USER_LDFLAGS) clocktest_LDADD = \ - ../../skins/posix/libpthread_rt.la -lpthread -lrt + -lpthread -lrt ../../skins/posix/libpthread_rt.la EXTRA_DIST = runinfo.in all: all-am diff -Nurp --exclude=.svn xenomai-2.4.x-clean/src/testsuite/cyclic/Makefile.am xenomai-2.4.x/src/testsuite/cyclic/Makefile.am --- xenomai-2.4.x-clean/src/testsuite/cyclic/Makefile.am2008-03-01 04:41:56.0 +0800 +++ xenomai-2.4.x/src/testsuite/cyclic/Makefile.am 2009-02-23 17:12:14.0 +0800 @@ -9,7 +9,7 @@ cyclictest_CPPFLAGS = -I$(top_srcdir)/in cyclictest_LDFLAGS = $(XENO_POSIX_WRAPPERS)
[Xenomai-core] [PATCH] Fix gatekeeper affinity
As the xnsched structures get initialized later, during xnpod_init, xnsched_cpu always returned 0 in the gatekeeper_thread prologue. That caused binding of all gatekeepers to CPU 0. Signed-off-by: Jan Kiszka jan.kis...@siemens.com --- ksrc/nucleus/shadow.c |5 - 1 files changed, 4 insertions(+), 1 deletions(-) diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c index 1dedd85..2243c0e 100644 --- a/ksrc/nucleus/shadow.c +++ b/ksrc/nucleus/shadow.c @@ -823,11 +823,14 @@ static int gatekeeper_thread(void *data) struct task_struct *this_task = current; DECLARE_WAITQUEUE(wait, this_task); struct xnsched *sched = data; - int cpu = xnsched_cpu(sched); struct xnthread *target; cpumask_t cpumask; + int cpu; spl_t s; + /* sched not fully initialized, xnsched_cpu does not work yet */ + cpu = sched - nkpod_struct.sched; + this_task->flags |= PF_NOFREEZE; sigfillset(&this_task->blocked); cpumask = cpumask_of_cpu(cpu); ___ Xenomai-core mailing list Xenomai-core@gna.org https://mail.gna.org/listinfo/xenomai-core
Re: [Xenomai-core] [PATCH] Fix gatekeeper affinity
Jan Kiszka wrote: As the xnsched structures get initialized later, during xnpod_init, xnsched_cpu always returned 0 in the gatekeeper_thread prologue. That caused binding of all gatekeepers to CPU 0. Signed-off-by: Jan Kiszka jan.kis...@siemens.com --- ksrc/nucleus/shadow.c |5 - 1 files changed, 4 insertions(+), 1 deletions(-) diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c index 1dedd85..2243c0e 100644 --- a/ksrc/nucleus/shadow.c +++ b/ksrc/nucleus/shadow.c @@ -823,11 +823,14 @@ static int gatekeeper_thread(void *data) struct task_struct *this_task = current; DECLARE_WAITQUEUE(wait, this_task); struct xnsched *sched = data; - int cpu = xnsched_cpu(sched); struct xnthread *target; cpumask_t cpumask; + int cpu; spl_t s; + /* sched not fully initialized, xnsched_cpu does not work yet */ + cpu = sched - nkpod_struct.sched; + this_task->flags |= PF_NOFREEZE; sigfillset(&this_task->blocked); cpumask = cpumask_of_cpu(cpu); This does not look good, it means that the gatekeeper accesses the sched structure before it is initialized. So, IMO, the proper fix would be to start the gatekeepers only after the sched structure has been initialized. -- Gilles. ___ Xenomai-core mailing list Xenomai-core@gna.org https://mail.gna.org/listinfo/xenomai-core
Re: [Xenomai-core] [PATCH] Fix gatekeeper affinity
Gilles Chanteperdrix wrote: Jan Kiszka wrote: As the xnsched structures get initialized later, during xnpod_init, xnsched_cpu always returned 0 in the gatekeeper_thread prologue. That caused binding of all gatekeepers to CPU 0. Signed-off-by: Jan Kiszka jan.kis...@siemens.com --- ksrc/nucleus/shadow.c |5 - 1 files changed, 4 insertions(+), 1 deletions(-) diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c index 1dedd85..2243c0e 100644 --- a/ksrc/nucleus/shadow.c +++ b/ksrc/nucleus/shadow.c @@ -823,11 +823,14 @@ static int gatekeeper_thread(void *data) struct task_struct *this_task = current; DECLARE_WAITQUEUE(wait, this_task); struct xnsched *sched = data; -int cpu = xnsched_cpu(sched); struct xnthread *target; cpumask_t cpumask; +int cpu; spl_t s; +/* sched not fully initialized, xnsched_cpu does not work yet */ +cpu = sched - nkpod_struct.sched; + this_task-flags |= PF_NOFREEZE; sigfillset(this_task-blocked); cpumask = cpumask_of_cpu(cpu); This does not look good, it means that the gatekeeper accesses the sched structure before it is initialized. So, IMO, the proper fix would be to start the gatekeepers only after the sched structure has been initialized. I briefly thought about moving xnshadow_mount into xnpod_init. But given the fact that it worked like this before and that I was not able to quickly exclude new regressions when reordering things, I decided to restore the old pattern. Jan -- Siemens AG, Corporate Technology, CT SE 2 Corporate Competence Center Embedded Linux ___ Xenomai-core mailing list Xenomai-core@gna.org https://mail.gna.org/listinfo/xenomai-core
Re: [Xenomai-core] [PATCH] Fix gatekeeper affinity
Jan Kiszka wrote: Gilles Chanteperdrix wrote: Jan Kiszka wrote: As the xnsched structures get initialized later, during xnpod_init, xnsched_cpu always returned 0 in the gatekeeper_thread prologue. That caused binding of all gatekeepers to CPU 0. Signed-off-by: Jan Kiszka jan.kis...@siemens.com --- ksrc/nucleus/shadow.c |5 - 1 files changed, 4 insertions(+), 1 deletions(-) diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c index 1dedd85..2243c0e 100644 --- a/ksrc/nucleus/shadow.c +++ b/ksrc/nucleus/shadow.c @@ -823,11 +823,14 @@ static int gatekeeper_thread(void *data) struct task_struct *this_task = current; DECLARE_WAITQUEUE(wait, this_task); struct xnsched *sched = data; - int cpu = xnsched_cpu(sched); struct xnthread *target; cpumask_t cpumask; + int cpu; spl_t s; + /* sched not fully initialized, xnsched_cpu does not work yet */ + cpu = sched - nkpod_struct.sched; + this_task-flags |= PF_NOFREEZE; sigfillset(this_task-blocked); cpumask = cpumask_of_cpu(cpu); This does not look good, it means that the gatekeeper accesses the sched structure before it is initialized. So, IMO, the proper fix would be to start the gatekeepers only after the sched structure has been initialized. I briefly thought about moving xnshadow_mount into xnpod_init. But given the fact that it worked like this before and that I was not able to quickly exclude new regressions when reordering things, I decided to restore the old pattern. Ok. What about passing cpu as the gatekeeper argument, and using xnpod_sched_slot(cpu) to find the sched pointer ? -- Gilles. ___ Xenomai-core mailing list Xenomai-core@gna.org https://mail.gna.org/listinfo/xenomai-core
Re: [Xenomai-core] [PATCH] Fix gatekeeper affinity
Gilles Chanteperdrix wrote: Jan Kiszka wrote: Gilles Chanteperdrix wrote: Jan Kiszka wrote: As the xnsched structures get initialized later, during xnpod_init, xnsched_cpu always returned 0 in the gatekeeper_thread prologue. That caused binding of all gatekeepers to CPU 0. Signed-off-by: Jan Kiszka jan.kis...@siemens.com --- ksrc/nucleus/shadow.c |5 - 1 files changed, 4 insertions(+), 1 deletions(-) diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c index 1dedd85..2243c0e 100644 --- a/ksrc/nucleus/shadow.c +++ b/ksrc/nucleus/shadow.c @@ -823,11 +823,14 @@ static int gatekeeper_thread(void *data) struct task_struct *this_task = current; DECLARE_WAITQUEUE(wait, this_task); struct xnsched *sched = data; - int cpu = xnsched_cpu(sched); struct xnthread *target; cpumask_t cpumask; + int cpu; spl_t s; + /* sched not fully initialized, xnsched_cpu does not work yet */ + cpu = sched - nkpod_struct.sched; + this_task-flags |= PF_NOFREEZE; sigfillset(this_task-blocked); cpumask = cpumask_of_cpu(cpu); This does not look good, it means that the gatekeeper accesses the sched structure before it is initialized. So, IMO, the proper fix would be to start the gatekeepers only after the sched structure has been initialized. I briefly thought about moving xnshadow_mount into xnpod_init. But given the fact that it worked like this before and that I was not able to quickly exclude new regressions when reordering things, I decided to restore the old pattern. Ok. What about passing cpu as the gatekeeper argument, and using xnpod_sched_slot(cpu) to find the sched pointer ? Something like this? Same result, but looks indeed a bit nicer. --- As the xnsched structures get initialized later, during xnpod_init, xnsched_cpu always returned 0 in the gatekeeper_thread prologue. That caused binding of all gatekeepers to CPU 0. 
Signed-off-by: Jan Kiszka jan.kis...@siemens.com --- ksrc/nucleus/shadow.c |8 1 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c index 1dedd85..6822fcb 100644 --- a/ksrc/nucleus/shadow.c +++ b/ksrc/nucleus/shadow.c @@ -822,8 +822,8 @@ static int gatekeeper_thread(void *data) { struct task_struct *this_task = current; DECLARE_WAITQUEUE(wait, this_task); - struct xnsched *sched = data; - int cpu = xnsched_cpu(sched); + int cpu = (long)data; + struct xnsched *sched = xnpod_sched_slot(cpu); struct xnthread *target; cpumask_t cpumask; spl_t s; @@ -2600,8 +2600,8 @@ int xnshadow_mount(void) sema_init(&sched->gksync, 0); xnarch_memory_barrier(); sched->gatekeeper = - kthread_create(gatekeeper_thread, sched, "gatekeeper/%d", - cpu); + kthread_create(gatekeeper_thread, (void *)(long)cpu, + "gatekeeper/%d", cpu); wake_up_process(sched->gatekeeper); down(&sched->gksync); } ___ Xenomai-core mailing list Xenomai-core@gna.org https://mail.gna.org/listinfo/xenomai-core
Re: [Xenomai-core] [PATCH] Fix gatekeeper affinity
Jan Kiszka wrote: As the xnsched structures get initialized later, during xnpod_init, xnsched_cpu always returned 0 in the gatekeeper_thread prologue. That caused binding of all gatekeepers to CPU 0. Hey, nice. This is why v2.4.x used pointer arithmetics to determine the cpu # in the first place. Signed-off-by: Jan Kiszka jan.kis...@siemens.com --- ksrc/nucleus/shadow.c |5 - 1 files changed, 4 insertions(+), 1 deletions(-) diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c index 1dedd85..2243c0e 100644 --- a/ksrc/nucleus/shadow.c +++ b/ksrc/nucleus/shadow.c @@ -823,11 +823,14 @@ static int gatekeeper_thread(void *data) struct task_struct *this_task = current; DECLARE_WAITQUEUE(wait, this_task); struct xnsched *sched = data; - int cpu = xnsched_cpu(sched); struct xnthread *target; cpumask_t cpumask; + int cpu; spl_t s; + /* sched not fully initialized, xnsched_cpu does not work yet */ + cpu = sched - nkpod_struct.sched; + this_task-flags |= PF_NOFREEZE; sigfillset(this_task-blocked); cpumask = cpumask_of_cpu(cpu); ___ Xenomai-core mailing list Xenomai-core@gna.org https://mail.gna.org/listinfo/xenomai-core -- Philippe. ___ Xenomai-core mailing list Xenomai-core@gna.org https://mail.gna.org/listinfo/xenomai-core
Re: [Xenomai-core] [PATCH] Fix gatekeeper affinity
Gilles Chanteperdrix wrote: Jan Kiszka wrote: As the xnsched structures get initialized later, during xnpod_init, xnsched_cpu always returned 0 in the gatekeeper_thread prologue. That caused binding of all gatekeepers to CPU 0. Signed-off-by: Jan Kiszka jan.kis...@siemens.com --- ksrc/nucleus/shadow.c |5 - 1 files changed, 4 insertions(+), 1 deletions(-) diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c index 1dedd85..2243c0e 100644 --- a/ksrc/nucleus/shadow.c +++ b/ksrc/nucleus/shadow.c @@ -823,11 +823,14 @@ static int gatekeeper_thread(void *data) struct task_struct *this_task = current; DECLARE_WAITQUEUE(wait, this_task); struct xnsched *sched = data; -int cpu = xnsched_cpu(sched); struct xnthread *target; cpumask_t cpumask; +int cpu; spl_t s; +/* sched not fully initialized, xnsched_cpu does not work yet */ +cpu = sched - nkpod_struct.sched; + this_task-flags |= PF_NOFREEZE; sigfillset(this_task-blocked); cpumask = cpumask_of_cpu(cpu); This does not look good, it means that the gatekeeper accesses the sched structure before it is initialized. Fortunately, no, this can't be. The gatekeeper only refers to the semaphore and the sync barrier and only that. Since the semaphore is initialized when the gk starts, the gk sets up the barrier on entry, and the barrier can't be signaled until the nucleus has fully initialized in xnpod_init(), we used to be safe. So, IMO, the proper fix would be to start the gatekeepers only after the sched structure has been initialized. -- Philippe. ___ Xenomai-core mailing list Xenomai-core@gna.org https://mail.gna.org/listinfo/xenomai-core
Re: [Xenomai-core] [PATCH] blackfin: build xenomai in FLAT format
yi li wrote: This patch fixes xenomai-2.4.x branch build scripts, to build xenomai as FLAT on Blackfin. 1. -mfdpic option is by default set by bfin-linux-uclibc-gcc. It is not required and it can cause error for bfin-uclinux-gcc. Ack. 2. libpthread_rt.la should not depend on lpthread. Nak. In flat mode, turning the link dependencies order upside down will not buy us anything. Two-phase link is the only way to prevent circular/invalid dependencies with a static only format when symbol wrapping is involved. -Yi diff -Nurp --exclude=.svn xenomai-2.4.x-clean/configure xenomai-2.4.x/configure --- xenomai-2.4.x-clean/configure 2008-12-04 19:21:40.0 +0800 +++ xenomai-2.4.x/configure 2009-02-23 17:12:14.0 +0800 @@ -22569,10 +22569,10 @@ case $XENO_TARGET_ARCH in ;; blackfin) # Produce libraries in FDPIC format. - XENO_USER_CFLAGS=$XENO_USER_CFLAGS -Wall -pipe -mfdpic - XENO_USER_LDFLAGS=$XENO_USER_CFLAGS -mfdpic - XENO_USER_APP_CFLAGS=$XENO_USER_APP_CFLAGS -mfdpic - XENO_USER_APP_LDFLAGS=$XENO_USER_APP_LDFLAGS -mfdpic + XENO_USER_CFLAGS=$XENO_USER_CFLAGS -Wall -pipe + XENO_USER_LDFLAGS=$XENO_USER_CFLAGS + XENO_USER_APP_CFLAGS=$XENO_USER_APP_CFLAGS + XENO_USER_APP_LDFLAGS=$XENO_USER_APP_LDFLAGS ;; arm) XENO_USER_CFLAGS=$XENO_USER_CFLAGS -Wall -pipe -march=armv$CONFIG_XENO_ARM_ARCH diff -Nurp --exclude=.svn xenomai-2.4.x-clean/configure.in xenomai-2.4.x/configure.in --- xenomai-2.4.x-clean/configure.in 2008-12-04 19:21:40.0 +0800 +++ xenomai-2.4.x/configure.in2009-02-23 17:12:14.0 +0800 @@ -648,10 +648,10 @@ case $XENO_TARGET_ARCH in ;; blackfin) # Produce libraries in FDPIC format. 
- XENO_USER_CFLAGS=$XENO_USER_CFLAGS -Wall -pipe -mfdpic - XENO_USER_LDFLAGS=$XENO_USER_CFLAGS -mfdpic - XENO_USER_APP_CFLAGS=$XENO_USER_APP_CFLAGS -mfdpic - XENO_USER_APP_LDFLAGS=$XENO_USER_APP_LDFLAGS -mfdpic + XENO_USER_CFLAGS=$XENO_USER_CFLAGS -Wall -pipe + XENO_USER_LDFLAGS=$XENO_USER_CFLAGS + XENO_USER_APP_CFLAGS=$XENO_USER_APP_CFLAGS + XENO_USER_APP_LDFLAGS=$XENO_USER_APP_LDFLAGS ;; arm) XENO_USER_CFLAGS=$XENO_USER_CFLAGS -Wall -pipe -march=armv$CONFIG_XENO_ARM_ARCH diff -Nurp --exclude=.svn xenomai-2.4.x-clean/src/skins/posix/Makefile.am xenomai-2.4.x/src/skins/posix/Makefile.am --- xenomai-2.4.x-clean/src/skins/posix/Makefile.am 2008-03-10 06:33:59.0 +0800 +++ xenomai-2.4.x/src/skins/posix/Makefile.am 2009-02-23 17:12:14.0 +0800 @@ -2,7 +2,7 @@ includedir = $(prefix)/include/posix lib_LTLIBRARIES = libpthread_rt.la -libpthread_rt_la_LDFLAGS = -version-info 1:0:0 -lpthread +libpthread_rt_la_LDFLAGS = -version-info 1:0:0 libpthread_rt_la_SOURCES = \ init.c \ diff -Nurp --exclude=.svn xenomai-2.4.x-clean/src/skins/posix/Makefile.in xenomai-2.4.x/src/skins/posix/Makefile.in --- xenomai-2.4.x-clean/src/skins/posix/Makefile.in 2008-06-02 05:32:28.0 +0800 +++ xenomai-2.4.x/src/skins/posix/Makefile.in 2009-02-23 17:12:14.0 +0800 @@ -239,7 +239,7 @@ target_vendor = @target_vendor@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ lib_LTLIBRARIES = libpthread_rt.la -libpthread_rt_la_LDFLAGS = -version-info 1:0:0 -lpthread +libpthread_rt_la_LDFLAGS = -version-info 1:0:0 libpthread_rt_la_SOURCES = \ init.c \ thread.c \ diff -Nurp --exclude=.svn xenomai-2.4.x-clean/src/testsuite/clocktest/Makefile.am xenomai-2.4.x/src/testsuite/clocktest/Makefile.am --- xenomai-2.4.x-clean/src/testsuite/clocktest/Makefile.am 2008-03-01 04:41:56.0 +0800 +++ xenomai-2.4.x/src/testsuite/clocktest/Makefile.am 2009-02-23 17:12:14.0 +0800 @@ -9,7 +9,7 @@ clocktest_CPPFLAGS = -I$(top_srcdir)/inc clocktest_LDFLAGS = $(XENO_POSIX_WRAPPERS) $(XENO_USER_LDFLAGS) clocktest_LDADD = \ - 
../../skins/posix/libpthread_rt.la -lpthread -lrt + -lpthread -lrt ../../skins/posix/libpthread_rt.la install-data-local: $(mkinstalldirs) $(DESTDIR)$(testdir) diff -Nurp --exclude=.svn xenomai-2.4.x-clean/src/testsuite/clocktest/Makefile.in xenomai-2.4.x/src/testsuite/clocktest/Makefile.in --- xenomai-2.4.x-clean/src/testsuite/clocktest/Makefile.in 2008-06-02 05:32:28.0 +0800 +++ xenomai-2.4.x/src/testsuite/clocktest/Makefile.in 2009-02-23 17:12:14.0 +0800 @@ -232,7 +232,7 @@ clocktest_SOURCES = clocktest.c clocktest_CPPFLAGS = -I$(top_srcdir)/include/posix $(XENO_USER_CFLAGS) -I$(top_srcdir)/include clocktest_LDFLAGS = $(XENO_POSIX_WRAPPERS) $(XENO_USER_LDFLAGS) clocktest_LDADD = \ - ../../skins/posix/libpthread_rt.la -lpthread -lrt + -lpthread -lrt ../../skins/posix/libpthread_rt.la EXTRA_DIST = runinfo.in all: all-am diff -Nurp --exclude=.svn xenomai-2.4.x-clean/src/testsuite/cyclic/Makefile.am xenomai-2.4.x/src/testsuite/cyclic/Makefile.am ---
[Xenomai-core] [PATCH] Adjust stack size for x86 and tune default pool size
Given that x86-64 pushes quad words on the stack, we should better double its default size (just like PowerPC does). And as 64-bit archs uses larger task stacks, doubling their default stack pool size is a good idea, too. Signed-off-by: Jan Kiszka jan.kis...@siemens.com --- include/asm-x86/system_64.h |2 +- ksrc/nucleus/Kconfig| 10 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h index b02faa3..18310a4 100644 --- a/include/asm-x86/system_64.h +++ b/include/asm-x86/system_64.h @@ -28,7 +28,7 @@ #include linux/ptrace.h #include asm-generic/xenomai/system.h -#define XNARCH_THREAD_STACKSZ 4096 +#define XNARCH_THREAD_STACKSZ 8192 #define xnarch_stack_size(tcb) ((tcb)-stacksize) #define xnarch_stack_base(tcb) ((tcb)-stackbase) diff --git a/ksrc/nucleus/Kconfig b/ksrc/nucleus/Kconfig index eb262ce..1d6a566 100644 --- a/ksrc/nucleus/Kconfig +++ b/ksrc/nucleus/Kconfig @@ -186,16 +186,16 @@ config XENO_OPT_SYS_HEAPSZ config XENO_OPT_SYS_STACKPOOLSZ depends on XENO_GENERIC_STACKPOOL int Size of the private stack pool (Kb) - default 32 - range 0 128 + default 32 if !64BIT !BLACKFIN + default 64 if 64BIT || BLACKFIN help On this architecture, kernel-based Xenomai threads get the stack space they need from a private memory pool. If you don't start any kernel-based thread (i.e. no RTDM driver - thread, and no real-time task created from an application - embodied into a kernel module), you may leave a zero value for - this option. The size is expressed in Kilobytes. + thread, no real-time task created from an application embodied + into a kernel module, no switchtest driver), you may leave a + zero value for this option. The size is expressed in Kilobytes. if !XENO_GENERIC_STACKPOOL config XENO_OPT_SYS_STACKPOOLSZ ___ Xenomai-core mailing list Xenomai-core@gna.org https://mail.gna.org/listinfo/xenomai-core
[Xenomai-core] [PATCH] lttng: Update markers
Since LTTng for 2.6.27 (most recent release) trace_mark takes an additional argument: channel. Split up the existing marker names (channel_event) into this format (channel, event) and provide wrappers for older versions. Signed-off-by: Jan Kiszka jan.kis...@siemens.com --- include/asm-generic/wrappers.h | 25 + include/nucleus/pod.h |2 +- include/rtdm/rtdm_driver.h | 10 ksrc/nucleus/intr.c| 28 --- ksrc/nucleus/pod.c | 48 ksrc/nucleus/sched.c |6 +++-- ksrc/nucleus/shadow.c | 23 ++- ksrc/nucleus/synch.c | 14 ++-- ksrc/nucleus/timebase.c|8 +++ ksrc/nucleus/timer.c | 20 - ksrc/skins/rtdm/core.c | 26 -- ksrc/skins/rtdm/device.c | 10 ksrc/skins/rtdm/drvlib.c | 19 13 files changed, 125 insertions(+), 114 deletions(-) diff --git a/include/asm-generic/wrappers.h b/include/asm-generic/wrappers.h index 5fdcd24..285948b 100644 --- a/include/asm-generic/wrappers.h +++ b/include/asm-generic/wrappers.h @@ -454,17 +454,24 @@ unsigned long find_next_bit(const unsigned long *addr, #define IRQF_SHAREDSA_SHIRQ #endif /* 2.6.18 */ +#ifdef CONFIG_LTT + #if LINUX_VERSION_CODE KERNEL_VERSION(2,6,24) -/* For pre-2.6.24 kernel with LTTng add-on. */ -#ifdef CONFIG_MARKERS -#include linux/marker.h -#define trace_mark(ev, fmt, args...) MARK(ev, fmt , ##args) -#else /* !CONFIG_MARKERS */ -#define trace_mark(ev, fmt, args...) do { } while (0) -#endif /* !CONFIG_MARKERS */ -#else /* LINUX_VERSION_CODE = KERNEL_VERSION(2,6,24) */ +#define trace_mark(channel, ev, fmt, args...) \ + MARK(channel##_##ev, fmt , ##args) +#else /* = 2.6.24 */ #include linux/marker.h -#endif /* LINUX_VERSION_CODE = KERNEL_VERSION(2,6,24) */ +#if LINUX_VERSION_CODE KERNEL_VERSION(2,6,27) +#undef trace_mark +#define trace_mark(channel, ev, fmt, args...) \ + __trace_mark(0, channel##_##ev, NULL, fmt, ## args) +#endif /* 2.6.27 */ +#endif /* = 2.6.24 */ + +#else /* !CONFIG_LTT */ +#undef trace_mark +#define trace_mark(channel, ev, fmt, args...) 
do { } while (0) +#endif /* !CONFIG_LTT */ #if LINUX_VERSION_CODE KERNEL_VERSION(2,6,22) #define KMALLOC_MAX_SIZE 131072 diff --git a/include/nucleus/pod.h b/include/nucleus/pod.h index 1b231c8..cd9bd2a 100644 --- a/include/nucleus/pod.h +++ b/include/nucleus/pod.h @@ -280,7 +280,7 @@ static inline void xnpod_run_hooks(struct xnqueue *q, struct xnthread *thread, const char *type) { if (!emptyq_p(q) !xnthread_test_state(thread, XNROOT)) { - trace_mark(xn_nucleus_thread_callout, + trace_mark(xn_nucleus, thread_callout, thread %p thread_name %s hook %s, thread, xnthread_name(thread), type); xnpod_fire_callouts(q, thread); diff --git a/include/rtdm/rtdm_driver.h b/include/rtdm/rtdm_driver.h index 330f011..d03a12c 100644 --- a/include/rtdm/rtdm_driver.h +++ b/include/rtdm/rtdm_driver.h @@ -1075,13 +1075,13 @@ void __rtdm_synch_flush(xnsynch_t *synch, unsigned long reason); static inline void rtdm_event_pulse(rtdm_event_t *event) { - trace_mark(xn_rtdm_event_pulse, event %p, event); + trace_mark(xn_rtdm, event_pulse, event %p, event); __rtdm_synch_flush(event-synch_base, 0); } static inline void rtdm_event_destroy(rtdm_event_t *event) { - trace_mark(xn_rtdm_event_destroy, event %p, event); + trace_mark(xn_rtdm, event_destroy, event %p, event); __rtdm_synch_flush(event-synch_base, XNRMID); xnselect_destroy(event-select_block); } @@ -1110,7 +1110,7 @@ void rtdm_sem_up(rtdm_sem_t *sem); #ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */ static inline void rtdm_sem_destroy(rtdm_sem_t *sem) { - trace_mark(xn_rtdm_sem_destroy, sem %p, sem); + trace_mark(xn_rtdm, sem_destroy, sem %p, sem); __rtdm_synch_flush(sem-synch_base, XNRMID); xnselect_destroy(sem-select_block); } @@ -1132,7 +1132,7 @@ static inline void rtdm_mutex_unlock(rtdm_mutex_t *mutex) { XENO_ASSERT(RTDM, !xnpod_asynch_p(), return;); - trace_mark(xn_rtdm_mutex_unlock, mutex %p, mutex); + trace_mark(xn_rtdm, mutex_unlock, mutex %p, mutex); if (unlikely(xnsynch_release(mutex-synch_base) != NULL)) 
xnpod_schedule(); @@ -1140,7 +1140,7 @@ static inline void rtdm_mutex_unlock(rtdm_mutex_t *mutex) static inline void rtdm_mutex_destroy(rtdm_mutex_t *mutex) { - trace_mark(xn_rtdm_mutex_destroy, mutex %p, mutex); + trace_mark(xn_rtdm, mutex_destroy, mutex %p, mutex); __rtdm_synch_flush(mutex-synch_base, XNRMID); } diff --git a/ksrc/nucleus/intr.c b/ksrc/nucleus/intr.c index
Re: [Xenomai-core] [PATCH] blackfin: build xenomai in FLAT format
On Wed, Feb 25, 2009 at 12:30 AM, Philippe Gerum r...@xenomai.org wrote: 2. libpthread_rt.la should not depend on lpthread. Nak. In flat mode, turning the link dependencies order upside down will not buy us anything. Two-phase link is the only way to prevent circular/invalid dependencies with a static only format when symbol wrapping is involved. Agree. But libpthread_rt_la_LDFLAGS should not include -lpthread, otherwise libtool will think libpthread_rt.a depends on libpthread. I don't think there is such a dependency, isn't it? -Yi --- xenomai-2.4.x-clean/src/skins/posix/Makefile.in 2008-06-02 05:32:28.0 +0800 +++ xenomai-2.4.x/src/skins/posix/Makefile.in 2009-02-23 17:12:14.0 +0800 @@ -239,7 +239,7 @@ target_vendor = @target_vendor@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ lib_LTLIBRARIES = libpthread_rt.la -libpthread_rt_la_LDFLAGS = -version-info 1:0:0 -lpthread +libpthread_rt_la_LDFLAGS = -version-info 1:0:0 libpthread_rt_la_SOURCES = \ init.c \ thread.c \ ___ Xenomai-core mailing list Xenomai-core@gna.org https://mail.gna.org/listinfo/xenomai-core