[My favorite again... :->]

The initial rpi_push in xnshadow_start takes place on the caller's CPU
instead of the thread's target CPU.

I haven't fully made up my mind about the practical impact of this bug;
I just came across it the hard way (rpi_push operated on uninitialized
data) while kicking CPUs out of the set that Xenomai is supposed to
support. At the very least it should cause quite some RPI "confusion"
for a while on both CPUs involved.
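
For illustration, here is the call-site difference in xnshadow_start
(xnsched_cpu() resolves the CPU of the thread's scheduler slot, as used
in the patch below):

	/* before: pushes onto the RPI slot of the CPU running the caller */
	rpi_push(thread);	/* implicitly uses rthal_processor_id() */

	/* after: pushes onto the RPI slot of the thread's target CPU */
	rpi_push(thread, xnsched_cpu(thread->sched));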

Jan
---
 ChangeLog             |    5 ++++
 include/nucleus/pod.h |    8 +++----
 ksrc/nucleus/shadow.c |   56 ++++++++++++++++++++++++++------------------------
 3 files changed, 39 insertions(+), 30 deletions(-)

Index: b/include/nucleus/pod.h
===================================================================
--- a/include/nucleus/pod.h
+++ b/include/nucleus/pod.h
@@ -313,21 +313,21 @@ static inline void xnpod_finalize_zombie
 
 #define xnpod_idle_p()		xnpod_root_p()
 
-static inline void xnpod_renice_root(int prio)
+static inline void xnpod_renice_root(int cpu, int prio)
 {
 	xnthread_t *rootcb;
 	spl_t s;
 
 	xnlock_get_irqsave(&nklock, s);
-	rootcb = xnpod_current_root();
+	rootcb = &xnpod_sched_slot(cpu)->rootcb;
 	rootcb->cprio = prio;
 	xnpod_schedule_runnable(rootcb, XNPOD_SCHEDLIFO | XNPOD_NOSWITCH);
 	xnlock_put_irqrestore(&nklock, s);
 }
 
-static inline int xnpod_root_priority(void)
+static inline int xnpod_root_priority(int cpu)
 {
-	return xnthread_current_priority(xnpod_current_root());
+	return xnthread_current_priority(&xnpod_sched_slot(cpu)->rootcb);
 }
 
 int xnpod_init(void);
Index: b/ksrc/nucleus/shadow.c
===================================================================
--- a/ksrc/nucleus/shadow.c
+++ b/ksrc/nucleus/shadow.c
@@ -198,15 +198,13 @@ static inline void rpi_none(xnthread_t *
 	xnarch_memory_barrier();
 }
 
-static void rpi_push(xnthread_t *thread)
+static void rpi_push(xnthread_t *thread, int cpu)
 {
-	struct xnrpi *rpislot;
+	struct xnrpi *rpislot = &gatekeeper[cpu].rpislot;
 	xnthread_t *top;
 	int prio;
 	spl_t s;
 
-	rpislot = &gatekeeper[rthal_processor_id()].rpislot;
-
 	/* non-RT shadows and RT shadows which disabled RPI cause the
 	   root priority to be lowered to its base level. The purpose
 	   of the following code is to enqueue the just thread
@@ -230,18 +228,17 @@ static void rpi_push(xnthread_t *thread)
 	} else
 		prio = XNCORE_IDLE_PRIO;
 
-	if (xnpod_root_priority() != prio)
-		xnpod_renice_root(prio);
+	if (xnpod_root_priority(cpu) != prio)
+		xnpod_renice_root(cpu, prio);
 }
 
 static void rpi_pop(xnthread_t *thread)
 {
-	struct xnrpi *rpislot;
+	int cpu = rthal_processor_id();
+	struct xnrpi *rpislot = &gatekeeper[cpu].rpislot;
 	int prio;
 	spl_t s;
 
-	rpislot = &gatekeeper[rthal_processor_id()].rpislot;
-
 	xnlock_get_irqsave(&rpislot->lock, s);
 
 	/* Make sure we don't try to unlink a shadow which is not
@@ -267,20 +264,22 @@ static void rpi_pop(xnthread_t *thread)
 
 	xnlock_put_irqrestore(&rpislot->lock, s);
 
-	if (xnpod_root_priority() != prio)
-		xnpod_renice_root(prio);
+	if (xnpod_root_priority(cpu) != prio)
+		xnpod_renice_root(cpu, prio);
 }
 
 static void rpi_update(xnthread_t *thread)
 {
-	struct xnrpi *rpislot;
+	int cpu = rthal_processor_id();
+	struct xnrpi *rpislot = &gatekeeper[cpu].rpislot;
 	spl_t s;
 
-	rpislot = &gatekeeper[rthal_processor_id()].rpislot;
 	xnlock_get_irqsave(&rpislot->lock, s);
+
 	sched_removepq(&rpislot->threadq, &thread->xlink);
 	rpi_none(thread);
-	rpi_push(thread);
+	rpi_push(thread, cpu);
+
 	xnlock_put_irqrestore(&rpislot->lock, s);
 }
 
@@ -358,7 +357,7 @@ static void rpi_clear_remote(xnthread_t 
 static void rpi_migrate(xnthread_t *thread)
 {
 	rpi_clear_remote(thread);
-	rpi_push(thread);
+	rpi_push(thread, rthal_processor_id());
 }
 
 #else  /* !CONFIG_SMP */
@@ -368,6 +367,7 @@ static void rpi_migrate(xnthread_t *thre
 
 static inline void rpi_switch(struct task_struct *next)
 {
+	int cpu = rthal_processor_id();
 	xnthread_t *threadin, *threadout;
 	struct xnrpi *rpislot;
 	int oldprio, newprio;
@@ -375,8 +375,8 @@ static inline void rpi_switch(struct tas
 
 	threadout = xnshadow_thread(current);
 	threadin = xnshadow_thread(next);
-	rpislot = &gatekeeper[rthal_processor_id()].rpislot;
-	oldprio = xnpod_root_priority();
+	rpislot = &gatekeeper[cpu].rpislot;
+	oldprio = xnpod_root_priority(cpu);
 
 	if (threadout &&
 	    current->state != TASK_RUNNING &&
@@ -456,7 +456,7 @@ boost_root:
 	if (newprio == oldprio)
 		return;
 
-	xnpod_renice_root(newprio);
+	xnpod_renice_root(cpu, newprio);
 
 	if (newprio < oldprio)
 		/* Subtle: by downgrading the root thread priority,
@@ -470,8 +470,9 @@ boost_root:
 
 static inline void rpi_clear_local(xnthread_t *thread)
 {
-	if (thread == NULL && xnpod_root_priority() != XNCORE_IDLE_PRIO)
-		xnpod_renice_root(XNCORE_IDLE_PRIO);
+	int cpu = rthal_processor_id();
+	if (thread == NULL && xnpod_root_priority(cpu) != XNCORE_IDLE_PRIO)
+		xnpod_renice_root(cpu, XNCORE_IDLE_PRIO);
 }
 
 #ifdef CONFIG_SMP
@@ -483,15 +484,16 @@ void xnshadow_rpi_check(void)
 	 * otherwise, we would have to mask them while testing the
 	 * queue for emptiness _and_ demoting the boost level.
 	 */
-	struct xnrpi *rpislot = &gatekeeper[rthal_processor_id()].rpislot;
+	int cpu = rthal_processor_id();
+	struct xnrpi *rpislot = &gatekeeper[cpu].rpislot;
 	int norpi;
  
  	xnlock_get(&rpislot->lock);
  	norpi = sched_emptypq_p(&rpislot->threadq);
  	xnlock_put(&rpislot->lock);
 
-	if (norpi && xnpod_root_priority() != XNCORE_IDLE_PRIO)
-		xnpod_renice_root(XNCORE_IDLE_PRIO);
+	if (norpi && xnpod_root_priority(cpu) != XNCORE_IDLE_PRIO)
+		xnpod_renice_root(cpu, XNCORE_IDLE_PRIO);
 }
 
 #endif	/* CONFIG_SMP */
@@ -502,7 +504,7 @@ void xnshadow_rpi_check(void)
 #define rpi_init_gk(gk)		do { } while(0)
 #define rpi_clear_local(t)	do { } while(0)
 #define rpi_clear_remote(t)	do { } while(0)
-#define rpi_push(t)		do { } while(0)
+#define rpi_push(t, cpu)	do { } while(0)
 #define rpi_pop(t)		do { } while(0)
 #define rpi_update(t)		do { } while(0)
 #define rpi_switch(n)		do { } while(0)
@@ -1209,7 +1211,7 @@ void xnshadow_relax(int notify)
 
 	schedule_linux_call(LO_WAKEUP_REQ, current, 0);
 
-	rpi_push(thread);
+	rpi_push(thread, rthal_processor_id());
 
 	clear_task_nowakeup(current);
 
@@ -1469,7 +1471,9 @@ void xnshadow_start(xnthread_t *thread)
 {
 	struct task_struct *p = xnthread_archtcb(thread)->user_task;
 
-	rpi_push(thread);	/* A shadow always starts in relaxed mode. */
+	/* A shadow always starts in relaxed mode. */
+	rpi_push(thread, xnsched_cpu(thread->sched));
+
 	trace_mark(xn_nucleus_shadow_start, "thread %p thread_name %s",
 		   thread, xnthread_name(thread));
 	xnpod_resume_thread(thread, XNDORMANT);
Index: b/ChangeLog
===================================================================
--- a/ChangeLog
+++ b/ChangeLog
@@ -2,6 +2,11 @@
 
 	* include/asm-generic/system.h (xnlock_put): Add proper barrier.
 
+	* ksrc/nucleus/shadow.c, include/nucleus/pod.h: Let rpi_push,
+	xnpod_renice_root, and xnpod_root_priority work on specified CPU
+	instead of the current one. Issue rpi_push for the new thread's
+	target CPU in xnshadow_start.
+
 2008-02-15  Gilles Chanteperdrix  <[EMAIL PROTECTED]>
 
 	* src/skins/posix/thread.c (__wrap_pthread_create): Follow more
