[Xenomai-git] Philippe Gerum: nucleus/sched: move locking to resume_rpi/suspend_rpi

2010-10-04  GIT version control
Module: xenomai-head
Branch: master
Commit: 1e8704ab315f352e5609723d94e889b5cca860fe
URL:
http://git.xenomai.org/?p=xenomai-head.git;a=commit;h=1e8704ab315f352e5609723d94e889b5cca860fe

Author: Philippe Gerum 
Date:   Fri Aug 27 07:51:21 2010 +0200

nucleus/sched: move locking to resume_rpi/suspend_rpi

Most scheduling classes do not implement the RPI resume/suspend callbacks
invoked upon thread state transitions, so there is no need to grab the
nklock just to run an empty stub for them.

This patch lets the resume_rpi/suspend_rpi callbacks deal with proper
locking internally, instead of grabbing the nucleus lock unconditionally
around those calls.
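
To make the pattern concrete, here is a minimal before/after sketch of
the idea (a simplified rendition, not the exact code from the tree; the
class name my_class is hypothetical):

  /* Before: the caller grabbed nklock around every callback, even when
   * the scheduling class leaves suspend_rpi unimplemented and the call
   * is an empty stub. */
  xnlock_get_irqsave(&nklock, s);
  xnsched_suspend_rpi(thread);
  xnlock_put_irqrestore(&nklock, s);

  /* After: the caller invokes the callback lock-free; a class that
   * actually implements it takes nklock internally, as the sporadic
   * class does in the diff below. */
  static void my_class_suspend_rpi(struct xnthread *thread)
  {
          spl_t s;

          xnlock_get_irqsave(&nklock, s);
          /* class-specific suspend work runs under nklock here */
          xnlock_put_irqrestore(&nklock, s);
  }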

---

 ksrc/nucleus/sched-sporadic.c |   12 ++++++++++++
 ksrc/nucleus/shadow.c         |   19 +++++++++----------
 2 files changed, 21 insertions(+), 10 deletions(-)

diff --git a/ksrc/nucleus/sched-sporadic.c b/ksrc/nucleus/sched-sporadic.c
index 0ea14e0..fccebd7 100644
--- a/ksrc/nucleus/sched-sporadic.c
+++ b/ksrc/nucleus/sched-sporadic.c
@@ -401,14 +401,26 @@ static struct xnthread *xnsched_sporadic_peek_rpi(struct xnsched *sched)
 
 static void xnsched_sporadic_suspend_rpi(struct xnthread *thread)
 {
+   spl_t s;
+
+   xnlock_get_irqsave(&nklock, s);
+
if (thread->pss)
sporadic_suspend_activity(thread);
+
+   xnlock_put_irqrestore(&nklock, s);
 }
 
 static void xnsched_sporadic_resume_rpi(struct xnthread *thread)
 {
+   spl_t s;
+
+   xnlock_get_irqsave(&nklock, s);
+
if (thread->pss)
sporadic_resume_activity(thread);
+
+   xnlock_put_irqrestore(&nklock, s);
 }
 
 #endif /* CONFIG_XENO_OPT_PRIOCPL */
diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c
index 52ee381..c15e6e1 100644
--- a/ksrc/nucleus/shadow.c
+++ b/ksrc/nucleus/shadow.c
@@ -339,18 +339,16 @@ static void rpi_clear_remote(struct xnthread *thread)
 
 static void rpi_migrate(struct xnsched *sched, struct xnthread *thread)
 {
-   spl_t s;
-
rpi_clear_remote(thread);
rpi_push(sched, thread);
/*
 * The remote CPU already ran rpi_switch() for the leaving
 * thread, so there is no point in calling
-* xnsched_suspend_rpi() for the latter anew.
+* xnsched_suspend_rpi() for the latter anew.  Proper locking
+* is left to the resume_rpi() callback, so that we don't grab
+* the nklock uselessly for nop calls.
 */
-   xnlock_get_irqsave(&nklock, s);
xnsched_resume_rpi(thread);
-   xnlock_put_irqrestore(&nklock, s);
 }
 
 #else  /* !CONFIG_SMP */
@@ -392,10 +390,13 @@ static inline void rpi_switch(struct task_struct *next_task)
xnsched_pop_rpi(prev);
prev->rpi = NULL;
xnlock_put_irqrestore(&sched->rpilock, s);
-   /* Do NOT nest the rpilock and nklock locks. */
-   xnlock_get_irqsave(&nklock, s);
+   /*
+* Do NOT nest the rpilock and nklock locks.
+* Proper locking is left to the suspend_rpi()
+* callback, so that we don't grab the nklock
+* uselessly for nop calls.
+*/
xnsched_suspend_rpi(prev);
-   xnlock_put_irqrestore(&nklock, s);
} else
xnlock_put_irqrestore(&sched->rpilock, s);
}
@@ -449,9 +450,7 @@ static inline void rpi_switch(struct task_struct *next_task)
xnsched_push_rpi(sched, next);
next->rpi = sched;
xnlock_put_irqrestore(&sched->rpilock, s);
-   xnlock_get_irqsave(&nklock, s);
xnsched_resume_rpi(next);
-   xnlock_put_irqrestore(&nklock, s);
}
} else if (unlikely(next->rpi != sched))
/* We hold no lock here. */
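
The "Do NOT nest" comment above is a lock-ordering rule: if one path
took rpilock then nklock while another path took them in the opposite
order, two CPUs could deadlock against each other (the classic ABBA
scenario). The discipline after this patch, condensed from the
rpi_switch() hunk above, keeps at most one of the two locks held at a
time:

  xnlock_get_irqsave(&sched->rpilock, s);
  xnsched_pop_rpi(prev);
  prev->rpi = NULL;
  xnlock_put_irqrestore(&sched->rpilock, s);  /* drop rpilock first */
  xnsched_suspend_rpi(prev);                  /* callback takes nklock itself */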

