[Xenomai-git] Philippe Gerum : nucleus/shadow: wake up the gatekeeper asynchronously

2011-05-01 Thread GIT version control
Module: xenomai-head
Branch: master
Commit: 1147846e16e36f4b51e4bbe04de9f4dc8d4beeb7
URL:
http://git.xenomai.org/?p=xenomai-head.git;a=commit;h=1147846e16e36f4b51e4bbe04de9f4dc8d4beeb7

Author: Philippe Gerum 
Date:   Fri Apr  8 18:32:12 2011 +0200

nucleus/shadow: wake up the gatekeeper asynchronously

This patch moves the wake-up call for the gatekeeper over an APC we
kick in do_schedule_event() on the way out for the hardening task,
instead of invoking wake_up_interruptible_sync() directly.

This change is required to run hybrid I-pipe + PREEMPT_RT kernels,
because we may not hold the preemption lock across calls to
wake_up_*() services over native preemption (since tasks might sleep
when contending for spinlocks there). However, we must hold the
preemption lock until the hardening task schedules out, so that such
task always resumes in primary mode from the schedule() call within
xnshadow_harden(), and never from any other preemption point.

NOTE: this indirection does NOT invalidate the reason to rely on
TASK_ATOMICSWITCH.

---

 ksrc/nucleus/shadow.c |   33 +
 1 files changed, 21 insertions(+), 12 deletions(-)

diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c
index 6af0659..626c499 100644
--- a/ksrc/nucleus/shadow.c
+++ b/ksrc/nucleus/shadow.c
@@ -86,6 +86,7 @@ static struct __lostagerq {
 #define LO_SIGGRP_REQ 2
 #define LO_SIGTHR_REQ 3
 #define LO_UNMAP_REQ  4
+#define LO_GKWAKE_REQ 5
int type;
struct task_struct *task;
int arg;
@@ -825,9 +826,13 @@ static void xnshadow_dereference_skin(unsigned magic)
 
 static void lostage_handler(void *cookie)
 {
-   struct __lostagerq *rq = &lostagerq[smp_processor_id()];
-   int reqnum, type, arg, sig, sigarg;
+   int cpu, reqnum, type, arg, sig, sigarg;
+   struct __lostagerq *rq;
struct task_struct *p;
+   struct xnsched *sched;
+
+   cpu = smp_processor_id();
+   rq = &lostagerq[cpu];
 
while ((reqnum = rq->out) != rq->in) {
type = rq->req[reqnum].type;
@@ -882,6 +887,11 @@ static void lostage_handler(void *cookie)
case LO_SIGGRP_REQ:
kill_proc(p->pid, arg, 1);
break;
+
+   case LO_GKWAKE_REQ:
+   sched = xnpod_sched_slot(cpu);
+   wake_up_interruptible_sync(&sched->gkwaitq);
+   break;
}
}
 }
@@ -960,8 +970,7 @@ static int gatekeeper_thread(void *data)
 * process the pending request, just ignore the
 * latter.
 */
-   if ((xnthread_user_task(target)->state & ~TASK_ATOMICSWITCH)
-   == TASK_INTERRUPTIBLE) {
+   if ((xnthread_user_task(target)->state & ~TASK_ATOMICSWITCH) == TASK_INTERRUPTIBLE) {
rpi_pop(target);
xnlock_get_irqsave(&nklock, s);
 #ifdef CONFIG_SMP
@@ -1049,10 +1058,7 @@ redo:
 * task. For this to happen, we set up the migration data,
 * prepare to suspend the current task, wake up the gatekeeper
 * which will perform the actual transition, then schedule
-* out. Most of this sequence must be atomic, and we get this
-* guarantee by disabling preemption and using the
-* TASK_ATOMICSWITCH cumulative state provided by Adeos to
-* Linux tasks.
+* out.
 */
 
trace_mark(xn_nucleus, shadow_gohard,
@@ -1062,8 +1068,7 @@ redo:
sched->gktarget = thread;
xnthread_set_info(thread, XNATOMIC);
set_current_state(TASK_INTERRUPTIBLE | TASK_ATOMICSWITCH);
-   wake_up_interruptible_sync(&sched->gkwaitq);
-   schedule(); /* Will preempt_enable() thanks to TASK_ATOMICSWITCH */
+   schedule();
xnthread_clear_info(thread, XNATOMIC);
 
/*
@@ -1427,7 +1432,7 @@ int xnshadow_map(xnthread_t *thread, xncompletion_t __user *u_completion,
 */
xnthread_set_info(thread, XNPRIOSET);
 
-   xnarch_trace_pid(xnarch_user_pid(xnthread_archtcb(thread)),
+   xnarch_trace_pid(xnthread_user_pid(thread),
 xnthread_current_priority(thread));
 
return ret;
@@ -2624,12 +2629,16 @@ RTHAL_DECLARE_EXIT_EVENT(taskexit_event);
 static inline void do_schedule_event(struct task_struct *next_task)
 {
struct task_struct *prev_task;
-   struct xnthread *next;
+   struct xnthread *prev, *next;
 
if (!xnpod_active_p())
return;
 
prev_task = current;
+   prev = xnshadow_thread(prev_task);
+   if (prev && xnthread_test_info(prev, XNATOMIC))
+   schedule_linux_call(LO_GKWAKE_REQ, prev_task, 0);
+
next = xnshadow_thread(next_task);
set_switch_lock_owner(prev_task);
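
[Editorial note] For readers unfamiliar with the lostage/APC machinery the diff relies on, the pattern boils down to recording a request while preemption is disabled and performing the actual wake-up later from plain Linux context. Below is a minimal, self-contained user-space sketch of that deferral; the real code uses Xenomai's per-CPU lostagerq ring and rthal APCs, and every name here (gk_request, gk_ring, post_gatekeeper_wakeup, drain_gatekeeper_requests) is invented for illustration only.

/*
 * Illustrative sketch only: models the deferred wake-up pattern added by
 * this commit (LO_GKWAKE_REQ) with a plain user-space ring buffer.
 */
#include <stdio.h>

#define RING_SIZE 8

enum gk_req_type { REQ_NONE, REQ_WAKE_GATEKEEPER };

struct gk_request {
	enum gk_req_type type;
	int cpu;
};

static struct gk_request gk_ring[RING_SIZE];
static unsigned int ring_in, ring_out;

/* Producer: called where preemption must stay disabled, so it only
 * records the request instead of calling a wake-up service directly. */
static void post_gatekeeper_wakeup(int cpu)
{
	gk_ring[ring_in % RING_SIZE].type = REQ_WAKE_GATEKEEPER;
	gk_ring[ring_in % RING_SIZE].cpu = cpu;
	ring_in++;
}

/* Consumer: runs later, in a context where sleeping locks are allowed,
 * and performs the actual wake-up (a printf stands in for
 * wake_up_interruptible_sync() here). */
static void drain_gatekeeper_requests(void)
{
	while (ring_out != ring_in) {
		struct gk_request *req = &gk_ring[ring_out % RING_SIZE];
		if (req->type == REQ_WAKE_GATEKEEPER)
			printf("wake gatekeeper on CPU %d\n", req->cpu);
		ring_out++;
	}
}

int main(void)
{
	post_gatekeeper_wakeup(0);   /* e.g. from the schedule-out hook */
	drain_gatekeeper_requests(); /* e.g. from the APC handler */
	return 0;
}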
 


___
Xenomai-git mailing list
Xenomai-git@gna.org
https://mail.gna.org/listinfo/x

[Xenomai-git] Philippe Gerum : nucleus/shadow: wake up the gatekeeper asynchronously

2011-05-01 Thread GIT version control
Module: xenomai-2.5
Branch: master
Commit: 223685ce40a3b25d593ba512ae6f06d84efe58ba
URL:
http://git.xenomai.org/?p=xenomai-2.5.git;a=commit;h=223685ce40a3b25d593ba512ae6f06d84efe58ba

Author: Philippe Gerum 
Date:   Fri Apr  8 18:32:12 2011 +0200

nucleus/shadow: wake up the gatekeeper asynchronously

This patch moves the wake-up call for the gatekeeper over an APC we
kick in do_schedule_event() on the way out for the hardening task,
instead of invoking wake_up_interruptible_sync() directly.

This change is required to run hybrid I-pipe + PREEMPT_RT kernels,
because we may not hold the preemption lock across calls to
wake_up_*() services over native preemption (since tasks might sleep
when contending for spinlocks there). However, we must hold the
preemption lock until the hardening task schedules out, so that such
task always resumes in primary mode from the schedule() call within
xnshadow_harden(), and never from any other preemption point.

NOTE: this indirection does NOT invalidate the reason to rely on
TASK_ATOMICSWITCH.

---

 ksrc/nucleus/shadow.c |   33 +
 1 files changed, 21 insertions(+), 12 deletions(-)

diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c
index 0fea962..925f1c7 100644
--- a/ksrc/nucleus/shadow.c
+++ b/ksrc/nucleus/shadow.c
@@ -80,6 +80,7 @@ static struct __lostagerq {
 #define LO_SIGGRP_REQ 2
 #define LO_SIGTHR_REQ 3
 #define LO_UNMAP_REQ  4
+#define LO_GKWAKE_REQ 5
int type;
struct task_struct *task;
int arg;
@@ -817,9 +818,13 @@ static void xnshadow_dereference_skin(unsigned magic)
 
 static void lostage_handler(void *cookie)
 {
-   struct __lostagerq *rq = &lostagerq[smp_processor_id()];
-   int reqnum, type, arg, sig, sigarg;
+   int cpu, reqnum, type, arg, sig, sigarg;
+   struct __lostagerq *rq;
struct task_struct *p;
+   struct xnsched *sched;
+
+   cpu = smp_processor_id();
+   rq = &lostagerq[cpu];
 
while ((reqnum = rq->out) != rq->in) {
type = rq->req[reqnum].type;
@@ -874,6 +879,11 @@ static void lostage_handler(void *cookie)
case LO_SIGGRP_REQ:
kill_proc(p->pid, arg, 1);
break;
+
+   case LO_GKWAKE_REQ:
+   sched = xnpod_sched_slot(cpu);
+   wake_up_interruptible_sync(&sched->gkwaitq);
+   break;
}
}
 }
@@ -952,8 +962,7 @@ static int gatekeeper_thread(void *data)
 * process the pending request, just ignore the
 * latter.
 */
-   if ((xnthread_user_task(target)->state & ~TASK_ATOMICSWITCH)
-   == TASK_INTERRUPTIBLE) {
+   if ((xnthread_user_task(target)->state & ~TASK_ATOMICSWITCH) == TASK_INTERRUPTIBLE) {
rpi_pop(target);
xnlock_get_irqsave(&nklock, s);
 #ifdef CONFIG_SMP
@@ -1041,10 +1050,7 @@ redo:
 * task. For this to happen, we set up the migration data,
 * prepare to suspend the current task, wake up the gatekeeper
 * which will perform the actual transition, then schedule
-* out. Most of this sequence must be atomic, and we get this
-* guarantee by disabling preemption and using the
-* TASK_ATOMICSWITCH cumulative state provided by Adeos to
-* Linux tasks.
+* out.
 */
 
trace_mark(xn_nucleus, shadow_gohard,
@@ -1054,8 +1060,7 @@ redo:
sched->gktarget = thread;
xnthread_set_info(thread, XNATOMIC);
set_current_state(TASK_INTERRUPTIBLE | TASK_ATOMICSWITCH);
-   wake_up_interruptible_sync(&sched->gkwaitq);
-   schedule(); /* Will preempt_enable() thanks to TASK_ATOMICSWITCH */
+   schedule();
xnthread_clear_info(thread, XNATOMIC);
 
/*
@@ -1414,7 +1419,7 @@ int xnshadow_map(xnthread_t *thread, xncompletion_t __user *u_completion,
 */
xnthread_set_info(thread, XNPRIOSET);
 
-   xnarch_trace_pid(xnarch_user_pid(xnthread_archtcb(thread)),
+   xnarch_trace_pid(xnthread_user_pid(thread),
 xnthread_current_priority(thread));
 
return ret;
@@ -2593,12 +2598,16 @@ RTHAL_DECLARE_EXIT_EVENT(taskexit_event);
 static inline void do_schedule_event(struct task_struct *next_task)
 {
struct task_struct *prev_task;
-   struct xnthread *next;
+   struct xnthread *prev, *next;
 
if (!xnpod_active_p())
return;
 
prev_task = current;
+   prev = xnshadow_thread(prev_task);
+   if (prev && xnthread_test_info(prev, XNATOMIC))
+   schedule_linux_call(LO_GKWAKE_REQ, prev_task, 0);
+
next = xnshadow_thread(next_task);
set_switch_lock_owner(prev_task);
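
[Editorial note] The producer side of the same pattern is the new check in do_schedule_event(): only a task flagged as hardening gets a deferred wake-up posted on its way out. The sketch below is a hypothetical stand-in for that check; fake_task, FLAG_HARDENING and post_gatekeeper_wakeup are all invented names, not Xenomai API.

/*
 * Hypothetical sketch of the producer side: a schedule-out hook that
 * posts the deferred request only when the outgoing task is in the
 * middle of hardening, mirroring the XNATOMIC test added to
 * do_schedule_event().
 */
#include <stdio.h>

#define FLAG_HARDENING 0x1

struct fake_task {
	int cpu;
	unsigned int flags;
};

/* Stand-in for schedule_linux_call(LO_GKWAKE_REQ, ...): merely records
 * that a wake-up must happen later, from Linux context. */
static void post_gatekeeper_wakeup(int cpu)
{
	printf("deferred gatekeeper wake-up queued for CPU %d\n", cpu);
}

/* Stand-in for the do_schedule_event() check: runs with preemption
 * disabled, so it must not wake anything up directly. */
static void on_schedule_out(const struct fake_task *prev)
{
	if (prev->flags & FLAG_HARDENING)
		post_gatekeeper_wakeup(prev->cpu);
}

int main(void)
{
	struct fake_task hardening_task = { .cpu = 0, .flags = FLAG_HARDENING };
	on_schedule_out(&hardening_task);
	return 0;
}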
 


___
Xenomai-git mailing list
Xenomai-git@gna.org
https://mail.gna.org/listinfo/xen

[Xenomai-git] Philippe Gerum : nucleus/shadow: wake up the gatekeeper asynchronously

2011-04-11 Thread GIT version control
Module: xenomai-rpm
Branch: for-upstream
Commit: 223685ce40a3b25d593ba512ae6f06d84efe58ba
URL:
http://git.xenomai.org/?p=xenomai-rpm.git;a=commit;h=223685ce40a3b25d593ba512ae6f06d84efe58ba

Author: Philippe Gerum 
Date:   Fri Apr  8 18:32:12 2011 +0200

nucleus/shadow: wake up the gatekeeper asynchronously

This patch moves the wake-up call for the gatekeeper over an APC we
kick in do_schedule_event() on the way out for the hardening task,
instead of invoking wake_up_interruptible_sync() directly.

This change is required to run hybrid I-pipe + PREEMPT_RT kernels,
because we may not hold the preemption lock across calls to
wake_up_*() services over native preemption (since tasks might sleep
when contending for spinlocks there). However, we must hold the
preemption lock until the hardening task schedules out, so that such
task always resumes in primary mode from the schedule() call within
xnshadow_harden(), and never from any other preemption point.

NOTE: this indirection does NOT invalidate the reason to rely on
TASK_ATOMICSWITCH.

---

 ksrc/nucleus/shadow.c |   33 +
 1 files changed, 21 insertions(+), 12 deletions(-)

diff --git a/ksrc/nucleus/shadow.c b/ksrc/nucleus/shadow.c
index 0fea962..925f1c7 100644
--- a/ksrc/nucleus/shadow.c
+++ b/ksrc/nucleus/shadow.c
@@ -80,6 +80,7 @@ static struct __lostagerq {
 #define LO_SIGGRP_REQ 2
 #define LO_SIGTHR_REQ 3
 #define LO_UNMAP_REQ  4
+#define LO_GKWAKE_REQ 5
int type;
struct task_struct *task;
int arg;
@@ -817,9 +818,13 @@ static void xnshadow_dereference_skin(unsigned magic)
 
 static void lostage_handler(void *cookie)
 {
-   struct __lostagerq *rq = &lostagerq[smp_processor_id()];
-   int reqnum, type, arg, sig, sigarg;
+   int cpu, reqnum, type, arg, sig, sigarg;
+   struct __lostagerq *rq;
struct task_struct *p;
+   struct xnsched *sched;
+
+   cpu = smp_processor_id();
+   rq = &lostagerq[cpu];
 
while ((reqnum = rq->out) != rq->in) {
type = rq->req[reqnum].type;
@@ -874,6 +879,11 @@ static void lostage_handler(void *cookie)
case LO_SIGGRP_REQ:
kill_proc(p->pid, arg, 1);
break;
+
+   case LO_GKWAKE_REQ:
+   sched = xnpod_sched_slot(cpu);
+   wake_up_interruptible_sync(&sched->gkwaitq);
+   break;
}
}
 }
@@ -952,8 +962,7 @@ static int gatekeeper_thread(void *data)
 * process the pending request, just ignore the
 * latter.
 */
-   if ((xnthread_user_task(target)->state & ~TASK_ATOMICSWITCH)
-   == TASK_INTERRUPTIBLE) {
+   if ((xnthread_user_task(target)->state & ~TASK_ATOMICSWITCH) == TASK_INTERRUPTIBLE) {
rpi_pop(target);
xnlock_get_irqsave(&nklock, s);
 #ifdef CONFIG_SMP
@@ -1041,10 +1050,7 @@ redo:
 * task. For this to happen, we set up the migration data,
 * prepare to suspend the current task, wake up the gatekeeper
 * which will perform the actual transition, then schedule
-* out. Most of this sequence must be atomic, and we get this
-* guarantee by disabling preemption and using the
-* TASK_ATOMICSWITCH cumulative state provided by Adeos to
-* Linux tasks.
+* out.
 */
 
trace_mark(xn_nucleus, shadow_gohard,
@@ -1054,8 +1060,7 @@ redo:
sched->gktarget = thread;
xnthread_set_info(thread, XNATOMIC);
set_current_state(TASK_INTERRUPTIBLE | TASK_ATOMICSWITCH);
-   wake_up_interruptible_sync(&sched->gkwaitq);
-   schedule(); /* Will preempt_enable() thanks to TASK_ATOMICSWITCH */
+   schedule();
xnthread_clear_info(thread, XNATOMIC);
 
/*
@@ -1414,7 +1419,7 @@ int xnshadow_map(xnthread_t *thread, xncompletion_t __user *u_completion,
 */
xnthread_set_info(thread, XNPRIOSET);
 
-   xnarch_trace_pid(xnarch_user_pid(xnthread_archtcb(thread)),
+   xnarch_trace_pid(xnthread_user_pid(thread),
 xnthread_current_priority(thread));
 
return ret;
@@ -2593,12 +2598,16 @@ RTHAL_DECLARE_EXIT_EVENT(taskexit_event);
 static inline void do_schedule_event(struct task_struct *next_task)
 {
struct task_struct *prev_task;
-   struct xnthread *next;
+   struct xnthread *prev, *next;
 
if (!xnpod_active_p())
return;
 
prev_task = current;
+   prev = xnshadow_thread(prev_task);
+   if (prev && xnthread_test_info(prev, XNATOMIC))
+   schedule_linux_call(LO_GKWAKE_REQ, prev_task, 0);
+
next = xnshadow_thread(next_task);
set_switch_lock_owner(prev_task);
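
[Editorial note] The constraint motivating the change can be stated as a simple rule: on PREEMPT_RT, wait-queue wake-ups may take sleeping locks, so they must not run while preemption is disabled. The toy checker below models only that rule under invented names; it is not how the kernel actually enforces it.

/*
 * Toy model of the constraint described in the commit message; the flag
 * and assertion are purely illustrative.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool preemption_disabled;

static void wake_up_waitqueue(const char *name)
{
	/* This path may take a sleeping lock on PREEMPT_RT, hence the
	 * rule modelled by the assertion. */
	assert(!preemption_disabled);
	printf("woke %s\n", name);
}

int main(void)
{
	/* Old scheme: waking the gatekeeper while preemption is held off
	 * would violate the rule (left commented out on purpose). */
	preemption_disabled = true;
	/* wake_up_waitqueue("gkwaitq"); */
	preemption_disabled = false;

	/* New scheme: the wake-up is deferred to the APC handler, which
	 * runs with preemption enabled again. */
	wake_up_waitqueue("gkwaitq");
	return 0;
}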
 


___
Xenomai-git mailing list
Xenomai-git@gna.org
https://mail.gna.org/listin