Merge the architecture-specific support from arch/ppc and arch/ppc64 into a single arch/powerpc tree.

-- Heikki Lindholm
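
For orientation, the hunks below add the following files under the new
arch/powerpc directory (this is only a summary of the patch itself; no
files beyond what the diff shows):

  arch/powerpc/GNUmakefile.am, Kconfig, defconfig
  arch/powerpc/patches/README        (the adeos-linux-* / adeos-ipipe-* patches
                                      it refers to live in the same directory)
  arch/powerpc/hal/GNUmakefile.am, Makefile
  arch/powerpc/hal/powerpc.c         (timer, exception and domain-entry glue)
  arch/powerpc/hal/switch.S, fpu.S           (32-bit context switch / FPU code)
  arch/powerpc/hal/switch_64.S, fpu_64.S     (64-bit variants, built when the
                                              CONFIG_PPC64 conditional is set)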

diff -Nru --exclude=.svn xenomai-orig/arch/powerpc/defconfig xenomai-devel/arch/powerpc/defconfig
--- xenomai-orig/arch/powerpc/defconfig 1970-01-01 02:00:00.000000000 +0200
+++ xenomai-devel/arch/powerpc/defconfig        2005-10-11 10:32:16.000000000 +0300
@@ -0,0 +1,90 @@
+#
+# Automatically generated make config: don't edit
+#
+CONFIG_MODULES=y
+CONFIG_XENO_VERSION="2.0"
+
+#
+# General
+#
+CONFIG_XENO_INSTALLDIR="/usr/realtime"
+CONFIG_XENO_LINUXDIR="/lib/modules/`uname -r`/build"
+
+#
+# Documentation
+#
+# CONFIG_XENO_DOC_DOX is not set
+# CONFIG_XENO_DOC_LATEX_NONSTOP is not set
+# CONFIG_XENO_DOC_DBX is not set
+CONFIG_XENO_OPT_EXPERT=y
+# CONFIG_XENO_OPT_KSYMS is not set
+# CONFIG_XENO_OPT_USYMS is not set
+# CONFIG_XENO_MAINT is not set
+
+#
+# Nucleus
+#
+CONFIG_XENO_OPT_PERVASIVE=y
+CONFIG_XENO_OPT_PIPE=y
+CONFIG_XENO_OPT_PIPE_NRDEV="32"
+CONFIG_XENO_OPT_SYS_HEAPSZ="128"
+CONFIG_XENO_OPT_STATS=y
+# CONFIG_XENO_OPT_DEBUG is not set
+# CONFIG_XENO_OPT_WATCHDOG is not set
+
+#
+# Scalability
+#
+# CONFIG_XENO_OPT_SCALABLE_SCHED is not set
+
+#
+# LTT tracepoints filtering
+#
+# CONFIG_XENO_OPT_FILTER_EVIRQ is not set
+# CONFIG_XENO_OPT_FILTER_EVTHR is not set
+# CONFIG_XENO_OPT_FILTER_EVSYS is not set
+# CONFIG_XENO_OPT_FILTER_EVALL is not set
+
+#
+# Machine (powerpc)
+#
+CONFIG_XENO_HW_FPU=y
+CONFIG_XENO_HW_PERIODIC_TIMER=y
+CONFIG_XENO_HW_TIMER_LATENCY="0"
+CONFIG_XENO_HW_SCHED_LATENCY="0"
+
+#
+# APIs
+#
+CONFIG_XENO_SKIN_NATIVE=y
+CONFIG_XENO_OPT_NATIVE_REGISTRY=y
+CONFIG_XENO_OPT_NATIVE_REGISTRY_NRSLOTS="512"
+CONFIG_XENO_OPT_NATIVE_PIPE=y
+CONFIG_XENO_OPT_NATIVE_PIPE_BUFSZ="4096"
+CONFIG_XENO_OPT_NATIVE_SEM=y
+CONFIG_XENO_OPT_NATIVE_EVENT=y
+CONFIG_XENO_OPT_NATIVE_MUTEX=y
+CONFIG_XENO_OPT_NATIVE_COND=y
+CONFIG_XENO_OPT_NATIVE_QUEUE=y
+CONFIG_XENO_OPT_NATIVE_HEAP=y
+CONFIG_XENO_OPT_NATIVE_ALARM=y
+CONFIG_XENO_OPT_NATIVE_MPS=y
+CONFIG_XENO_OPT_NATIVE_INTR=y
+CONFIG_XENO_SKIN_POSIX=y
+# CONFIG_XENO_SKIN_PSOS is not set
+# CONFIG_XENO_SKIN_UITRON is not set
+# CONFIG_XENO_SKIN_VRTX is not set
+# CONFIG_XENO_SKIN_VXWORKS is not set
+CONFIG_XENO_SKIN_RTDM=y
+# CONFIG_XENO_SKIN_RTAI is not set
+CONFIG_XENO_OPT_UVM=y
+
+#
+# Drivers
+#
+# CONFIG_XENO_DRIVERS_16550A is not set
+
+#
+# Simulator
+#
+# CONFIG_XENO_MVM is not set
diff -Nru --exclude=.svn xenomai-orig/arch/powerpc/GNUmakefile.am xenomai-devel/arch/powerpc/GNUmakefile.am
--- xenomai-orig/arch/powerpc/GNUmakefile.am    1970-01-01 02:00:00.000000000 +0200
+++ xenomai-devel/arch/powerpc/GNUmakefile.am   2005-10-11 10:32:16.000000000 +0300
@@ -0,0 +1,3 @@
+SUBDIRS = hal
+
+EXTRA_DIST = Kconfig defconfig patches
diff -Nru --exclude=.svn xenomai-orig/arch/powerpc/hal/fpu.S xenomai-devel/arch/powerpc/hal/fpu.S
--- xenomai-orig/arch/powerpc/hal/fpu.S 1970-01-01 02:00:00.000000000 +0200
+++ xenomai-devel/arch/powerpc/hal/fpu.S        2005-10-11 10:32:15.000000000 +0300
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <asm/processor.h>
+#include <asm/cputable.h>
+#include <asm/ppc_asm.h>
+#include <xeno_config.h> 
+
+#define RTHAL_FPSAVE(n, base)  stfd n,8*(n)(base)
+#define RTHAL_FPSAVE2(n, base) RTHAL_FPSAVE(n, base); RTHAL_FPSAVE(n+1, base)
+#define RTHAL_FPSAVE4(n, base) RTHAL_FPSAVE2(n, base); RTHAL_FPSAVE2(n+2, base)
+#define RTHAL_FPSAVE8(n, base) RTHAL_FPSAVE4(n, base); RTHAL_FPSAVE4(n+4, base)
+#define RTHAL_FPSAVE16(n, base)        RTHAL_FPSAVE8(n, base); RTHAL_FPSAVE8(n+8, base)
+#define RTHAL_FPSAVE32(n, base)        RTHAL_FPSAVE16(n, base); RTHAL_FPSAVE16(n+16, base)
+
+/* r3 = &tcb->fpuenv */
+_GLOBAL(rthal_save_fpu)
+       mfmsr   r5
+       ori     r5,r5,MSR_FP            /* Re-enable use of FPU. */
+#ifdef CONFIG_PPC64BRIDGE
+       clrldi  r5,r5,1                 /* Turn off 64-bit mode. */
+#endif /* CONFIG_PPC64BRIDGE */
+       SYNC
+       MTMSRD(r5)                      /* Enable use of fpu. */
+       isync
+       RTHAL_FPSAVE32(0,r3)
+       mffs    fr0
+       stfd    fr0,8*32(r3)
+       blr
+
+#define RTHAL_FPLOAD(n, base)  lfd n,8*(n)(base)
+#define RTHAL_FPLOAD2(n, base) RTHAL_FPLOAD(n, base); RTHAL_FPLOAD(n+1, base)
+#define RTHAL_FPLOAD4(n, base) RTHAL_FPLOAD2(n, base); RTHAL_FPLOAD2(n+2, base)
+#define RTHAL_FPLOAD8(n, base) RTHAL_FPLOAD4(n, base); RTHAL_FPLOAD4(n+4, base)
+#define RTHAL_FPLOAD16(n, base)        RTHAL_FPLOAD8(n, base); RTHAL_FPLOAD8(n+8, base)
+#define RTHAL_FPLOAD32(n, base)        RTHAL_FPLOAD16(n, base); RTHAL_FPLOAD16(n+16, base)
+
+/* r3 = &tcb->fpuenv */
+_GLOBAL(rthal_init_fpu)
+       mfmsr   r5
+       ori     r5,r5,MSR_FP|MSR_FE1    /* RT kernel threads always operate in */
+       li      r4,MSR_FE0              /* imprecise non-recoverable exception mode. */
+       andc    r5,r5,r4
+       SYNC
+       MTMSRD(r5)
+
+       /* Fallback wanted. */
+       
+/* r3 = &tcb->fpuenv */
+_GLOBAL(rthal_restore_fpu)
+       mfmsr   r5
+       ori     r5,r5,MSR_FP            /* Re-enable use of FPU. */
+#ifdef CONFIG_PPC64BRIDGE
+       clrldi  r5,r5,1                 /* Turn off 64-bit mode. */
+#endif /* CONFIG_PPC64BRIDGE */
+       SYNC
+       MTMSRD(r5)                      /* Enable use of fpu. */
+       isync
+       lfd     fr0,8*32(r3)
+       mtfsf   0xff,0
+       RTHAL_FPLOAD32(0,r3)
+       blr
diff -Nru --exclude=.svn xenomai-orig/arch/powerpc/hal/fpu_64.S xenomai-devel/arch/powerpc/hal/fpu_64.S
--- xenomai-orig/arch/powerpc/hal/fpu_64.S      1970-01-01 02:00:00.000000000 +0200
+++ xenomai-devel/arch/powerpc/hal/fpu_64.S     2005-10-19 16:49:41.000000000 +0300
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum.
+ *
+ * 64-bit PowerPC adoption
+ *   copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+#include <xeno_config.h> 
+
+#define RTHAL_FPSAVE(n, base)  stfd n,8*(n)(base)
+#define RTHAL_FPSAVE2(n, base) RTHAL_FPSAVE(n, base); RTHAL_FPSAVE(n+1, base)
+#define RTHAL_FPSAVE4(n, base) RTHAL_FPSAVE2(n, base); RTHAL_FPSAVE2(n+2, base)
+#define RTHAL_FPSAVE8(n, base) RTHAL_FPSAVE4(n, base); RTHAL_FPSAVE4(n+4, base)
+#define RTHAL_FPSAVE16(n, base)        RTHAL_FPSAVE8(n, base); RTHAL_FPSAVE8(n+8, base)
+#define RTHAL_FPSAVE32(n, base)        RTHAL_FPSAVE16(n, base); RTHAL_FPSAVE16(n+16, base)
+
+/* r3 = &tcb->fpuenv */
+_GLOBAL(rthal_save_fpu)
+       mfmsr   r5
+       ori     r5,r5,MSR_FP            /* Re-enable use of FPU. */
+       mtmsrd  r5                      /* Enable use of fpu. */
+       isync
+       RTHAL_FPSAVE32(0,r3)
+       mffs    fr0
+       stfd    fr0,8*32(r3)
+       blr
+
+#define RTHAL_FPLOAD(n, base)  lfd n,8*(n)(base)
+#define RTHAL_FPLOAD2(n, base) RTHAL_FPLOAD(n, base); RTHAL_FPLOAD(n+1, base)
+#define RTHAL_FPLOAD4(n, base) RTHAL_FPLOAD2(n, base); RTHAL_FPLOAD2(n+2, base)
+#define RTHAL_FPLOAD8(n, base) RTHAL_FPLOAD4(n, base); RTHAL_FPLOAD4(n+4, base)
+#define RTHAL_FPLOAD16(n, base)        RTHAL_FPLOAD8(n, base); RTHAL_FPLOAD8(n+8, base)
+#define RTHAL_FPLOAD32(n, base)        RTHAL_FPLOAD16(n, base); RTHAL_FPLOAD16(n+16, base)
+
+/* r3 = &tcb->fpuenv */
+_GLOBAL(rthal_init_fpu)
+       mfmsr   r5
+       ori     r5,r5,MSR_FP|MSR_FE1    /* RT kernel threads always operate in */
+       li      r4,MSR_FE0              /* imprecise non-recoverable exception mode. */
+       andc    r5,r5,r4
+       mtmsrd  r5
+
+       /* Fallback wanted. */
+       
+/* r3 = &tcb->fpuenv */
+_GLOBAL(rthal_restore_fpu)
+       mfmsr   r5
+       ori     r5,r5,MSR_FP            /* Re-enable use of FPU. */
+       mtmsrd  r5                      /* Enable use of fpu. */
+       isync
+       lfd     fr0,8*32(r3)
+       mtfsf   0xff,0
+       RTHAL_FPLOAD32(0,r3)
+       blr
diff -Nru --exclude=.svn xenomai-orig/arch/powerpc/hal/GNUmakefile.am xenomai-devel/arch/powerpc/hal/GNUmakefile.am
--- xenomai-orig/arch/powerpc/hal/GNUmakefile.am        1970-01-01 02:00:00.000000000 +0200
+++ xenomai-devel/arch/powerpc/hal/GNUmakefile.am       2005-10-23 11:44:49.000000000 +0300
@@ -0,0 +1,41 @@
+moduledir = $(DESTDIR)@XENO_MODULE_DIR@
+
+modext = @XENO_MODULE_EXT@
+
+CROSS_COMPILE = @CROSS_COMPILE@
+
+libhal_SRC = powerpc.c
+
+if CONFIG_PPC64
+libhal_SRC += switch_64.S
+if CONFIG_XENO_HW_FPU
+libhal_SRC += fpu_64.S
+endif
+else
+libhal_SRC += switch.S
+if CONFIG_XENO_HW_FPU
+libhal_SRC += fpu.S
+endif
+endif
+distfiles = switch.S switch_64.S fpu.S fpu_64.S
+
+xeno_hal.ko: @XENO_KBUILD_ENV@
+xeno_hal.ko: $(libhal_SRC) generic.c FORCE
+       @XENO_KBUILD_CMD@ xeno_extradef="@XENO_KMOD_CFLAGS@"
+
+clean-local:
+       @XENO_KBUILD_CLEAN@
+
+all-local: xeno_hal$(modext)
+if CONFIG_XENO_OLD_FASHIONED_BUILD
+       $(mkinstalldirs) $(top_srcdir)/modules
+       $(INSTALL_DATA) $^ $(top_srcdir)/modules
+endif
+
+install-exec-local: xeno_hal$(modext)
+       $(mkinstalldirs) $(moduledir)
+       $(INSTALL_DATA) $< $(moduledir)
+
+.PHONY: FORCE
+
+EXTRA_DIST = $(libhal_SRC) $(distfiles) Makefile
diff -Nru --exclude=.svn xenomai-orig/arch/powerpc/hal/Makefile xenomai-devel/arch/powerpc/hal/Makefile
--- xenomai-orig/arch/powerpc/hal/Makefile      1970-01-01 02:00:00.000000000 +0200
+++ xenomai-devel/arch/powerpc/hal/Makefile     2005-10-23 12:16:56.000000000 +0300
@@ -0,0 +1,13 @@
+EXTRA_CFLAGS += -I$(xeno_srctree)/include \
+               -I$(src)/../../../include \
+               -I$(src)/../../.. \
+               $(xeno_extradef)
+
+EXTRA_AFLAGS += -I$(xeno_srctree)/include \
+               -I$(src)/../../../include \
+               -I$(src)/../../.. \
+               $(xeno_extradef)
+
+obj-m += xeno_hal.o
+
+xeno_hal-objs := $(xeno_objs)
diff -Nru --exclude=.svn xenomai-orig/arch/powerpc/hal/powerpc.c xenomai-devel/arch/powerpc/hal/powerpc.c
--- xenomai-orig/arch/powerpc/hal/powerpc.c     1970-01-01 02:00:00.000000000 +0200
+++ xenomai-devel/arch/powerpc/hal/powerpc.c    2005-10-23 16:03:10.000000000 +0300
@@ -0,0 +1,190 @@
+/**
+ *   @ingroup hal
+ *   @file
+ *
+ *   Adeos-based Real-Time Abstraction Layer for PowerPC.
+ *
+ *   64-bit PowerPC adoption
+ *     copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
+ *
+ *   Xenomai is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License as
+ *   published by the Free Software Foundation, Inc., 675 Mass Ave,
+ *   Cambridge MA 02139, USA; either version 2 of the License, or (at
+ *   your option) any later version.
+ *
+ *   Xenomai is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ *   02111-1307, USA.
+ */
+
+/**
+ * @addtogroup hal
+ *
+ * PowerPC-specific HAL services.
+ *
+ *@{*/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/console.h>
+#include <linux/kallsyms.h>
+#include <asm/system.h>
+#include <asm/hardirq.h>
+#include <asm/hw_irq.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+#include <nucleus/asm/hal.h>
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+#endif /* CONFIG_PROC_FS */
+#include <stdarg.h>
+
+static int rthal_periodic_p;
+
+int rthal_timer_request (void (*handler)(void),
+                        unsigned long nstick)
+{
+    unsigned long flags;
+    int err;
+
+    flags = rthal_critical_enter(NULL);
+
+    if (nstick > 0)
+       {
+       /* Periodic setup --
+          Use the built-in Adeos service directly. */
+       err = rthal_set_timer(nstick);
+       rthal_periodic_p = 1;
+       }
+    else
+       {
+       /* Oneshot setup. */
+       disarm_decr[rthal_processor_id()] = 1;
+       rthal_periodic_p = 0;
+#ifdef CONFIG_40x
+        mtspr(SPRN_TCR,mfspr(SPRN_TCR) & ~TCR_ARE); /* Auto-reload off. */
+#endif /* CONFIG_40x */
+       rthal_timer_program_shot(tb_ticks_per_jiffy);
+       }
+
+    rthal_irq_release(RTHAL_TIMER_IRQ);
+
+    err = rthal_irq_request(RTHAL_TIMER_IRQ,
+                           (rthal_irq_handler_t)handler,
+                           NULL,
+                           NULL);
+
+    rthal_critical_exit(flags);
+
+    return err;
+}
+
+void rthal_timer_release (void)
+
+{
+    unsigned long flags;
+
+    flags = rthal_critical_enter(NULL);
+
+    if (rthal_periodic_p)
+       rthal_reset_timer();
+    else
+       {
+       disarm_decr[rthal_processor_id()] = 0;
+#ifdef CONFIG_40x
+       mtspr(SPRN_TCR,mfspr(SPRN_TCR)|TCR_ARE); /* Auto-reload on. */
+       mtspr(SPRN_PIT,tb_ticks_per_jiffy);
+#else /* !CONFIG_40x */
+       set_dec(tb_ticks_per_jiffy);
+#endif /* CONFIG_40x */
+       }
+
+    rthal_irq_release(RTHAL_TIMER_IRQ);
+
+    rthal_critical_exit(flags);
+}
+
+unsigned long rthal_timer_calibrate (void)
+
+{
+    return 1000000000 / RTHAL_CPU_FREQ;
+}
+
+static inline int do_exception_event (unsigned event, unsigned domid, void *data)
+
+{
+    rthal_declare_cpuid;
+
+    rthal_load_cpuid();
+
+    if (domid == RTHAL_DOMAIN_ID)
+       {
+       rthal_realtime_faults[cpuid][event]++;
+
+       if (rthal_trap_handler != NULL &&
+           test_bit(cpuid,&rthal_cpu_realtime) &&
+           rthal_trap_handler(event,domid,data) != 0)
+           return RTHAL_EVENT_STOP;
+       }
+
+    return RTHAL_EVENT_PROPAGATE;
+}
+
+RTHAL_DECLARE_EVENT(exception_event);
+
+static inline void do_rthal_domain_entry (void)
+
+{
+    unsigned trapnr;
+
+    /* Trap all faults. */
+    for (trapnr = 0; trapnr < RTHAL_NR_FAULTS; trapnr++)
+       rthal_catch_exception(trapnr,&exception_event);
+
+    printk(KERN_INFO "Xenomai: hal/powerpc loaded.\n");
+}
+
+RTHAL_DECLARE_DOMAIN(rthal_domain_entry);
+
+int rthal_arch_init (void)
+
+{
+    if (rthal_cpufreq_arg == 0)
+       /* The CPU frequency is expressed as the timebase frequency
+          for this port. */
+       rthal_cpufreq_arg = (unsigned long)rthal_get_cpufreq();
+
+    if (rthal_timerfreq_arg == 0)
+       rthal_timerfreq_arg = rthal_tunables.cpu_freq;
+
+    return 0;
+}
+
+void rthal_arch_cleanup (void)
+
+{
+    /* Nothing to cleanup so far. */
+}
+
+/*@}*/
+
+EXPORT_SYMBOL(rthal_switch_context);
+
+#ifdef CONFIG_XENO_HW_FPU
+EXPORT_SYMBOL(rthal_init_fpu);
+EXPORT_SYMBOL(rthal_save_fpu);
+EXPORT_SYMBOL(rthal_restore_fpu);
+#endif /* CONFIG_XENO_HW_FPU */
diff -Nru --exclude=.svn xenomai-orig/arch/powerpc/hal/switch.S xenomai-devel/arch/powerpc/hal/switch.S
--- xenomai-orig/arch/powerpc/hal/switch.S      1970-01-01 02:00:00.000000000 +0200
+++ xenomai-devel/arch/powerpc/hal/switch.S     2005-10-17 11:02:57.000000000 +0300
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2004 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <asm/processor.h>
+#include <asm/cputable.h>
+#include <asm/page.h>
+#include <asm/ppc_asm.h>
+#include <xeno_config.h> 
+
+#define RTHAL_SAVEREG(reg, pos)        stw reg,STACK_FRAME_OVERHEAD+4*(pos)(r1)
+#define RTHAL_LOADREG(reg, pos)        lwz reg,STACK_FRAME_OVERHEAD+4*(pos)(r1)
+
+/*
+ * r3=out_kspp, r4=in_kspp
+ */
+_GLOBAL(rthal_switch_context)
+
+        stwu    r1,-124(r1)
+
+        /* Save general purpose registers. */
+
+       RTHAL_SAVEREG(r31,0)
+       RTHAL_SAVEREG(r30,1)
+       RTHAL_SAVEREG(r29,2)
+       RTHAL_SAVEREG(r28,3)
+       RTHAL_SAVEREG(r27,4)
+       RTHAL_SAVEREG(r26,5)
+       RTHAL_SAVEREG(r25,6)
+       RTHAL_SAVEREG(r24,7)
+       RTHAL_SAVEREG(r23,8)
+       RTHAL_SAVEREG(r22,9)
+       RTHAL_SAVEREG(r21,10)
+       RTHAL_SAVEREG(r20,11)
+       RTHAL_SAVEREG(r19,12)
+       RTHAL_SAVEREG(r18,13)
+       RTHAL_SAVEREG(r17,14)
+       RTHAL_SAVEREG(r16,15)
+       RTHAL_SAVEREG(r15,16)
+       RTHAL_SAVEREG(r14,17)
+       RTHAL_SAVEREG(r13,18)
+       RTHAL_SAVEREG(r3,19)
+       RTHAL_SAVEREG(r2,20)
+       RTHAL_SAVEREG(r0,21)
+
+        /* Save special registers. */
+       
+       mfctr    r2
+       RTHAL_SAVEREG(r2,22)
+        mfcr     r2
+       RTHAL_SAVEREG(r2,23)
+        mfxer    r2
+       RTHAL_SAVEREG(r2,24)
+        mflr     r2
+       RTHAL_SAVEREG(r2,25)
+        mfmsr    r2
+       RTHAL_SAVEREG(r2,26)
+
+        /* Switch stacks. */
+       
+        stw      r1,0(r3)       /* *out_kspp = sp */
+        lwz      r1,0(r4)       /* sp = *in_kspp */
+
+        /* Restore special registers. */
+
+       RTHAL_LOADREG(r2,26)
+        mtmsr    r2
+       RTHAL_LOADREG(r2,25)
+        mtlr     r2
+       RTHAL_LOADREG(r2,24)
+        mtxer    r2
+       RTHAL_LOADREG(r2,23)
+        mtcr     r2
+       RTHAL_LOADREG(r2,22)
+        mtctr    r2
+
+       /* Restore general purpose registers. */
+       
+       RTHAL_LOADREG(r0,21)
+       RTHAL_LOADREG(r2,20)
+       RTHAL_LOADREG(r3,19)
+       RTHAL_LOADREG(r13,18)
+       RTHAL_LOADREG(r14,17)
+       RTHAL_LOADREG(r15,16)
+       RTHAL_LOADREG(r16,15)
+       RTHAL_LOADREG(r17,14)
+       RTHAL_LOADREG(r18,13)
+       RTHAL_LOADREG(r19,12)
+       RTHAL_LOADREG(r20,11)
+       RTHAL_LOADREG(r21,10)
+       RTHAL_LOADREG(r22,9)
+       RTHAL_LOADREG(r23,8)
+       RTHAL_LOADREG(r24,7)
+       RTHAL_LOADREG(r25,6)
+       RTHAL_LOADREG(r26,5)
+       RTHAL_LOADREG(r27,4)
+       RTHAL_LOADREG(r28,3)
+       RTHAL_LOADREG(r29,2)
+       RTHAL_LOADREG(r30,1)
+       RTHAL_LOADREG(r31,0)
+
+        addi    r1,r1,124
+
+        blr
diff -Nru --exclude=.svn xenomai-orig/arch/powerpc/hal/switch_64.S xenomai-devel/arch/powerpc/hal/switch_64.S
--- xenomai-orig/arch/powerpc/hal/switch_64.S   1970-01-01 02:00:00.000000000 +0200
+++ xenomai-devel/arch/powerpc/hal/switch_64.S  2005-10-19 16:49:33.000000000 +0300
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2004 Philippe Gerum.
+ *
+ * 64-bit PowerPC adoption
+ *   copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <asm/processor.h>
+#include <asm/cputable.h>
+#include <asm/page.h>
+#include <asm/offsets.h>
+#include <asm/ppc_asm.h>
+#include <xeno_config.h> 
+
+#define RTHAL_SAVEREG(reg, pos)        std reg,STACK_FRAME_OVERHEAD+8*(pos)(r1)
+#define RTHAL_LOADREG(reg, pos)        ld reg,STACK_FRAME_OVERHEAD+8*(pos)(r1)
+
+/*
+ * r3=out_kspp, r4=in_kspp
+ */
+_GLOBAL(rthal_switch_context)
+        stdu    r1,-224-STACK_FRAME_OVERHEAD(r1)
+
+        /* Save general purpose registers. */
+
+       RTHAL_SAVEREG(r31,0)
+       RTHAL_SAVEREG(r30,1)
+       RTHAL_SAVEREG(r29,2)
+       RTHAL_SAVEREG(r28,3)
+       RTHAL_SAVEREG(r27,4)
+       RTHAL_SAVEREG(r26,5)
+       RTHAL_SAVEREG(r25,6)
+       RTHAL_SAVEREG(r24,7)
+       RTHAL_SAVEREG(r23,8)
+       RTHAL_SAVEREG(r22,9)
+       RTHAL_SAVEREG(r21,10)
+       RTHAL_SAVEREG(r20,11)
+       RTHAL_SAVEREG(r19,12)
+       RTHAL_SAVEREG(r18,13)
+       RTHAL_SAVEREG(r17,14)
+       RTHAL_SAVEREG(r16,15)
+       RTHAL_SAVEREG(r15,16)
+       RTHAL_SAVEREG(r14,17)
+       RTHAL_SAVEREG(r13,18)
+       RTHAL_SAVEREG(r3,19)
+       RTHAL_SAVEREG(r2,20)
+       RTHAL_SAVEREG(r0,21)
+
+        /* Save special registers. */
+       
+       mfctr    r2
+       RTHAL_SAVEREG(r2,22)
+        mfcr     r2
+       RTHAL_SAVEREG(r2,23)
+        mfxer    r2
+       RTHAL_SAVEREG(r2,24)
+        mflr     r2
+       RTHAL_SAVEREG(r2,25)
+        mfmsr    r2
+       RTHAL_SAVEREG(r2,26)
+
+        /* Switch stacks. */
+       
+        std      r1,0(r3)       /* *out_kspp = sp */
+       /* TODO: VSIDs */
+        ld      r1,0(r4)       /* sp = *in_kspp */
+
+        /* Restore special registers. */
+
+       RTHAL_LOADREG(r2,26)
+        mtmsrd   r2
+       RTHAL_LOADREG(r2,25)
+        mtlr     r2
+       RTHAL_LOADREG(r2,24)
+        mtxer    r2
+       RTHAL_LOADREG(r2,23)
+        mtcr     r2
+       RTHAL_LOADREG(r2,22)
+        mtctr    r2
+
+       /* Restore general purpose registers. */
+       
+       RTHAL_LOADREG(r0,21)
+       RTHAL_LOADREG(r2,20)
+       RTHAL_LOADREG(r3,19)
+       RTHAL_LOADREG(r13,18)
+       RTHAL_LOADREG(r14,17)
+       RTHAL_LOADREG(r15,16)
+       RTHAL_LOADREG(r16,15)
+       RTHAL_LOADREG(r17,14)
+       RTHAL_LOADREG(r18,13)
+       RTHAL_LOADREG(r19,12)
+       RTHAL_LOADREG(r20,11)
+       RTHAL_LOADREG(r21,10)
+       RTHAL_LOADREG(r22,9)
+       RTHAL_LOADREG(r23,8)
+       RTHAL_LOADREG(r24,7)
+       RTHAL_LOADREG(r25,6)
+       RTHAL_LOADREG(r26,5)
+       RTHAL_LOADREG(r27,4)
+       RTHAL_LOADREG(r28,3)
+       RTHAL_LOADREG(r29,2)
+       RTHAL_LOADREG(r30,1)
+       RTHAL_LOADREG(r31,0)
+
+        addi    r1,r1,224+STACK_FRAME_OVERHEAD
+
+        blr
diff -Nru --exclude=.svn xenomai-orig/arch/powerpc/Kconfig xenomai-devel/arch/powerpc/Kconfig
--- xenomai-orig/arch/powerpc/Kconfig   1970-01-01 02:00:00.000000000 +0200
+++ xenomai-devel/arch/powerpc/Kconfig  2005-10-11 10:32:16.000000000 +0300
@@ -0,0 +1,71 @@
+mainmenu "Xenomai/powerpc configuration"
+
+source Kconfig
+
+source "nucleus/Kconfig"
+
+menu "Machine (powerpc)"
+
+config XENO_HW_FPU
+       bool "Enable FPU support"
+       default y
+       help
+       The FPU executes instructions from the processor's normal
+       instruction stream. It can handle the types of high-precision
+       floating-point processing operations commonly found in
+       scientific, engineering, and business applications.
+       If your target system has no FPU, say NO here; otherwise,
+       enabling FPU support when the hardware is available may
+       greatly improve performance.
+
+config XENO_HW_PERIODIC_TIMER
+       bool "Enable periodic timer support"
+       default y
+       help
+       On this architecture, the nucleus provides both aperiodic and
+       periodic timing modes. In aperiodic mode, timing accuracy is
+       higher - since it is not rounded to a constant time slice - at
+       the expense of a lesser efficiency when many timers are
+       simultaneously active. The aperiodic mode gives better results
+       in configurations involving a few threads requesting timing
+       services over different time scales that cannot be easily
+       expressed as multiples of a single base tick, or would lead to
+       a waste of high frequency periodic ticks. You can disable
+       the periodic support for this architecture to save a few
+       hundred bytes if you plan to use the system timer in
+       aperiodic mode only.
+
+config XENO_HW_TIMER_LATENCY
+       depends on XENO_OPT_EXPERT
+       string "Timer tuning latency (ns)"
+       default 0
+       help
+       This parameter accounts for the time (in nanoseconds) needed
+       to program the underlying time source in one-shot timing mode.
+       This value will be used to reduce the scheduling jitter induced
+       by the time needed to setup the timer for its next shot. A
+       default value of 0 (recommended) will cause this value to be
+       estimated by the nucleus at startup.
+
+config XENO_HW_SCHED_LATENCY
+       depends on XENO_OPT_EXPERT
+       string "Scheduling latency (ns)"
+       default 0
+       help
+       Scheduling latency is the time between the termination of an
+       interrupt handler and the execution of the first instruction
+       of the real-time thread this handler resumes. A
+       default value of 0 (recommended) will cause this value to be
+       estimated by the nucleus at startup.
+
+endmenu
+
+source "skins/Kconfig"
+
+menu "Drivers"
+
+source "drivers/Kconfig"
+
+endmenu
+
+source "sim/Kconfig"
diff -Nru --exclude=.svn xenomai-orig/arch/powerpc/patches/README xenomai-devel/arch/powerpc/patches/README
--- xenomai-orig/arch/powerpc/patches/README    1970-01-01 02:00:00.000000000 +0200
+++ xenomai-devel/arch/powerpc/patches/README   2005-10-23 16:07:14.000000000 +0300
@@ -0,0 +1,20 @@
+-- arch/powerpc/patches
+
+Xenomai needs special kernel support to deliver fast and deterministic
+response time to external interrupts, and also to provide real-time
+services highly integrated with the standard Linux kernel.
+
+This support is provided by the Adeos real-time enabler [1], in the
+form of a kernel patch you have to apply to a vanilla kernel tree
+before you attempt to compile the Xenomai codebase against that
+kernel.
+
+On the PowerPC architecture, Xenomai can run over both the older and the
+newer generation of Adeos patches, namely adeos-linux-* and adeos-ipipe-*,
+which can be found in this directory. However, the 64-bit PowerPC
+architecture does not yet support the newer Adeos variant. Just
+apply one of these patches to the corresponding kernel release. You may
+want to have a look at the README.*INSTALL guides at the top of the
+Xenomai tree for more information.
+
+[1] http://www.gna.org/projects/adeos/
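
As a rough sketch of the procedure the README describes (the kernel
version and patch file name below are placeholders, not files shipped by
this patch; use whichever adeos-linux-* or adeos-ipipe-* patch in
arch/powerpc/patches/ matches your kernel):

    cd /usr/src/linux-2.6.14                  # vanilla kernel tree
    patch -p1 < xenomai/arch/powerpc/patches/adeos-ipipe-2.6.14-ppc-VERSION.patch
    # then configure and build the kernel, and build Xenomai against it,
    # as explained in the README.*INSTALL guides at the top of the tree.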
diff -Nru --exclude=.svn xenomai-orig/arch/ppc/defconfig xenomai-devel/arch/ppc/defconfig
--- xenomai-orig/arch/ppc/defconfig     2005-10-11 10:32:16.000000000 +0300
+++ xenomai-devel/arch/ppc/defconfig    1970-01-01 02:00:00.000000000 +0200
@@ -1,90 +0,0 @@
-#
-# Automatically generated make config: don't edit
-#
-CONFIG_MODULES=y
-CONFIG_XENO_VERSION="2.0"
-
-#
-# General
-#
-CONFIG_XENO_INSTALLDIR="/usr/realtime"
-CONFIG_XENO_LINUXDIR="/lib/modules/`uname -r`/build"
-
-#
-# Documentation
-#
-# CONFIG_XENO_DOC_DOX is not set
-# CONFIG_XENO_DOC_LATEX_NONSTOP is not set
-# CONFIG_XENO_DOC_DBX is not set
-CONFIG_XENO_OPT_EXPERT=y
-# CONFIG_XENO_OPT_KSYMS is not set
-# CONFIG_XENO_OPT_USYMS is not set
-# CONFIG_XENO_MAINT is not set
-
-#
-# Nucleus
-#
-CONFIG_XENO_OPT_PERVASIVE=y
-CONFIG_XENO_OPT_PIPE=y
-CONFIG_XENO_OPT_PIPE_NRDEV="32"
-CONFIG_XENO_OPT_SYS_HEAPSZ="128"
-CONFIG_XENO_OPT_STATS=y
-# CONFIG_XENO_OPT_DEBUG is not set
-# CONFIG_XENO_OPT_WATCHDOG is not set
-
-#
-# Scalability
-#
-# CONFIG_XENO_OPT_SCALABLE_SCHED is not set
-
-#
-# LTT tracepoints filtering
-#
-# CONFIG_XENO_OPT_FILTER_EVIRQ is not set
-# CONFIG_XENO_OPT_FILTER_EVTHR is not set
-# CONFIG_XENO_OPT_FILTER_EVSYS is not set
-# CONFIG_XENO_OPT_FILTER_EVALL is not set
-
-#
-# Machine (powerpc)
-#
-CONFIG_XENO_HW_FPU=y
-CONFIG_XENO_HW_PERIODIC_TIMER=y
-CONFIG_XENO_HW_TIMER_LATENCY="0"
-CONFIG_XENO_HW_SCHED_LATENCY="0"
-
-#
-# APIs
-#
-CONFIG_XENO_SKIN_NATIVE=y
-CONFIG_XENO_OPT_NATIVE_REGISTRY=y
-CONFIG_XENO_OPT_NATIVE_REGISTRY_NRSLOTS="512"
-CONFIG_XENO_OPT_NATIVE_PIPE=y
-CONFIG_XENO_OPT_NATIVE_PIPE_BUFSZ="4096"
-CONFIG_XENO_OPT_NATIVE_SEM=y
-CONFIG_XENO_OPT_NATIVE_EVENT=y
-CONFIG_XENO_OPT_NATIVE_MUTEX=y
-CONFIG_XENO_OPT_NATIVE_COND=y
-CONFIG_XENO_OPT_NATIVE_QUEUE=y
-CONFIG_XENO_OPT_NATIVE_HEAP=y
-CONFIG_XENO_OPT_NATIVE_ALARM=y
-CONFIG_XENO_OPT_NATIVE_MPS=y
-CONFIG_XENO_OPT_NATIVE_INTR=y
-CONFIG_XENO_SKIN_POSIX=y
-# CONFIG_XENO_SKIN_PSOS is not set
-# CONFIG_XENO_SKIN_UITRON is not set
-# CONFIG_XENO_SKIN_VRTX is not set
-# CONFIG_XENO_SKIN_VXWORKS is not set
-CONFIG_XENO_SKIN_RTDM=y
-# CONFIG_XENO_SKIN_RTAI is not set
-CONFIG_XENO_OPT_UVM=y
-
-#
-# Drivers
-#
-# CONFIG_XENO_DRIVERS_16550A is not set
-
-#
-# Simulator
-#
-# CONFIG_XENO_MVM is not set
diff -Nru --exclude=.svn xenomai-orig/arch/ppc/GNUmakefile.am xenomai-devel/arch/ppc/GNUmakefile.am
--- xenomai-orig/arch/ppc/GNUmakefile.am        2005-10-11 10:32:16.000000000 +0300
+++ xenomai-devel/arch/ppc/GNUmakefile.am       1970-01-01 02:00:00.000000000 +0200
@@ -1,3 +0,0 @@
-SUBDIRS = hal
-
-EXTRA_DIST = Kconfig defconfig patches
diff -Nru --exclude=.svn xenomai-orig/arch/ppc/GNUmakefile.in xenomai-devel/arch/ppc/GNUmakefile.in
--- xenomai-orig/arch/ppc/GNUmakefile.in        2005-10-23 11:00:14.000000000 +0300
+++ xenomai-devel/arch/ppc/GNUmakefile.in       1970-01-01 02:00:00.000000000 +0200
@@ -1,614 +0,0 @@
-# GNUmakefile.in generated by automake 1.9.5 from GNUmakefile.am.
-# @configure_input@
-
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005  Free Software Foundation, Inc.
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-srcdir = @srcdir@
-top_srcdir = @top_srcdir@
-VPATH = @srcdir@
-pkgdatadir = $(datadir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-top_builddir = ../..
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-INSTALL = @INSTALL@
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-target_triplet = @target@
-subdir = arch/ppc
-DIST_COMMON = $(srcdir)/GNUmakefile.am $(srcdir)/GNUmakefile.in
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps =  \
-       $(top_srcdir)/config/autoconf/ac_prog_cc_for_build.m4 \
-       $(top_srcdir)/config/autoconf/docbook.m4 \
-       $(top_srcdir)/config/version $(top_srcdir)/configure.in
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
-       $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/include/xeno_config.h
-CONFIG_CLEAN_FILES =
-SOURCES =
-DIST_SOURCES =
-RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \
-       html-recursive info-recursive install-data-recursive \
-       install-exec-recursive install-info-recursive \
-       install-recursive installcheck-recursive installdirs-recursive \
-       pdf-recursive ps-recursive uninstall-info-recursive \
-       uninstall-recursive
-ETAGS = etags
-CTAGS = ctags
-DIST_SUBDIRS = $(SUBDIRS)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMDEP_FALSE = @AMDEP_FALSE@
-AMDEP_TRUE = @AMDEP_TRUE@
-AMTAR = @AMTAR@
-AR = @AR@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-BUILD_EXEEXT = @BUILD_EXEEXT@
-BUILD_OBJEXT = @BUILD_OBJEXT@
-CC = @CC@
-CCAS = @CCAS@
-CCASFLAGS = @CCASFLAGS@
-CCDEPMODE = @CCDEPMODE@
-CC_FOR_BUILD = @CC_FOR_BUILD@
-CFLAGS = @CFLAGS@
-CFLAGS_FOR_BUILD = @CFLAGS_FOR_BUILD@
-CONFIG_IA64_FALSE = @CONFIG_IA64_FALSE@
-CONFIG_IA64_TRUE = @CONFIG_IA64_TRUE@
-CONFIG_LTT_FALSE = @CONFIG_LTT_FALSE@
-CONFIG_LTT_TRUE = @CONFIG_LTT_TRUE@
-CONFIG_PPC64_FALSE = @CONFIG_PPC64_FALSE@
-CONFIG_PPC64_TRUE = @CONFIG_PPC64_TRUE@
-CONFIG_PPC_FALSE = @CONFIG_PPC_FALSE@
-CONFIG_PPC_TRUE = @CONFIG_PPC_TRUE@
-CONFIG_SMP_FALSE = @CONFIG_SMP_FALSE@
-CONFIG_SMP_TRUE = @CONFIG_SMP_TRUE@
-CONFIG_X86_FALSE = @CONFIG_X86_FALSE@
-CONFIG_X86_LOCAL_APIC_FALSE = @CONFIG_X86_LOCAL_APIC_FALSE@
-CONFIG_X86_LOCAL_APIC_TRUE = @CONFIG_X86_LOCAL_APIC_TRUE@
-CONFIG_X86_TRUE = @CONFIG_X86_TRUE@
-CONFIG_XENO_DOC_DOX_FALSE = @CONFIG_XENO_DOC_DOX_FALSE@
-CONFIG_XENO_DOC_DOX_TRUE = @CONFIG_XENO_DOC_DOX_TRUE@
-CONFIG_XENO_DRIVERS_16550A_FALSE = @CONFIG_XENO_DRIVERS_16550A_FALSE@
-CONFIG_XENO_DRIVERS_16550A_TRUE = @CONFIG_XENO_DRIVERS_16550A_TRUE@
-CONFIG_XENO_HW_FPU_FALSE = @CONFIG_XENO_HW_FPU_FALSE@
-CONFIG_XENO_HW_FPU_TRUE = @CONFIG_XENO_HW_FPU_TRUE@
-CONFIG_XENO_HW_NMI_DEBUG_LATENCY_FALSE = @CONFIG_XENO_HW_NMI_DEBUG_LATENCY_FALSE@
-CONFIG_XENO_HW_NMI_DEBUG_LATENCY_TRUE = @CONFIG_XENO_HW_NMI_DEBUG_LATENCY_TRUE@
-CONFIG_XENO_HW_SMI_DETECT_FALSE = @CONFIG_XENO_HW_SMI_DETECT_FALSE@
-CONFIG_XENO_HW_SMI_DETECT_TRUE = @CONFIG_XENO_HW_SMI_DETECT_TRUE@
-CONFIG_XENO_MAINT_FALSE = @CONFIG_XENO_MAINT_FALSE@
-CONFIG_XENO_MAINT_GCH_FALSE = @CONFIG_XENO_MAINT_GCH_FALSE@
-CONFIG_XENO_MAINT_GCH_TRUE = @CONFIG_XENO_MAINT_GCH_TRUE@
-CONFIG_XENO_MAINT_PGM_FALSE = @CONFIG_XENO_MAINT_PGM_FALSE@
-CONFIG_XENO_MAINT_PGM_TRUE = @CONFIG_XENO_MAINT_PGM_TRUE@
-CONFIG_XENO_MAINT_TRUE = @CONFIG_XENO_MAINT_TRUE@
-CONFIG_XENO_OLD_FASHIONED_BUILD_FALSE = @CONFIG_XENO_OLD_FASHIONED_BUILD_FALSE@
-CONFIG_XENO_OLD_FASHIONED_BUILD_TRUE = @CONFIG_XENO_OLD_FASHIONED_BUILD_TRUE@
-CONFIG_XENO_OPT_CONFIG_GZ_FALSE = @CONFIG_XENO_OPT_CONFIG_GZ_FALSE@
-CONFIG_XENO_OPT_CONFIG_GZ_TRUE = @CONFIG_XENO_OPT_CONFIG_GZ_TRUE@
-CONFIG_XENO_OPT_NATIVE_ALARM_FALSE = @CONFIG_XENO_OPT_NATIVE_ALARM_FALSE@
-CONFIG_XENO_OPT_NATIVE_ALARM_TRUE = @CONFIG_XENO_OPT_NATIVE_ALARM_TRUE@
-CONFIG_XENO_OPT_NATIVE_COND_FALSE = @CONFIG_XENO_OPT_NATIVE_COND_FALSE@
-CONFIG_XENO_OPT_NATIVE_COND_TRUE = @CONFIG_XENO_OPT_NATIVE_COND_TRUE@
-CONFIG_XENO_OPT_NATIVE_EVENT_FALSE = @CONFIG_XENO_OPT_NATIVE_EVENT_FALSE@
-CONFIG_XENO_OPT_NATIVE_EVENT_TRUE = @CONFIG_XENO_OPT_NATIVE_EVENT_TRUE@
-CONFIG_XENO_OPT_NATIVE_HEAP_FALSE = @CONFIG_XENO_OPT_NATIVE_HEAP_FALSE@
-CONFIG_XENO_OPT_NATIVE_HEAP_TRUE = @CONFIG_XENO_OPT_NATIVE_HEAP_TRUE@
-CONFIG_XENO_OPT_NATIVE_INTR_FALSE = @CONFIG_XENO_OPT_NATIVE_INTR_FALSE@
-CONFIG_XENO_OPT_NATIVE_INTR_TRUE = @CONFIG_XENO_OPT_NATIVE_INTR_TRUE@
-CONFIG_XENO_OPT_NATIVE_MUTEX_FALSE = @CONFIG_XENO_OPT_NATIVE_MUTEX_FALSE@
-CONFIG_XENO_OPT_NATIVE_MUTEX_TRUE = @CONFIG_XENO_OPT_NATIVE_MUTEX_TRUE@
-CONFIG_XENO_OPT_NATIVE_PIPE_FALSE = @CONFIG_XENO_OPT_NATIVE_PIPE_FALSE@
-CONFIG_XENO_OPT_NATIVE_PIPE_TRUE = @CONFIG_XENO_OPT_NATIVE_PIPE_TRUE@
-CONFIG_XENO_OPT_NATIVE_QUEUE_FALSE = @CONFIG_XENO_OPT_NATIVE_QUEUE_FALSE@
-CONFIG_XENO_OPT_NATIVE_QUEUE_TRUE = @CONFIG_XENO_OPT_NATIVE_QUEUE_TRUE@
-CONFIG_XENO_OPT_NATIVE_REGISTRY_FALSE = @CONFIG_XENO_OPT_NATIVE_REGISTRY_FALSE@
-CONFIG_XENO_OPT_NATIVE_REGISTRY_TRUE = @CONFIG_XENO_OPT_NATIVE_REGISTRY_TRUE@
-CONFIG_XENO_OPT_NATIVE_SEM_FALSE = @CONFIG_XENO_OPT_NATIVE_SEM_FALSE@
-CONFIG_XENO_OPT_NATIVE_SEM_TRUE = @CONFIG_XENO_OPT_NATIVE_SEM_TRUE@
-CONFIG_XENO_OPT_PERVASIVE_FALSE = @CONFIG_XENO_OPT_PERVASIVE_FALSE@
-CONFIG_XENO_OPT_PERVASIVE_TRUE = @CONFIG_XENO_OPT_PERVASIVE_TRUE@
-CONFIG_XENO_OPT_PIPE_FALSE = @CONFIG_XENO_OPT_PIPE_FALSE@
-CONFIG_XENO_OPT_PIPE_TRUE = @CONFIG_XENO_OPT_PIPE_TRUE@
-CONFIG_XENO_OPT_RTAI_FIFO_FALSE = @CONFIG_XENO_OPT_RTAI_FIFO_FALSE@
-CONFIG_XENO_OPT_RTAI_FIFO_TRUE = @CONFIG_XENO_OPT_RTAI_FIFO_TRUE@
-CONFIG_XENO_OPT_RTAI_SEM_FALSE = @CONFIG_XENO_OPT_RTAI_SEM_FALSE@
-CONFIG_XENO_OPT_RTAI_SEM_TRUE = @CONFIG_XENO_OPT_RTAI_SEM_TRUE@
-CONFIG_XENO_OPT_RTAI_SHM_FALSE = @CONFIG_XENO_OPT_RTAI_SHM_FALSE@
-CONFIG_XENO_OPT_RTAI_SHM_TRUE = @CONFIG_XENO_OPT_RTAI_SHM_TRUE@
-CONFIG_XENO_OPT_UDEV_FALSE = @CONFIG_XENO_OPT_UDEV_FALSE@
-CONFIG_XENO_OPT_UDEV_TRUE = @CONFIG_XENO_OPT_UDEV_TRUE@
-CONFIG_XENO_OPT_UVM_FALSE = @CONFIG_XENO_OPT_UVM_FALSE@
-CONFIG_XENO_OPT_UVM_TRUE = @CONFIG_XENO_OPT_UVM_TRUE@
-CONFIG_XENO_SKIN_NATIVE_FALSE = @CONFIG_XENO_SKIN_NATIVE_FALSE@
-CONFIG_XENO_SKIN_NATIVE_TRUE = @CONFIG_XENO_SKIN_NATIVE_TRUE@
-CONFIG_XENO_SKIN_POSIX_FALSE = @CONFIG_XENO_SKIN_POSIX_FALSE@
-CONFIG_XENO_SKIN_POSIX_TRUE = @CONFIG_XENO_SKIN_POSIX_TRUE@
-CONFIG_XENO_SKIN_PSOS_FALSE = @CONFIG_XENO_SKIN_PSOS_FALSE@
-CONFIG_XENO_SKIN_PSOS_TRUE = @CONFIG_XENO_SKIN_PSOS_TRUE@
-CONFIG_XENO_SKIN_RTAI_FALSE = @CONFIG_XENO_SKIN_RTAI_FALSE@
-CONFIG_XENO_SKIN_RTAI_TRUE = @CONFIG_XENO_SKIN_RTAI_TRUE@
-CONFIG_XENO_SKIN_RTDM_FALSE = @CONFIG_XENO_SKIN_RTDM_FALSE@
-CONFIG_XENO_SKIN_RTDM_TRUE = @CONFIG_XENO_SKIN_RTDM_TRUE@
-CONFIG_XENO_SKIN_UITRON_FALSE = @CONFIG_XENO_SKIN_UITRON_FALSE@
-CONFIG_XENO_SKIN_UITRON_TRUE = @CONFIG_XENO_SKIN_UITRON_TRUE@
-CONFIG_XENO_SKIN_VRTX_FALSE = @CONFIG_XENO_SKIN_VRTX_FALSE@
-CONFIG_XENO_SKIN_VRTX_TRUE = @CONFIG_XENO_SKIN_VRTX_TRUE@
-CONFIG_XENO_SKIN_VXWORKS_FALSE = @CONFIG_XENO_SKIN_VXWORKS_FALSE@
-CONFIG_XENO_SKIN_VXWORKS_TRUE = @CONFIG_XENO_SKIN_VXWORKS_TRUE@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CPPFLAGS_FOR_BUILD = @CPPFLAGS_FOR_BUILD@
-CPP_FOR_BUILD = @CPP_FOR_BUILD@
-CROSS_COMPILE = @CROSS_COMPILE@
-CXX = @CXX@
-CXXCPP = @CXXCPP@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DBX_ABS_SRCDIR_FALSE = @DBX_ABS_SRCDIR_FALSE@
-DBX_ABS_SRCDIR_TRUE = @DBX_ABS_SRCDIR_TRUE@
-DBX_DOC_FALSE = @DBX_DOC_FALSE@
-DBX_DOC_ROOT = @DBX_DOC_ROOT@
-DBX_DOC_TRUE = @DBX_DOC_TRUE@
-DBX_FOP = @DBX_FOP@
-DBX_GEN_DOC_ROOT = @DBX_GEN_DOC_ROOT@
-DBX_LINT = @DBX_LINT@
-DBX_MAYBE_NONET = @DBX_MAYBE_NONET@
-DBX_ROOT = @DBX_ROOT@
-DBX_XSLTPROC = @DBX_XSLTPROC@
-DBX_XSL_ROOT = @DBX_XSL_ROOT@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-DOXYGEN = @DOXYGEN@
-DOXYGEN_HAVE_DOT = @DOXYGEN_HAVE_DOT@
-DOXYGEN_SHOW_INCLUDE_FILES = @DOXYGEN_SHOW_INCLUDE_FILES@
-ECHO = @ECHO@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-F77 = @F77@
-FFLAGS = @FFLAGS@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-LATEX_BATCHMODE = @LATEX_BATCHMODE@
-LATEX_MODE = @LATEX_MODE@
-LDFLAGS = @LDFLAGS@
-LEX = @LEX@
-LEXLIB = @LEXLIB@
-LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBTOOL = @LIBTOOL@
-LN_S = @LN_S@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@
-MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@
-MAKEINFO = @MAKEINFO@
-OBJEXT = @OBJEXT@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-RANLIB = @RANLIB@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-STRIP = @STRIP@
-VERSION = @VERSION@
-XENO_BUILD_STRING = @XENO_BUILD_STRING@
-XENO_FP_CFLAGS = @XENO_FP_CFLAGS@
-XENO_HOST_STRING = @XENO_HOST_STRING@
-XENO_KBUILD_CLEAN = @XENO_KBUILD_CLEAN@
-XENO_KBUILD_CMD = @XENO_KBUILD_CMD@
-XENO_KBUILD_DISTCLEAN = @XENO_KBUILD_DISTCLEAN@
-XENO_KBUILD_ENV = @XENO_KBUILD_ENV@
-XENO_KMOD_APP_CFLAGS = @XENO_KMOD_APP_CFLAGS@
-XENO_KMOD_CFLAGS = @XENO_KMOD_CFLAGS@
-XENO_LINUX_DIR = @XENO_LINUX_DIR@
-XENO_LINUX_VERSION = @XENO_LINUX_VERSION@
-XENO_MAYBE_DOCDIR = @XENO_MAYBE_DOCDIR@
-XENO_MAYBE_SIMDIR = @XENO_MAYBE_SIMDIR@
-XENO_MODULE_DIR = @XENO_MODULE_DIR@
-XENO_MODULE_EXT = @XENO_MODULE_EXT@
-XENO_PIPE_NRDEV = @XENO_PIPE_NRDEV@
-XENO_SYMBOL_DIR = @XENO_SYMBOL_DIR@
-XENO_TARGET_ARCH = @XENO_TARGET_ARCH@
-XENO_TARGET_SUBARCH = @XENO_TARGET_SUBARCH@
-XENO_USER_APP_CFLAGS = @XENO_USER_APP_CFLAGS@
-XENO_USER_CFLAGS = @XENO_USER_CFLAGS@
-ac_ct_AR = @ac_ct_AR@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CC_FOR_BUILD = @ac_ct_CC_FOR_BUILD@
-ac_ct_CXX = @ac_ct_CXX@
-ac_ct_F77 = @ac_ct_F77@
-ac_ct_RANLIB = @ac_ct_RANLIB@
-ac_ct_STRIP = @ac_ct_STRIP@
-am__fastdepCC_FALSE = @am__fastdepCC_FALSE@
-am__fastdepCC_TRUE = @am__fastdepCC_TRUE@
-am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@
-am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_vendor = @build_vendor@
-datadir = @datadir@
-exec_prefix = @exec_prefix@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localstatedir = @localstatedir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-oldincludedir = @oldincludedir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-subdirs = @subdirs@
-sysconfdir = @sysconfdir@
-target = @target@
-target_alias = @target_alias@
-target_cpu = @target_cpu@
-target_os = @target_os@
-target_vendor = @target_vendor@
-SUBDIRS = hal
-EXTRA_DIST = Kconfig defconfig patches
-all: all-recursive
-
-.SUFFIXES:
-$(srcdir)/GNUmakefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/GNUmakefile.am  $(am__configure_deps)
-       @for dep in $?; do \
-         case '$(am__configure_deps)' in \
-           *$$dep*) \
-             cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
-               && exit 0; \
-             exit 1;; \
-         esac; \
-       done; \
-       echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign  arch/ppc/GNUmakefile'; \
-       cd $(top_srcdir) && \
-         $(AUTOMAKE) --foreign  arch/ppc/GNUmakefile
-.PRECIOUS: GNUmakefile
-GNUmakefile: $(srcdir)/GNUmakefile.in $(top_builddir)/config.status
-       @case '$?' in \
-         *config.status*) \
-           cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
-         *) \
-           echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
-           cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
-       esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
-       cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
-       cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
-       cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-mostlyclean-libtool:
-       -rm -f *.lo
-
-clean-libtool:
-       -rm -rf .libs _libs
-
-distclean-libtool:
-       -rm -f libtool
-uninstall-info-am:
-
-# This directory's subdirectories are mostly independent; you can cd
-# into them and run `make' without going through this Makefile.
-# To change the values of `make' variables: instead of editing Makefiles,
-# (1) if the variable is set in `config.status', edit `config.status'
-#     (which will cause the Makefiles to be regenerated when you run `make');
-# (2) otherwise, pass the desired values on the `make' command line.
-$(RECURSIVE_TARGETS):
-       @failcom='exit 1'; \
-       for f in x $$MAKEFLAGS; do \
-         case $$f in \
-           *=* | --[!k]*);; \
-           *k*) failcom='fail=yes';; \
-         esac; \
-       done; \
-       dot_seen=no; \
-       target=`echo $@ | sed s/-recursive//`; \
-       list='$(SUBDIRS)'; for subdir in $$list; do \
-         echo "Making $$target in $$subdir"; \
-         if test "$$subdir" = "."; then \
-           dot_seen=yes; \
-           local_target="$$target-am"; \
-         else \
-           local_target="$$target"; \
-         fi; \
-         (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
-         || eval $$failcom; \
-       done; \
-       if test "$$dot_seen" = "no"; then \
-         $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
-       fi; test -z "$$fail"
-
-mostlyclean-recursive clean-recursive distclean-recursive \
-maintainer-clean-recursive:
-       @failcom='exit 1'; \
-       for f in x $$MAKEFLAGS; do \
-         case $$f in \
-           *=* | --[!k]*);; \
-           *k*) failcom='fail=yes';; \
-         esac; \
-       done; \
-       dot_seen=no; \
-       case "$@" in \
-         distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
-         *) list='$(SUBDIRS)' ;; \
-       esac; \
-       rev=''; for subdir in $$list; do \
-         if test "$$subdir" = "."; then :; else \
-           rev="$$subdir $$rev"; \
-         fi; \
-       done; \
-       rev="$$rev ."; \
-       target=`echo $@ | sed s/-recursive//`; \
-       for subdir in $$rev; do \
-         echo "Making $$target in $$subdir"; \
-         if test "$$subdir" = "."; then \
-           local_target="$$target-am"; \
-         else \
-           local_target="$$target"; \
-         fi; \
-         (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
-         || eval $$failcom; \
-       done && test -z "$$fail"
-tags-recursive:
-       list='$(SUBDIRS)'; for subdir in $$list; do \
-         test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \
-       done
-ctags-recursive:
-       list='$(SUBDIRS)'; for subdir in $$list; do \
-         test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \
-       done
-
-ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
-       list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
-       unique=`for i in $$list; do \
-           if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
-         done | \
-         $(AWK) '    { files[$$0] = 1; } \
-              END { for (i in files) print i; }'`; \
-       mkid -fID $$unique
-tags: TAGS
-
-TAGS: tags-recursive $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
-               $(TAGS_FILES) $(LISP)
-       tags=; \
-       here=`pwd`; \
-       if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
-         include_option=--etags-include; \
-         empty_fix=.; \
-       else \
-         include_option=--include; \
-         empty_fix=; \
-       fi; \
-       list='$(SUBDIRS)'; for subdir in $$list; do \
-         if test "$$subdir" = .; then :; else \
-           test ! -f $$subdir/TAGS || \
-             tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \
-         fi; \
-       done; \
-       list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
-       unique=`for i in $$list; do \
-           if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
-         done | \
-         $(AWK) '    { files[$$0] = 1; } \
-              END { for (i in files) print i; }'`; \
-       if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
-         test -n "$$unique" || unique=$$empty_fix; \
-         $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
-           $$tags $$unique; \
-       fi
-ctags: CTAGS
-CTAGS: ctags-recursive $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
-               $(TAGS_FILES) $(LISP)
-       tags=; \
-       here=`pwd`; \
-       list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
-       unique=`for i in $$list; do \
-           if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
-         done | \
-         $(AWK) '    { files[$$0] = 1; } \
-              END { for (i in files) print i; }'`; \
-       test -z "$(CTAGS_ARGS)$$tags$$unique" \
-         || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
-            $$tags $$unique
-
-GTAGS:
-       here=`$(am__cd) $(top_builddir) && pwd` \
-         && cd $(top_srcdir) \
-         && gtags -i $(GTAGS_ARGS) $$here
-
-distclean-tags:
-       -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
-
-distdir: $(DISTFILES)
-       @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
-       topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
-       list='$(DISTFILES)'; for file in $$list; do \
-         case $$file in \
-           $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
-           $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
-         esac; \
-         if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
-         dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
-         if test "$$dir" != "$$file" && test "$$dir" != "."; then \
-           dir="/$$dir"; \
-           $(mkdir_p) "$(distdir)$$dir"; \
-         else \
-           dir=''; \
-         fi; \
-         if test -d $$d/$$file; then \
-           if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
-             cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
-           fi; \
-           cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
-         else \
-           test -f $(distdir)/$$file \
-           || cp -p $$d/$$file $(distdir)/$$file \
-           || exit 1; \
-         fi; \
-       done
-       list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
-         if test "$$subdir" = .; then :; else \
-           test -d "$(distdir)/$$subdir" \
-           || $(mkdir_p) "$(distdir)/$$subdir" \
-           || exit 1; \
-           distdir=`$(am__cd) $(distdir) && pwd`; \
-           top_distdir=`$(am__cd) $(top_distdir) && pwd`; \
-           (cd $$subdir && \
-             $(MAKE) $(AM_MAKEFLAGS) \
-               top_distdir="$$top_distdir" \
-               distdir="$$distdir/$$subdir" \
-               distdir) \
-             || exit 1; \
-         fi; \
-       done
-check-am: all-am
-check: check-recursive
-all-am: GNUmakefile
-installdirs: installdirs-recursive
-installdirs-am:
-install: install-recursive
-install-exec: install-exec-recursive
-install-data: install-data-recursive
-uninstall: uninstall-recursive
-
-install-am: all-am
-       @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-recursive
-install-strip:
-       $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
-         install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
-         `test -z '$(STRIP)' || \
-           echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
-       -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-
-maintainer-clean-generic:
-       @echo "This command is intended for maintainers to use"
-       @echo "it deletes files that may require special tools to rebuild."
-clean: clean-recursive
-
-clean-am: clean-generic clean-libtool mostlyclean-am
-
-distclean: distclean-recursive
-       -rm -f GNUmakefile
-distclean-am: clean-am distclean-generic distclean-libtool \
-       distclean-tags
-
-dvi: dvi-recursive
-
-dvi-am:
-
-html: html-recursive
-
-info: info-recursive
-
-info-am:
-
-install-data-am:
-
-install-exec-am:
-
-install-info: install-info-recursive
-
-install-man:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-recursive
-       -rm -f GNUmakefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-recursive
-
-mostlyclean-am: mostlyclean-generic mostlyclean-libtool
-
-pdf: pdf-recursive
-
-pdf-am:
-
-ps: ps-recursive
-
-ps-am:
-
-uninstall-am: uninstall-info-am
-
-uninstall-info: uninstall-info-recursive
-
-.PHONY: $(RECURSIVE_TARGETS) CTAGS GTAGS all all-am check check-am \
-       clean clean-generic clean-libtool clean-recursive ctags \
-       ctags-recursive distclean distclean-generic distclean-libtool \
-       distclean-recursive distclean-tags distdir dvi dvi-am html \
-       html-am info info-am install install-am install-data \
-       install-data-am install-exec install-exec-am install-info \
-       install-info-am install-man install-strip installcheck \
-       installcheck-am installdirs installdirs-am maintainer-clean \
-       maintainer-clean-generic maintainer-clean-recursive \
-       mostlyclean mostlyclean-generic mostlyclean-libtool \
-       mostlyclean-recursive pdf pdf-am ps ps-am tags tags-recursive \
-       uninstall uninstall-am uninstall-info-am
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff -Nru --exclude=.svn xenomai-orig/arch/ppc/hal/fpu.S xenomai-devel/arch/ppc/hal/fpu.S
--- xenomai-orig/arch/ppc/hal/fpu.S     2005-10-11 10:32:15.000000000 +0300
+++ xenomai-devel/arch/ppc/hal/fpu.S    1970-01-01 02:00:00.000000000 +0200
@@ -1,78 +0,0 @@
-/*
- * Copyright (C) 2001,2002,2003,2004 Philippe Gerum.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
- * USA; either version 2 of the License, or (at your option) any later
- * version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <asm/processor.h>
-#include <asm/cputable.h>
-#include <asm/ppc_asm.h>
-#include <xeno_config.h> 
-
-#define RTHAL_FPSAVE(n, base)  stfd n,8*(n)(base)
-#define RTHAL_FPSAVE2(n, base) RTHAL_FPSAVE(n, base); RTHAL_FPSAVE(n+1, base)
-#define RTHAL_FPSAVE4(n, base) RTHAL_FPSAVE2(n, base); RTHAL_FPSAVE2(n+2, base)
-#define RTHAL_FPSAVE8(n, base) RTHAL_FPSAVE4(n, base); RTHAL_FPSAVE4(n+4, base)
-#define RTHAL_FPSAVE16(n, base)        RTHAL_FPSAVE8(n, base); RTHAL_FPSAVE8(n+8, base)
-#define RTHAL_FPSAVE32(n, base)        RTHAL_FPSAVE16(n, base); RTHAL_FPSAVE16(n+16, base)
-
-/* r3 = &tcb->fpuenv */
-_GLOBAL(rthal_save_fpu)
-       mfmsr   r5
-       ori     r5,r5,MSR_FP            /* Re-enable use of FPU. */
-#ifdef CONFIG_PPC64BRIDGE
-       clrldi  r5,r5,1                 /* Turn off 64-bit mode. */
-#endif /* CONFIG_PPC64BRIDGE */
-       SYNC
-       MTMSRD(r5)                      /* Enable use of fpu. */
-       isync
-       RTHAL_FPSAVE32(0,r3)
-       mffs    fr0
-       stfd    fr0,8*32(r3)
-       blr
-
-#define RTHAL_FPLOAD(n, base)  lfd n,8*(n)(base)
-#define RTHAL_FPLOAD2(n, base) RTHAL_FPLOAD(n, base); RTHAL_FPLOAD(n+1, base)
-#define RTHAL_FPLOAD4(n, base) RTHAL_FPLOAD2(n, base); RTHAL_FPLOAD2(n+2, base)
-#define RTHAL_FPLOAD8(n, base) RTHAL_FPLOAD4(n, base); RTHAL_FPLOAD4(n+4, base)
-#define RTHAL_FPLOAD16(n, base)        RTHAL_FPLOAD8(n, base); RTHAL_FPLOAD8(n+8, base)
-#define RTHAL_FPLOAD32(n, base)        RTHAL_FPLOAD16(n, base); RTHAL_FPLOAD16(n+16, base)
-
-/* r3 = &tcb->fpuenv */
-_GLOBAL(rthal_init_fpu)
-       mfmsr   r5
-       ori     r5,r5,MSR_FP|MSR_FE1    /* RT kernel threads always operate in */
-       li      r4,MSR_FE0              /* imprecise non-recoverable exception mode. */
-       andc    r5,r5,r4
-       SYNC
-       MTMSRD(r5)
-
-       /* Fallback wanted. */
-       
-/* r3 = &tcb->fpuenv */
-_GLOBAL(rthal_restore_fpu)
-       mfmsr   r5
-       ori     r5,r5,MSR_FP            /* Re-enable use of FPU. */
-#ifdef CONFIG_PPC64BRIDGE
-       clrldi  r5,r5,1                 /* Turn off 64-bit mode. */
-#endif /* CONFIG_PPC64BRIDGE */
-       SYNC
-       MTMSRD(r5)                      /* Enable use of fpu. */
-       isync
-       lfd     fr0,8*32(r3)
-       mtfsf   0xff,0
-       RTHAL_FPLOAD32(0,r3)
-       blr
diff -Nru --exclude=.svn xenomai-orig/arch/ppc/hal/GNUmakefile.am 
xenomai-devel/arch/ppc/hal/GNUmakefile.am
--- xenomai-orig/arch/ppc/hal/GNUmakefile.am    2005-10-23 11:00:13.000000000 
+0300
+++ xenomai-devel/arch/ppc/hal/GNUmakefile.am   1970-01-01 02:00:00.000000000 
+0200
@@ -1,33 +0,0 @@
-moduledir = $(DESTDIR)@XENO_MODULE_DIR@
-
-modext = @XENO_MODULE_EXT@
-
-CROSS_COMPILE = @CROSS_COMPILE@
-
-libhal_SRC = ppc.c switch.S
-
-if CONFIG_XENO_HW_FPU
-libhal_SRC += fpu.S
-endif
-distfiles = fpu.S
-
-xeno_hal.ko: @XENO_KBUILD_ENV@
-xeno_hal.ko: $(libhal_SRC) generic.c FORCE
-       @XENO_KBUILD_CMD@ xeno_extradef="@XENO_KMOD_CFLAGS@"
-
-clean-local:
-       @XENO_KBUILD_CLEAN@
-
-all-local: xeno_hal$(modext)
-if CONFIG_XENO_OLD_FASHIONED_BUILD
-       $(mkinstalldirs) $(top_srcdir)/modules
-       $(INSTALL_DATA) $^ $(top_srcdir)/modules
-endif
-
-install-exec-local: xeno_hal$(modext)
-       $(mkinstalldirs) $(moduledir)
-       $(INSTALL_DATA) $< $(moduledir)
-
-.PHONY: FORCE
-
-EXTRA_DIST = $(libhal_SRC) $(distfiles) Makefile
diff -Nru --exclude=.svn xenomai-orig/arch/ppc/hal/GNUmakefile.in 
xenomai-devel/arch/ppc/hal/GNUmakefile.in
--- xenomai-orig/arch/ppc/hal/GNUmakefile.in    2005-10-23 11:00:13.000000000 
+0300
+++ xenomai-devel/arch/ppc/hal/GNUmakefile.in   1970-01-01 02:00:00.000000000 
+0200
@@ -1,479 +0,0 @@
-# GNUmakefile.in generated by automake 1.9.5 from GNUmakefile.am.
-# @configure_input@
-
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005  Free Software Foundation, Inc.
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-srcdir = @srcdir@
-top_srcdir = @top_srcdir@
-VPATH = @srcdir@
-pkgdatadir = $(datadir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-top_builddir = ../../..
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-INSTALL = @INSTALL@
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-target_triplet = @target@
-@CONFIG_XENO_HW_FPU_TRUE@am__append_1 = fpu.S
-subdir = arch/ppc/hal
-DIST_COMMON = $(srcdir)/GNUmakefile.am $(srcdir)/GNUmakefile.in
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps =  \
-       $(top_srcdir)/config/autoconf/ac_prog_cc_for_build.m4 \
-       $(top_srcdir)/config/autoconf/docbook.m4 \
-       $(top_srcdir)/config/version $(top_srcdir)/configure.in
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
-       $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/include/xeno_config.h
-CONFIG_CLEAN_FILES =
-SOURCES =
-DIST_SOURCES =
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMDEP_FALSE = @AMDEP_FALSE@
-AMDEP_TRUE = @AMDEP_TRUE@
-AMTAR = @AMTAR@
-AR = @AR@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-BUILD_EXEEXT = @BUILD_EXEEXT@
-BUILD_OBJEXT = @BUILD_OBJEXT@
-CC = @CC@
-CCAS = @CCAS@
-CCASFLAGS = @CCASFLAGS@
-CCDEPMODE = @CCDEPMODE@
-CC_FOR_BUILD = @CC_FOR_BUILD@
-CFLAGS = @CFLAGS@
-CFLAGS_FOR_BUILD = @CFLAGS_FOR_BUILD@
-CONFIG_IA64_FALSE = @CONFIG_IA64_FALSE@
-CONFIG_IA64_TRUE = @CONFIG_IA64_TRUE@
-CONFIG_LTT_FALSE = @CONFIG_LTT_FALSE@
-CONFIG_LTT_TRUE = @CONFIG_LTT_TRUE@
-CONFIG_PPC64_FALSE = @CONFIG_PPC64_FALSE@
-CONFIG_PPC64_TRUE = @CONFIG_PPC64_TRUE@
-CONFIG_PPC_FALSE = @CONFIG_PPC_FALSE@
-CONFIG_PPC_TRUE = @CONFIG_PPC_TRUE@
-CONFIG_SMP_FALSE = @CONFIG_SMP_FALSE@
-CONFIG_SMP_TRUE = @CONFIG_SMP_TRUE@
-CONFIG_X86_FALSE = @CONFIG_X86_FALSE@
-CONFIG_X86_LOCAL_APIC_FALSE = @CONFIG_X86_LOCAL_APIC_FALSE@
-CONFIG_X86_LOCAL_APIC_TRUE = @CONFIG_X86_LOCAL_APIC_TRUE@
-CONFIG_X86_TRUE = @CONFIG_X86_TRUE@
-CONFIG_XENO_DOC_DOX_FALSE = @CONFIG_XENO_DOC_DOX_FALSE@
-CONFIG_XENO_DOC_DOX_TRUE = @CONFIG_XENO_DOC_DOX_TRUE@
-CONFIG_XENO_DRIVERS_16550A_FALSE = @CONFIG_XENO_DRIVERS_16550A_FALSE@
-CONFIG_XENO_DRIVERS_16550A_TRUE = @CONFIG_XENO_DRIVERS_16550A_TRUE@
-CONFIG_XENO_HW_FPU_FALSE = @CONFIG_XENO_HW_FPU_FALSE@
-CONFIG_XENO_HW_FPU_TRUE = @CONFIG_XENO_HW_FPU_TRUE@
-CONFIG_XENO_HW_NMI_DEBUG_LATENCY_FALSE = 
@CONFIG_XENO_HW_NMI_DEBUG_LATENCY_FALSE@
-CONFIG_XENO_HW_NMI_DEBUG_LATENCY_TRUE = @CONFIG_XENO_HW_NMI_DEBUG_LATENCY_TRUE@
-CONFIG_XENO_HW_SMI_DETECT_FALSE = @CONFIG_XENO_HW_SMI_DETECT_FALSE@
-CONFIG_XENO_HW_SMI_DETECT_TRUE = @CONFIG_XENO_HW_SMI_DETECT_TRUE@
-CONFIG_XENO_MAINT_FALSE = @CONFIG_XENO_MAINT_FALSE@
-CONFIG_XENO_MAINT_GCH_FALSE = @CONFIG_XENO_MAINT_GCH_FALSE@
-CONFIG_XENO_MAINT_GCH_TRUE = @CONFIG_XENO_MAINT_GCH_TRUE@
-CONFIG_XENO_MAINT_PGM_FALSE = @CONFIG_XENO_MAINT_PGM_FALSE@
-CONFIG_XENO_MAINT_PGM_TRUE = @CONFIG_XENO_MAINT_PGM_TRUE@
-CONFIG_XENO_MAINT_TRUE = @CONFIG_XENO_MAINT_TRUE@
-CONFIG_XENO_OLD_FASHIONED_BUILD_FALSE = @CONFIG_XENO_OLD_FASHIONED_BUILD_FALSE@
-CONFIG_XENO_OLD_FASHIONED_BUILD_TRUE = @CONFIG_XENO_OLD_FASHIONED_BUILD_TRUE@
-CONFIG_XENO_OPT_CONFIG_GZ_FALSE = @CONFIG_XENO_OPT_CONFIG_GZ_FALSE@
-CONFIG_XENO_OPT_CONFIG_GZ_TRUE = @CONFIG_XENO_OPT_CONFIG_GZ_TRUE@
-CONFIG_XENO_OPT_NATIVE_ALARM_FALSE = @CONFIG_XENO_OPT_NATIVE_ALARM_FALSE@
-CONFIG_XENO_OPT_NATIVE_ALARM_TRUE = @CONFIG_XENO_OPT_NATIVE_ALARM_TRUE@
-CONFIG_XENO_OPT_NATIVE_COND_FALSE = @CONFIG_XENO_OPT_NATIVE_COND_FALSE@
-CONFIG_XENO_OPT_NATIVE_COND_TRUE = @CONFIG_XENO_OPT_NATIVE_COND_TRUE@
-CONFIG_XENO_OPT_NATIVE_EVENT_FALSE = @CONFIG_XENO_OPT_NATIVE_EVENT_FALSE@
-CONFIG_XENO_OPT_NATIVE_EVENT_TRUE = @CONFIG_XENO_OPT_NATIVE_EVENT_TRUE@
-CONFIG_XENO_OPT_NATIVE_HEAP_FALSE = @CONFIG_XENO_OPT_NATIVE_HEAP_FALSE@
-CONFIG_XENO_OPT_NATIVE_HEAP_TRUE = @CONFIG_XENO_OPT_NATIVE_HEAP_TRUE@
-CONFIG_XENO_OPT_NATIVE_INTR_FALSE = @CONFIG_XENO_OPT_NATIVE_INTR_FALSE@
-CONFIG_XENO_OPT_NATIVE_INTR_TRUE = @CONFIG_XENO_OPT_NATIVE_INTR_TRUE@
-CONFIG_XENO_OPT_NATIVE_MUTEX_FALSE = @CONFIG_XENO_OPT_NATIVE_MUTEX_FALSE@
-CONFIG_XENO_OPT_NATIVE_MUTEX_TRUE = @CONFIG_XENO_OPT_NATIVE_MUTEX_TRUE@
-CONFIG_XENO_OPT_NATIVE_PIPE_FALSE = @CONFIG_XENO_OPT_NATIVE_PIPE_FALSE@
-CONFIG_XENO_OPT_NATIVE_PIPE_TRUE = @CONFIG_XENO_OPT_NATIVE_PIPE_TRUE@
-CONFIG_XENO_OPT_NATIVE_QUEUE_FALSE = @CONFIG_XENO_OPT_NATIVE_QUEUE_FALSE@
-CONFIG_XENO_OPT_NATIVE_QUEUE_TRUE = @CONFIG_XENO_OPT_NATIVE_QUEUE_TRUE@
-CONFIG_XENO_OPT_NATIVE_REGISTRY_FALSE = @CONFIG_XENO_OPT_NATIVE_REGISTRY_FALSE@
-CONFIG_XENO_OPT_NATIVE_REGISTRY_TRUE = @CONFIG_XENO_OPT_NATIVE_REGISTRY_TRUE@
-CONFIG_XENO_OPT_NATIVE_SEM_FALSE = @CONFIG_XENO_OPT_NATIVE_SEM_FALSE@
-CONFIG_XENO_OPT_NATIVE_SEM_TRUE = @CONFIG_XENO_OPT_NATIVE_SEM_TRUE@
-CONFIG_XENO_OPT_PERVASIVE_FALSE = @CONFIG_XENO_OPT_PERVASIVE_FALSE@
-CONFIG_XENO_OPT_PERVASIVE_TRUE = @CONFIG_XENO_OPT_PERVASIVE_TRUE@
-CONFIG_XENO_OPT_PIPE_FALSE = @CONFIG_XENO_OPT_PIPE_FALSE@
-CONFIG_XENO_OPT_PIPE_TRUE = @CONFIG_XENO_OPT_PIPE_TRUE@
-CONFIG_XENO_OPT_RTAI_FIFO_FALSE = @CONFIG_XENO_OPT_RTAI_FIFO_FALSE@
-CONFIG_XENO_OPT_RTAI_FIFO_TRUE = @CONFIG_XENO_OPT_RTAI_FIFO_TRUE@
-CONFIG_XENO_OPT_RTAI_SEM_FALSE = @CONFIG_XENO_OPT_RTAI_SEM_FALSE@
-CONFIG_XENO_OPT_RTAI_SEM_TRUE = @CONFIG_XENO_OPT_RTAI_SEM_TRUE@
-CONFIG_XENO_OPT_RTAI_SHM_FALSE = @CONFIG_XENO_OPT_RTAI_SHM_FALSE@
-CONFIG_XENO_OPT_RTAI_SHM_TRUE = @CONFIG_XENO_OPT_RTAI_SHM_TRUE@
-CONFIG_XENO_OPT_UDEV_FALSE = @CONFIG_XENO_OPT_UDEV_FALSE@
-CONFIG_XENO_OPT_UDEV_TRUE = @CONFIG_XENO_OPT_UDEV_TRUE@
-CONFIG_XENO_OPT_UVM_FALSE = @CONFIG_XENO_OPT_UVM_FALSE@
-CONFIG_XENO_OPT_UVM_TRUE = @CONFIG_XENO_OPT_UVM_TRUE@
-CONFIG_XENO_SKIN_NATIVE_FALSE = @CONFIG_XENO_SKIN_NATIVE_FALSE@
-CONFIG_XENO_SKIN_NATIVE_TRUE = @CONFIG_XENO_SKIN_NATIVE_TRUE@
-CONFIG_XENO_SKIN_POSIX_FALSE = @CONFIG_XENO_SKIN_POSIX_FALSE@
-CONFIG_XENO_SKIN_POSIX_TRUE = @CONFIG_XENO_SKIN_POSIX_TRUE@
-CONFIG_XENO_SKIN_PSOS_FALSE = @CONFIG_XENO_SKIN_PSOS_FALSE@
-CONFIG_XENO_SKIN_PSOS_TRUE = @CONFIG_XENO_SKIN_PSOS_TRUE@
-CONFIG_XENO_SKIN_RTAI_FALSE = @CONFIG_XENO_SKIN_RTAI_FALSE@
-CONFIG_XENO_SKIN_RTAI_TRUE = @CONFIG_XENO_SKIN_RTAI_TRUE@
-CONFIG_XENO_SKIN_RTDM_FALSE = @CONFIG_XENO_SKIN_RTDM_FALSE@
-CONFIG_XENO_SKIN_RTDM_TRUE = @CONFIG_XENO_SKIN_RTDM_TRUE@
-CONFIG_XENO_SKIN_UITRON_FALSE = @CONFIG_XENO_SKIN_UITRON_FALSE@
-CONFIG_XENO_SKIN_UITRON_TRUE = @CONFIG_XENO_SKIN_UITRON_TRUE@
-CONFIG_XENO_SKIN_VRTX_FALSE = @CONFIG_XENO_SKIN_VRTX_FALSE@
-CONFIG_XENO_SKIN_VRTX_TRUE = @CONFIG_XENO_SKIN_VRTX_TRUE@
-CONFIG_XENO_SKIN_VXWORKS_FALSE = @CONFIG_XENO_SKIN_VXWORKS_FALSE@
-CONFIG_XENO_SKIN_VXWORKS_TRUE = @CONFIG_XENO_SKIN_VXWORKS_TRUE@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CPPFLAGS_FOR_BUILD = @CPPFLAGS_FOR_BUILD@
-CPP_FOR_BUILD = @CPP_FOR_BUILD@
-CROSS_COMPILE = @CROSS_COMPILE@
-CXX = @CXX@
-CXXCPP = @CXXCPP@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DBX_ABS_SRCDIR_FALSE = @DBX_ABS_SRCDIR_FALSE@
-DBX_ABS_SRCDIR_TRUE = @DBX_ABS_SRCDIR_TRUE@
-DBX_DOC_FALSE = @DBX_DOC_FALSE@
-DBX_DOC_ROOT = @DBX_DOC_ROOT@
-DBX_DOC_TRUE = @DBX_DOC_TRUE@
-DBX_FOP = @DBX_FOP@
-DBX_GEN_DOC_ROOT = @DBX_GEN_DOC_ROOT@
-DBX_LINT = @DBX_LINT@
-DBX_MAYBE_NONET = @DBX_MAYBE_NONET@
-DBX_ROOT = @DBX_ROOT@
-DBX_XSLTPROC = @DBX_XSLTPROC@
-DBX_XSL_ROOT = @DBX_XSL_ROOT@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-DOXYGEN = @DOXYGEN@
-DOXYGEN_HAVE_DOT = @DOXYGEN_HAVE_DOT@
-DOXYGEN_SHOW_INCLUDE_FILES = @DOXYGEN_SHOW_INCLUDE_FILES@
-ECHO = @ECHO@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-F77 = @F77@
-FFLAGS = @FFLAGS@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-LATEX_BATCHMODE = @LATEX_BATCHMODE@
-LATEX_MODE = @LATEX_MODE@
-LDFLAGS = @LDFLAGS@
-LEX = @LEX@
-LEXLIB = @LEXLIB@
-LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBTOOL = @LIBTOOL@
-LN_S = @LN_S@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@
-MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@
-MAKEINFO = @MAKEINFO@
-OBJEXT = @OBJEXT@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-RANLIB = @RANLIB@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-STRIP = @STRIP@
-VERSION = @VERSION@
-XENO_BUILD_STRING = @XENO_BUILD_STRING@
-XENO_FP_CFLAGS = @XENO_FP_CFLAGS@
-XENO_HOST_STRING = @XENO_HOST_STRING@
-XENO_KBUILD_CLEAN = @XENO_KBUILD_CLEAN@
-XENO_KBUILD_CMD = @XENO_KBUILD_CMD@
-XENO_KBUILD_DISTCLEAN = @XENO_KBUILD_DISTCLEAN@
-XENO_KBUILD_ENV = @XENO_KBUILD_ENV@
-XENO_KMOD_APP_CFLAGS = @XENO_KMOD_APP_CFLAGS@
-XENO_KMOD_CFLAGS = @XENO_KMOD_CFLAGS@
-XENO_LINUX_DIR = @XENO_LINUX_DIR@
-XENO_LINUX_VERSION = @XENO_LINUX_VERSION@
-XENO_MAYBE_DOCDIR = @XENO_MAYBE_DOCDIR@
-XENO_MAYBE_SIMDIR = @XENO_MAYBE_SIMDIR@
-XENO_MODULE_DIR = @XENO_MODULE_DIR@
-XENO_MODULE_EXT = @XENO_MODULE_EXT@
-XENO_PIPE_NRDEV = @XENO_PIPE_NRDEV@
-XENO_SYMBOL_DIR = @XENO_SYMBOL_DIR@
-XENO_TARGET_ARCH = @XENO_TARGET_ARCH@
-XENO_TARGET_SUBARCH = @XENO_TARGET_SUBARCH@
-XENO_USER_APP_CFLAGS = @XENO_USER_APP_CFLAGS@
-XENO_USER_CFLAGS = @XENO_USER_CFLAGS@
-ac_ct_AR = @ac_ct_AR@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CC_FOR_BUILD = @ac_ct_CC_FOR_BUILD@
-ac_ct_CXX = @ac_ct_CXX@
-ac_ct_F77 = @ac_ct_F77@
-ac_ct_RANLIB = @ac_ct_RANLIB@
-ac_ct_STRIP = @ac_ct_STRIP@
-am__fastdepCC_FALSE = @am__fastdepCC_FALSE@
-am__fastdepCC_TRUE = @am__fastdepCC_TRUE@
-am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@
-am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_vendor = @build_vendor@
-datadir = @datadir@
-exec_prefix = @exec_prefix@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localstatedir = @localstatedir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-oldincludedir = @oldincludedir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-subdirs = @subdirs@
-sysconfdir = @sysconfdir@
-target = @target@
-target_alias = @target_alias@
-target_cpu = @target_cpu@
-target_os = @target_os@
-target_vendor = @target_vendor@
-moduledir = $(DESTDIR)@XENO_MODULE_DIR@
-modext = @XENO_MODULE_EXT@
-libhal_SRC = ppc.c switch.S $(am__append_1)
-distfiles = fpu.S
-EXTRA_DIST = $(libhal_SRC) $(distfiles) Makefile
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/GNUmakefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/GNUmakefile.am  
$(am__configure_deps)
-       @for dep in $?; do \
-         case '$(am__configure_deps)' in \
-           *$$dep*) \
-             cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
-               && exit 0; \
-             exit 1;; \
-         esac; \
-       done; \
-       echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign  
arch/ppc/hal/GNUmakefile'; \
-       cd $(top_srcdir) && \
-         $(AUTOMAKE) --foreign  arch/ppc/hal/GNUmakefile
-.PRECIOUS: GNUmakefile
-GNUmakefile: $(srcdir)/GNUmakefile.in $(top_builddir)/config.status
-       @case '$?' in \
-         *config.status*) \
-           cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
-         *) \
-           echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ 
$(am__depfiles_maybe)'; \
-           cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ 
$(am__depfiles_maybe);; \
-       esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure 
$(CONFIG_STATUS_DEPENDENCIES)
-       cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
-       cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
-       cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-mostlyclean-libtool:
-       -rm -f *.lo
-
-clean-libtool:
-       -rm -rf .libs _libs
-
-distclean-libtool:
-       -rm -f libtool
-uninstall-info-am:
-tags: TAGS
-TAGS:
-
-ctags: CTAGS
-CTAGS:
-
-
-distdir: $(DISTFILES)
-       @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
-       topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
-       list='$(DISTFILES)'; for file in $$list; do \
-         case $$file in \
-           $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
-           $(top_srcdir)/*) file=`echo "$$file" | sed 
"s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
-         esac; \
-         if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
-         dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
-         if test "$$dir" != "$$file" && test "$$dir" != "."; then \
-           dir="/$$dir"; \
-           $(mkdir_p) "$(distdir)$$dir"; \
-         else \
-           dir=''; \
-         fi; \
-         if test -d $$d/$$file; then \
-           if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
-             cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
-           fi; \
-           cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
-         else \
-           test -f $(distdir)/$$file \
-           || cp -p $$d/$$file $(distdir)/$$file \
-           || exit 1; \
-         fi; \
-       done
-check-am: all-am
-check: check-am
-all-am: GNUmakefile all-local
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
-       @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
-       $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
-         install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
-         `test -z '$(STRIP)' || \
-           echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
-       -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-
-maintainer-clean-generic:
-       @echo "This command is intended for maintainers to use"
-       @echo "it deletes files that may require special tools to rebuild."
-clean: clean-am
-
-clean-am: clean-generic clean-libtool clean-local mostlyclean-am
-
-distclean: distclean-am
-       -rm -f GNUmakefile
-distclean-am: clean-am distclean-generic distclean-libtool
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-exec-am: install-exec-local
-
-install-info: install-info-am
-
-install-man:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
-       -rm -f GNUmakefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic mostlyclean-libtool
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-info-am
-
-.PHONY: all all-am all-local check check-am clean clean-generic \
-       clean-libtool clean-local distclean distclean-generic \
-       distclean-libtool distdir dvi dvi-am html html-am info info-am \
-       install install-am install-data install-data-am install-exec \
-       install-exec-am install-exec-local install-info \
-       install-info-am install-man install-strip installcheck \
-       installcheck-am installdirs maintainer-clean \
-       maintainer-clean-generic mostlyclean mostlyclean-generic \
-       mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \
-       uninstall-info-am
-
-
-xeno_hal.ko: @XENO_KBUILD_ENV@
-xeno_hal.ko: $(libhal_SRC) generic.c FORCE
-       @XENO_KBUILD_CMD@ xeno_extradef="@XENO_KMOD_CFLAGS@"
-
-clean-local:
-       @XENO_KBUILD_CLEAN@
-
-all-local: xeno_hal$(modext)
-@CONFIG_XENO_OLD_FASHIONED_BUILD_TRUE@	$(mkinstalldirs) $(top_srcdir)/modules
-@CONFIG_XENO_OLD_FASHIONED_BUILD_TRUE@	$(INSTALL_DATA) $^ $(top_srcdir)/modules
-
-install-exec-local: xeno_hal$(modext)
-       $(mkinstalldirs) $(moduledir)
-       $(INSTALL_DATA) $< $(moduledir)
-
-.PHONY: FORCE
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff -Nru --exclude=.svn xenomai-orig/arch/ppc/hal/Makefile 
xenomai-devel/arch/ppc/hal/Makefile
--- xenomai-orig/arch/ppc/hal/Makefile  2005-10-23 11:00:13.000000000 +0300
+++ xenomai-devel/arch/ppc/hal/Makefile 1970-01-01 02:00:00.000000000 +0200
@@ -1,13 +0,0 @@
-EXTRA_CFLAGS += -I$(xeno_srctree)/include \
-               -I$(src)/../../../include \
-               -I$(src)/../../.. \
-               $(xeno_extradef)
-
-EXTRA_AFLAGS += -I$(xeno_srctree)/include \
-               -I$(src)/../../../include \
-               -I$(src)/../../.. \
-               $(xeno_extradef)
-
-obj-m += xeno_hal.o
-
-xeno_hal-objs := $(xeno_objs)
diff -Nru --exclude=.svn xenomai-orig/arch/ppc/hal/ppc.c 
xenomai-devel/arch/ppc/hal/ppc.c
--- xenomai-orig/arch/ppc/hal/ppc.c     2005-10-11 10:32:15.000000000 +0300
+++ xenomai-devel/arch/ppc/hal/ppc.c    1970-01-01 02:00:00.000000000 +0200
@@ -1,184 +0,0 @@
-/**
- *   @ingroup hal
- *   @file
- *
- *   Adeos-based Real-Time Abstraction Layer for PPC.
- *
- *   Xenomai is free software; you can redistribute it and/or
- *   modify it under the terms of the GNU General Public License as
- *   published by the Free Software Foundation, Inc., 675 Mass Ave,
- *   Cambridge MA 02139, USA; either version 2 of the License, or (at
- *   your option) any later version.
- *
- *   Xenomai is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *   General Public License for more details.
- *
- *   You should have received a copy of the GNU General Public License
- *   along with this program; if not, write to the Free Software
- *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
- *   02111-1307, USA.
- */
-
-/**
- * @addtogroup hal
- *
- * PowerPC-specific HAL services.
- *
- *@{*/
-
-#include <linux/version.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/console.h>
-#include <linux/kallsyms.h>
-#include <asm/system.h>
-#include <asm/hardirq.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/unistd.h>
-#include <nucleus/asm/hal.h>
-#ifdef CONFIG_PROC_FS
-#include <linux/proc_fs.h>
-#endif /* CONFIG_PROC_FS */
-#include <stdarg.h>
-
-static int rthal_periodic_p;
-
-int rthal_timer_request (void (*handler)(void),
-                        unsigned long nstick)
-{
-    unsigned long flags;
-    int err;
-
-    flags = rthal_critical_enter(NULL);
-
-    if (nstick > 0)
-       {
-       /* Periodic setup --
-          Use the built-in Adeos service directly. */
-       err = rthal_set_timer(nstick);
-       rthal_periodic_p = 1;
-       }
-    else
-       {
-       /* Oneshot setup. */
-       disarm_decr[rthal_processor_id()] = 1;
-       rthal_periodic_p = 0;
-#ifdef CONFIG_40x
-        mtspr(SPRN_TCR,mfspr(SPRN_TCR) & ~TCR_ARE); /* Auto-reload off. */
-#endif /* CONFIG_40x */
-       rthal_timer_program_shot(tb_ticks_per_jiffy);
-       }
-
-    rthal_irq_release(RTHAL_TIMER_IRQ);
-
-    err = rthal_irq_request(RTHAL_TIMER_IRQ,
-                           (rthal_irq_handler_t)handler,
-                           NULL,
-                           NULL);
-
-    rthal_critical_exit(flags);
-
-    return err;
-}
-
-void rthal_timer_release (void)
-
-{
-    unsigned long flags;
-
-    flags = rthal_critical_enter(NULL);
-
-    if (rthal_periodic_p)
-       rthal_reset_timer();
-    else
-       {
-       disarm_decr[rthal_processor_id()] = 0;
-#ifdef CONFIG_40x
-       mtspr(SPRN_TCR,mfspr(SPRN_TCR)|TCR_ARE); /* Auto-reload on. */
-       mtspr(SPRN_PIT,tb_ticks_per_jiffy);
-#else /* !CONFIG_40x */
-       set_dec(tb_ticks_per_jiffy);
-#endif /* CONFIG_40x */
-       }
-
-    rthal_irq_release(RTHAL_TIMER_IRQ);
-
-    rthal_critical_exit(flags);
-}
-
-unsigned long rthal_timer_calibrate (void)
-
-{
-    return 1000000000 / RTHAL_CPU_FREQ;
-}
-
-static inline int do_exception_event (unsigned event, unsigned domid, void 
*data)
-
-{
-    rthal_declare_cpuid;
-
-    rthal_load_cpuid();
-
-    if (domid == RTHAL_DOMAIN_ID)
-       {
-       rthal_realtime_faults[cpuid][event]++;
-
-       if (rthal_trap_handler != NULL &&
-           test_bit(cpuid,&rthal_cpu_realtime) &&
-           rthal_trap_handler(event,domid,data) != 0)
-           return RTHAL_EVENT_STOP;
-       }
-
-    return RTHAL_EVENT_PROPAGATE;
-}
-
-RTHAL_DECLARE_EVENT(exception_event);
-
-static inline void do_rthal_domain_entry (void)
-
-{
-    unsigned trapnr;
-
-    /* Trap all faults. */
-    for (trapnr = 0; trapnr < RTHAL_NR_FAULTS; trapnr++)
-       rthal_catch_exception(trapnr,&exception_event);
-
-    printk(KERN_INFO "Xenomai: hal/ppc loaded.\n");
-}
-
-RTHAL_DECLARE_DOMAIN(rthal_domain_entry);
-
-int rthal_arch_init (void)
-
-{
-    if (rthal_cpufreq_arg == 0)
-       /* The CPU frequency is expressed as the timebase frequency
-          for this port. */
-       rthal_cpufreq_arg = (unsigned long)rthal_get_cpufreq();
-
-    if (rthal_timerfreq_arg == 0)
-       rthal_timerfreq_arg = rthal_tunables.cpu_freq;
-
-    return 0;
-}
-
-void rthal_arch_cleanup (void)
-
-{
-    /* Nothing to cleanup so far. */
-}
-
-/*@}*/
-
-EXPORT_SYMBOL(rthal_switch_context);
-
-#ifdef CONFIG_XENO_HW_FPU
-EXPORT_SYMBOL(rthal_init_fpu);
-EXPORT_SYMBOL(rthal_save_fpu);
-EXPORT_SYMBOL(rthal_restore_fpu);
-#endif /* CONFIG_XENO_HW_FPU */
diff -Nru --exclude=.svn xenomai-orig/arch/ppc/hal/switch.S 
xenomai-devel/arch/ppc/hal/switch.S
--- xenomai-orig/arch/ppc/hal/switch.S  2005-10-17 11:02:57.000000000 +0300
+++ xenomai-devel/arch/ppc/hal/switch.S 1970-01-01 02:00:00.000000000 +0200
@@ -1,119 +0,0 @@
-/*
- * Copyright (C) 2004 Philippe Gerum.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
- * USA; either version 2 of the License, or (at your option) any later
- * version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <asm/processor.h>
-#include <asm/cputable.h>
-#include <asm/page.h>
-#include <asm/ppc_asm.h>
-#include <xeno_config.h> 
-
-#define RTHAL_SAVEREG(reg, pos)        stw reg,STACK_FRAME_OVERHEAD+4*(pos)(r1)
-#define RTHAL_LOADREG(reg, pos)        lwz reg,STACK_FRAME_OVERHEAD+4*(pos)(r1)
-
-/*
- * r3=out_kspp, r4=in_kspp
- */
-_GLOBAL(rthal_switch_context)
-
-        stwu    r1,-124(r1)
-
-        /* Save general purpose registers. */
-
-       RTHAL_SAVEREG(r31,0)
-       RTHAL_SAVEREG(r30,1)
-       RTHAL_SAVEREG(r29,2)
-       RTHAL_SAVEREG(r28,3)
-       RTHAL_SAVEREG(r27,4)
-       RTHAL_SAVEREG(r26,5)
-       RTHAL_SAVEREG(r25,6)
-       RTHAL_SAVEREG(r24,7)
-       RTHAL_SAVEREG(r23,8)
-       RTHAL_SAVEREG(r22,9)
-       RTHAL_SAVEREG(r21,10)
-       RTHAL_SAVEREG(r20,11)
-       RTHAL_SAVEREG(r19,12)
-       RTHAL_SAVEREG(r18,13)
-       RTHAL_SAVEREG(r17,14)
-       RTHAL_SAVEREG(r16,15)
-       RTHAL_SAVEREG(r15,16)
-       RTHAL_SAVEREG(r14,17)
-       RTHAL_SAVEREG(r13,18)
-       RTHAL_SAVEREG(r3,19)
-       RTHAL_SAVEREG(r2,20)
-       RTHAL_SAVEREG(r0,21)
-
-        /* Save special registers. */
-       
-       mfctr    r2
-       RTHAL_SAVEREG(r2,22)
-        mfcr     r2
-       RTHAL_SAVEREG(r2,23)
-        mfxer    r2
-       RTHAL_SAVEREG(r2,24)
-        mflr     r2
-       RTHAL_SAVEREG(r2,25)
-        mfmsr    r2
-       RTHAL_SAVEREG(r2,26)
-
-        /* Switch stacks. */
-       
-        stw      r1,0(r3)       /* *out_kspp = sp */
-        lwz      r1,0(r4)       /* sp = *in_kspp */
-
-        /* Restore special registers. */
-
-       RTHAL_LOADREG(r2,26)
-        mtmsr    r2
-       RTHAL_LOADREG(r2,25)
-        mtlr     r2
-       RTHAL_LOADREG(r2,24)
-        mtxer    r2
-       RTHAL_LOADREG(r2,23)
-        mtcr     r2
-       RTHAL_LOADREG(r2,22)
-        mtctr    r2
-
-       /* Restore general purpose registers. */
-       
-       RTHAL_LOADREG(r0,21)
-       RTHAL_LOADREG(r2,20)
-       RTHAL_LOADREG(r3,19)
-       RTHAL_LOADREG(r13,18)
-       RTHAL_LOADREG(r14,17)
-       RTHAL_LOADREG(r15,16)
-       RTHAL_LOADREG(r16,15)
-       RTHAL_LOADREG(r17,14)
-       RTHAL_LOADREG(r18,13)
-       RTHAL_LOADREG(r19,12)
-       RTHAL_LOADREG(r20,11)
-       RTHAL_LOADREG(r21,10)
-       RTHAL_LOADREG(r22,9)
-       RTHAL_LOADREG(r23,8)
-       RTHAL_LOADREG(r24,7)
-       RTHAL_LOADREG(r25,6)
-       RTHAL_LOADREG(r26,5)
-       RTHAL_LOADREG(r27,4)
-       RTHAL_LOADREG(r28,3)
-       RTHAL_LOADREG(r29,2)
-       RTHAL_LOADREG(r30,1)
-       RTHAL_LOADREG(r31,0)
-
-        addi    r1,r1,124
-
-        blr
diff -Nru --exclude=.svn xenomai-orig/arch/ppc/Kconfig 
xenomai-devel/arch/ppc/Kconfig
--- xenomai-orig/arch/ppc/Kconfig       2005-10-11 10:32:16.000000000 +0300
+++ xenomai-devel/arch/ppc/Kconfig      1970-01-01 02:00:00.000000000 +0200
@@ -1,71 +0,0 @@
-mainmenu "Xenomai/powerpc configuration"
-
-source Kconfig
-
-source "nucleus/Kconfig"
-
-menu "Machine (powerpc)"
-
-config XENO_HW_FPU
-       bool "Enable FPU support"
-       default y
-       help
-       The FPU executes instructions from the processor's normal
-       instruction stream. It can handle the types of high-precision
-       floating-point processing operations commonly found in
-       scientific, engineering, and business applications.
-       If your target system has no FPU, say NO here; otherwise,
-       enabling FPU support when the hardware is available may
-       greatly improve performance.
-
-config XENO_HW_PERIODIC_TIMER
-       bool "Enable periodic timer support"
-       default y
-       help
-       On this architecture, the nucleus provides both aperiodic and
-       periodic timing modes. In aperiodic mode, timing accuracy is
-       higher - since it is not rounded to a constant time slice - at
-       the expense of a lesser efficicency when many timers are
-       simultaneously active. The aperiodic mode gives better results
-       in configuration involving a few threads requesting timing
-       services over different time scales that cannot be easily
-       expressed as multiples of a single base tick, or would lead to
-       a waste of high frequency periodic ticks. You can disable
-       the periodic support for this architecture to save a few
-       hundreds bytes if you plan to use the system timer in
-       aperiodic mode only.
-
-config XENO_HW_TIMER_LATENCY
-       depends on XENO_OPT_EXPERT
-       string "Timer tuning latency (ns)"
-       default 0
-       help
-       This parameter accounts for the time (in nanoseconds) needed
-       to program the underlying time source in one-shot timing mode.
-       This value will be used to reduce the scheduling jitter induced
-       by the time needed to setup the timer for its next shot. A
-       default value of 0 (recommended) will cause this value to be
-       estimated by the nucleus at startup.
-
-config XENO_HW_SCHED_LATENCY
-       depends on XENO_OPT_EXPERT
-       string "Scheduling latency (ns)"
-       default 0
-       help
-       Scheduling latency is the time between the termination of an
-       interrupt handler and the execution of the first instruction
-       of the real-time thread this handler resumes. A
-       default value of 0 (recommended) will cause this value to be
-       estimated by the nucleus at startup.
-
-endmenu
-
-source "skins/Kconfig"
-
-menu "Drivers"
-
-source "drivers/Kconfig"
-
-endmenu
-
-source "sim/Kconfig"
diff -Nru --exclude=.svn 
xenomai-orig/arch/ppc/patches/adeos-ipipe-2.6.13-ppc-1.0-03.patch 
xenomai-devel/arch/ppc/patches/adeos-ipipe-2.6.13-ppc-1.0-03.patch
--- xenomai-orig/arch/ppc/patches/adeos-ipipe-2.6.13-ppc-1.0-03.patch   
2005-10-23 11:00:14.000000000 +0300
+++ xenomai-devel/arch/ppc/patches/adeos-ipipe-2.6.13-ppc-1.0-03.patch  
1970-01-01 02:00:00.000000000 +0200
@@ -1,4214 +0,0 @@
---- 2.6.13/arch/ppc/Kconfig    2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/arch/ppc/Kconfig      2005-09-07 14:02:09.000000000 +0200
-@@ -944,6 +944,8 @@ config NR_CPUS
-       depends on SMP
-       default "4"
- 
-+source "kernel/ipipe/Kconfig"
-+
- config HIGHMEM
-       bool "High memory support"
- 
---- 2.6.13/arch/ppc/kernel/Makefile    2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/arch/ppc/kernel/Makefile      2005-09-07 14:02:09.000000000 
+0200
-@@ -29,6 +29,7 @@ obj-$(CONFIG_ALTIVEC)                += vecemu.o vecto
- ifndef CONFIG_E200
- obj-$(CONFIG_FSL_BOOKE)               += perfmon_fsl_booke.o
- endif
-+obj-$(CONFIG_IPIPE)           += ipipe-core.o ipipe-root.o
- obj-$(CONFIG_KEXEC)           += machine_kexec.o relocate_kernel.o
- 
- ifndef CONFIG_MATH_EMULATION
---- 2.6.13/arch/ppc/kernel/entry.S     2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/arch/ppc/kernel/entry.S       2005-10-19 15:07:54.000000000 
+0200
-@@ -44,6 +44,14 @@
- #define LOAD_MSR_KERNEL(r, x) li r,(x)
- #endif
- 
-+#ifdef CONFIG_IPIPE
-+#define STALL_ROOT_COND               bl      __ipipe_stall_root_raw
-+#define UNSTALL_ROOT_COND     bl      __ipipe_unstall_root_raw
-+#else /* !CONFIG_IPIPE */
-+#define STALL_ROOT_COND
-+#define UNSTALL_ROOT_COND
-+#endif /* CONFIG_IPIPE */
-+
- #ifdef CONFIG_BOOKE
- #include "head_booke.h"
- #define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)      \
-@@ -144,8 +152,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
-       .globl transfer_to_handler_cont
- transfer_to_handler_cont:
-       lwz     r11,THREAD_INFO-THREAD(r12)
-+#ifdef CONFIG_IPIPE
-+      /* Allow for private kernel-based stacks: those must not cause
-+      the stack overflow detection to trigger when some activity has
-+      been preempted over them. We just check if the kernel stack is
-+      not treading on the memory area ranging from
-+      &current->thread_info to &current->thread, which is coarser
-+      than the vanilla implementation, but likely sensitive enough
-+      to catch overflows soon enough though.*/
-+      addi    r9,r11,THREAD
-+      cmplw   0,r1,r11
-+      cmplw   1,r1,r9
-+      crand   1,1,4
-+      bgt-    stack_ovf               /* if r11 < r1 < r11+THREAD */
-+#else /* CONFIG_IPIPE */
-       cmplw   r1,r11                  /* if r1 <= current->thread_info */
-       ble-    stack_ovf               /* then the kernel stack overflowed */
-+#endif /* CONFIG_IPIPE */
- 3:
-       mflr    r9
-       lwz     r11,0(r9)               /* virtual address of handler */
-@@ -196,6 +219,21 @@ _GLOBAL(DoSyscall)
-       lwz     r11,_CCR(r1)    /* Clear SO bit in CR */
-       rlwinm  r11,r11,0,4,2
-       stw     r11,_CCR(r1)
-+#ifdef CONFIG_IPIPE
-+      addi    r3,r1,GPR0
-+      bl      __ipipe_syscall_root
-+      cmpwi   r3,0
-+      lwz     r3,GPR3(r1)
-+      lwz     r0,GPR0(r1)
-+      lwz     r4,GPR4(r1)
-+      lwz     r5,GPR5(r1)
-+      lwz     r6,GPR6(r1)
-+      lwz     r7,GPR7(r1)
-+      lwz     r8,GPR8(r1)
-+      lwz     r9,GPR9(r1)
-+      bgt     .ipipe_end_syscall
-+      blt     ret_from_syscall
-+#endif /* CONFIG_IPIPE */
- #ifdef SHOW_SYSCALLS
-       bl      do_show_syscall
- #endif /* SHOW_SYSCALLS */
-@@ -265,6 +303,14 @@ syscall_exit_cont:
-       SYNC
-       RFI
- 
-+#ifdef CONFIG_IPIPE
-+.ipipe_end_syscall:
-+      LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
-+      SYNC
-+      MTMSRD(r10)
-+      b syscall_exit_cont
-+#endif /* CONFIG_IPIPE */
-+
- 66:   li      r3,-ENOSYS
-       b       ret_from_syscall
- 
-@@ -650,6 +696,12 @@ ret_from_except:
-       SYNC                    /* Some chip revs have problems here... */
-       MTMSRD(r10)             /* disable interrupts */
- 
-+#ifdef CONFIG_IPIPE
-+        bl __ipipe_check_root
-+        cmpwi   r3, 0
-+        beq- restore
-+#endif /* CONFIG_IPIPE */
-+
-       lwz     r3,_MSR(r1)     /* Returning to user mode? */
-       andi.   r0,r3,MSR_PR
-       beq     resume_kernel
-@@ -685,11 +737,13 @@ resume_kernel:
-       beq+    restore
-       andi.   r0,r3,MSR_EE    /* interrupts off? */
-       beq     restore         /* don't schedule if so */
-+      STALL_ROOT_COND
- 1:    bl      preempt_schedule_irq
-       rlwinm  r9,r1,0,0,18
-       lwz     r3,TI_FLAGS(r9)
-       andi.   r0,r3,_TIF_NEED_RESCHED
-       bne-    1b
-+      UNSTALL_ROOT_COND
- #else
- resume_kernel:
- #endif /* CONFIG_PREEMPT */
-@@ -1000,3 +1054,10 @@ machine_check_in_rtas:
-       /* XXX load up BATs and panic */
- 
- #endif /* CONFIG_PPC_OF */
-+
-+#ifdef CONFIG_IPIPE
-+_GLOBAL(__ipipe_ret_from_except)
-+        cmpwi   r3, 0
-+        bne+ ret_from_except
-+        b restore
-+#endif /* CONFIG_IPIPE */
---- 2.6.13/arch/ppc/kernel/head.S      2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/arch/ppc/kernel/head.S        2005-09-07 14:02:09.000000000 
+0200
-@@ -335,6 +335,12 @@ i##n:                                                     
        \
-       EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \
-                         ret_from_except_full)
- 
-+#ifdef CONFIG_IPIPE
-+#define EXC_XFER_IPIPE(n, hdlr)               \
-+      EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
-+                        __ipipe_ret_from_except)
-+#endif /* CONFIG_IPIPE */
-+
- #define EXC_XFER_LITE(n, hdlr)                \
-       EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
-                         ret_from_except)
-@@ -445,7 +451,11 @@ InstructionAccess:
- #endif /* CONFIG_PPC64BRIDGE */
- 
- /* External interrupt */
-+#ifdef CONFIG_IPIPE
-+      EXCEPTION(0x500, HardwareInterrupt, __ipipe_grab_irq, EXC_XFER_IPIPE)
-+#else /* !CONFIG_IPIPE */
-       EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
-+#endif /* CONFIG_IPIPE */
- 
- /* Alignment exception */
-       . = 0x600
-@@ -470,7 +480,11 @@ FPUnavailable:
-       EXC_XFER_EE_LITE(0x800, KernelFP)
- 
- /* Decrementer */
-+#ifdef CONFIG_IPIPE
-+      EXCEPTION(0x900, Decrementer, __ipipe_grab_timer, EXC_XFER_IPIPE)
-+#else /* !CONFIG_IPIPE */
-       EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
-+#endif /* CONFIG_IPIPE */
- 
-       EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE)
---- 2.6.13/arch/ppc/kernel/head_44x.S  2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/arch/ppc/kernel/head_44x.S    2005-09-07 14:02:09.000000000 
+0200
-@@ -430,7 +430,11 @@ interrupt_base:
-       INSTRUCTION_STORAGE_EXCEPTION
- 
-       /* External Input Interrupt */
--      EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
-+#ifdef CONFIG_IPIPE
-+      EXCEPTION(0x0500, ExternalInput, __ipipe_grab_irq, EXC_XFER_IPIPE)
-+#else /* !CONFIG_IPIPE */
-+      EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
-+#endif /* CONFIG_IPIPE */
- 
-       /* Alignment Interrupt */
-       ALIGNMENT_EXCEPTION
---- 2.6.13/arch/ppc/kernel/head_4xx.S  2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/arch/ppc/kernel/head_4xx.S    2005-09-07 14:02:09.000000000 
+0200
-@@ -229,6 +229,12 @@ label:
-       EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, 
transfer_to_handler_full, \
-                         ret_from_except_full)
- 
-+#ifdef CONFIG_IPIPE
-+#define EXC_XFER_IPIPE(n, hdlr)               \
-+      EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
-+                        __ipipe_ret_from_except)
-+#endif /* CONFIG_IPIPE */
-+
- #define EXC_XFER_LITE(n, hdlr)                \
-       EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
-                         ret_from_except)
-@@ -397,7 +403,11 @@ label:
-       EXC_XFER_EE_LITE(0x400, handle_page_fault)
- 
- /* 0x0500 - External Interrupt Exception */
-+#ifdef CONFIG_IPIPE
-+      EXCEPTION(0x0500, HardwareInterrupt, __ipipe_grab_irq, EXC_XFER_IPIPE)
-+#else /* !CONFIG_IPIPE */
-       EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
-+#endif /* CONFIG_IPIPE */
- 
- /* 0x0600 - Alignment Exception */
-       START_EXCEPTION(0x0600, Alignment)
-@@ -435,7 +445,11 @@ label:
-       lis     r0,TSR_PIS@h
-       mtspr   SPRN_TSR,r0             /* Clear the PIT exception */
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-+#ifdef CONFIG_IPIPE
-+      EXC_XFER_IPIPE(0x1000, __ipipe_grab_timer)
-+#else /* !CONFIG_IPIPE */
-       EXC_XFER_LITE(0x1000, timer_interrupt)
-+#endif /* CONFIG_IPIPE */
- 
- #if 0
- /* NOTE:
---- 2.6.13/arch/ppc/kernel/head_8xx.S  2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/arch/ppc/kernel/head_8xx.S    2005-09-07 14:02:09.000000000 
+0200
-@@ -189,6 +189,11 @@ i##n:                                                     
        \
- #define EXC_XFER_STD(n, hdlr)         \
-       EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \
-                         ret_from_except_full)
-+#ifdef CONFIG_IPIPE
-+#define EXC_XFER_IPIPE(n, hdlr)               \
-+      EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
-+                        __ipipe_ret_from_except)
-+#endif /* CONFIG_IPIPE */
- 
- #define EXC_XFER_LITE(n, hdlr)                \
-       EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
-@@ -241,7 +246,11 @@ InstructionAccess:
-       EXC_XFER_EE_LITE(0x400, handle_page_fault)
- 
- /* External interrupt */
-+#ifdef CONFIG_IPIPE
-+      EXCEPTION(0x500, HardwareInterrupt, __ipipe_grab_irq, EXC_XFER_IPIPE)
-+#else /* !CONFIG_IPIPE */
-       EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
-+#endif /* CONFIG_IPIPE */
- 
- /* Alignment exception */
-       . = 0x600
-@@ -262,7 +271,11 @@ Alignment:
-       EXCEPTION(0x800, FPUnavailable, UnknownException, EXC_XFER_STD)
- 
- /* Decrementer */
-+#ifdef CONFIG_IPIPE
-+      EXCEPTION(0x900, Decrementer, __ipipe_grab_timer, EXC_XFER_IPIPE)
-+#else /* !CONFIG_IPIPE */
-       EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
-+#endif /* CONFIG_IPIPE */
- 
-       EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE)
---- 2.6.13/arch/ppc/kernel/head_booke.h        2005-08-29 01:41:01.000000000 
+0200
-+++ 2.6.13-ipipe/arch/ppc/kernel/head_booke.h  2005-09-07 14:02:09.000000000 
+0200
-@@ -187,6 +187,12 @@ label:
-       EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, 
transfer_to_handler_full, \
-                         ret_from_except_full)
- 
-+#ifdef CONFIG_IPIPE
-+#define EXC_XFER_IPIPE(n, hdlr)               \
-+      EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
-+                        __ipipe_ret_from_except)
-+#endif /* CONFIG_IPIPE */
-+
- #define EXC_XFER_LITE(n, hdlr)                \
-       EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
-                         ret_from_except)
-@@ -345,6 +351,15 @@ label:
-       addi    r3,r1,STACK_FRAME_OVERHEAD;                                   \
-       EXC_XFER_STD(0x0700, ProgramCheckException)
- 
-+#ifdef CONFIG_IPIPE
-+#define DECREMENTER_EXCEPTION                                               \
-+      START_EXCEPTION(Decrementer)                                          \
-+      NORMAL_EXCEPTION_PROLOG;                                              \
-+      lis     r0,TSR_DIS@h;           /* Setup the DEC interrupt mask 
*/    \
-+      mtspr   SPRN_TSR,r0;            /* Clear the DEC interrupt */         \
-+      addi    r3,r1,STACK_FRAME_OVERHEAD;                                   \
-+      EXC_XFER_IPIPE(0x0900, __ipipe_grab_timer)
-+#else /* !CONFIG_IPIPE */
- #define DECREMENTER_EXCEPTION                                               \
-       START_EXCEPTION(Decrementer)                                          \
-       NORMAL_EXCEPTION_PROLOG;                                              \
-@@ -352,6 +367,7 @@ label:
-       mtspr   SPRN_TSR,r0;            /* Clear the DEC interrupt */         \
-       addi    r3,r1,STACK_FRAME_OVERHEAD;                                   \
-       EXC_XFER_LITE(0x0900, timer_interrupt)
-+#endif /* CONFIG_IPIPE */
- 
- #define FP_UNAVAILABLE_EXCEPTION                                            \
-       START_EXCEPTION(FloatingPointUnavailable)                             \
---- 2.6.13/arch/ppc/kernel/head_fsl_booke.S    2005-08-29 01:41:01.000000000 
+0200
-+++ 2.6.13-ipipe/arch/ppc/kernel/head_fsl_booke.S      2005-09-07 
14:02:09.000000000 +0200
-@@ -528,7 +528,11 @@ interrupt_base:
-       INSTRUCTION_STORAGE_EXCEPTION
- 
-       /* External Input Interrupt */
-+#ifdef CONFIG_IPIPE
-+      EXCEPTION(0x0500, ExternalInput, __ipipe_grab_irq, EXC_XFER_IPIPE)
-+#else /* !CONFIG_IPIPE */
-       EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
-+#endif /* CONFIG_IPIPE */
- 
-       /* Alignment Interrupt */
-       ALIGNMENT_EXCEPTION
---- 2.6.13/arch/ppc/kernel/idle.c      2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/arch/ppc/kernel/idle.c        2005-10-14 12:47:46.000000000 
+0200
-@@ -39,8 +39,9 @@ void default_idle(void)
-       powersave = ppc_md.power_save;
- 
-       if (!need_resched()) {
--              if (powersave != NULL)
-+              if (powersave != NULL) {
-                       powersave();
-+              }
- #ifdef CONFIG_SMP
-               else {
-                       set_thread_flag(TIF_POLLING_NRFLAG);
-@@ -50,6 +51,7 @@ void default_idle(void)
-               }
- #endif
-       }
-+
-       if (need_resched())
-               schedule();
- }
-@@ -59,11 +61,13 @@ void default_idle(void)
-  */
- void cpu_idle(void)
- {
--      for (;;)
-+      for (;;) {
-+              ipipe_suspend_domain();
-               if (ppc_md.idle != NULL)
-                       ppc_md.idle();
-               else
-                       default_idle();
-+      }
- }
- 
- #if defined(CONFIG_SYSCTL) && defined(CONFIG_6xx)
---- 2.6.13/arch/ppc/kernel/ipipe-core.c        1970-01-01 01:00:00.000000000 
+0100
-+++ 2.6.13-ipipe/arch/ppc/kernel/ipipe-core.c  2005-10-16 21:51:00.000000000 
+0200
-@@ -0,0 +1,536 @@
-+/* -*- linux-c -*-
-+ * linux/arch/ppc/kernel/ipipe-core.c
-+ *
-+ * Copyright (C) 2002-2005 Philippe Gerum.
-+ * Copyright (C) 2004 Wolfgang Grandegger (Adeos/ppc port over 2.4).
-+ * Copyright (C) 2005 Heikki Lindholm (PowerPC 970 fixes).
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
-+ * USA; either version 2 of the License, or (at your option) any later
-+ * version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ * Architecture-dependent I-PIPE core support for PowerPC.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <linux/smp.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/bitops.h>
-+#include <linux/interrupt.h>
-+#include <linux/irq.h>
-+#include <linux/module.h>
-+#include <asm/system.h>
-+#include <asm/atomic.h>
-+#include <asm/hardirq.h>
-+#include <asm/io.h>
-+#include <asm/time.h>
-+
-+/* Current reload value for the decrementer. */
-+unsigned long __ipipe_decr_ticks;
-+
-+/* Next tick date (timebase value). */
-+unsigned long long __ipipe_decr_next[IPIPE_NR_CPUS];
-+
-+struct pt_regs __ipipe_tick_regs[IPIPE_NR_CPUS];
-+
-+static inline unsigned long ffnz(unsigned long ul)
-+{
-+
-+      __asm__ __volatile__("cntlzw %0, %1":"=r"(ul):"r"(ul & (-ul)));
-+      return 31 - ul;
-+}
-+
-+#ifdef CONFIG_POWER4
-+extern struct irqaction k2u3_cascade_action;
-+extern int openpic2_get_irq(struct pt_regs *regs);
-+#endif
-+
-+#ifdef CONFIG_SMP
-+
-+static cpumask_t __ipipe_cpu_sync_map;
-+
-+static cpumask_t __ipipe_cpu_lock_map;
-+
-+static spinlock_t __ipipe_cpu_barrier = SPIN_LOCK_UNLOCKED;
-+
-+static atomic_t __ipipe_critical_count = ATOMIC_INIT(0);
-+
-+static void (*__ipipe_cpu_sync) (void);
-+
-+/* Always called with hw interrupts off. */
-+
-+void __ipipe_do_critical_sync(unsigned irq)
-+{
-+      ipipe_declare_cpuid;
-+
-+      ipipe_load_cpuid();
-+
-+      cpu_set(cpuid, __ipipe_cpu_sync_map);
-+
-+      /*
-+       * Now we are in sync with the lock requestor running on another
-+       * CPU. Enter a spinning wait until he releases the global
-+       * lock.
-+       */
-+      spin_lock_hw(&__ipipe_cpu_barrier);
-+
-+      /* Got it. Now get out. */
-+
-+      if (__ipipe_cpu_sync)
-+              /* Call the sync routine if any. */
-+              __ipipe_cpu_sync();
-+
-+      spin_unlock_hw(&__ipipe_cpu_barrier);
-+
-+      cpu_clear(cpuid, __ipipe_cpu_sync_map);
-+}
-+
-+#endif        /* CONFIG_SMP */
-+
-+/*
-+ * ipipe_critical_enter() -- Grab the superlock excluding all CPUs
-+ * but the current one from a critical section. This lock is used when
-+ * we must enforce a global critical section for a single CPU in a
-+ * possibly SMP system whichever context the CPUs are running.
-+ */
-+unsigned long ipipe_critical_enter(void (*syncfn) (void))
-+{
-+      unsigned long flags;
-+
-+      local_irq_save_hw(flags);
-+
-+#ifdef CONFIG_SMP
-+      if (num_online_cpus() > 1) {    /* We might be running a SMP-kernel on 
a UP box... */
-+              ipipe_declare_cpuid;
-+              cpumask_t lock_map;
-+
-+              ipipe_load_cpuid();
-+
-+              if (!cpu_test_and_set(cpuid, __ipipe_cpu_lock_map)) {
-+                      while (cpu_test_and_set(BITS_PER_LONG - 1,
-+                                              __ipipe_cpu_lock_map)) {
-+                              int n = 0;
-+                              do {
-+                                      cpu_relax();
-+                              } while (++n < cpuid);
-+                      }
-+
-+                      spin_lock_hw(&__ipipe_cpu_barrier);
-+
-+                      __ipipe_cpu_sync = syncfn;
-+
-+                      /* Send the sync IPI to all processors but the current 
one. */
-+                      send_IPI_allbutself(IPIPE_CRITICAL_VECTOR);
-+
-+                      cpus_andnot(lock_map, cpu_online_map,
-+                                  __ipipe_cpu_lock_map);
-+
-+                      while (!cpus_equal(__ipipe_cpu_sync_map, lock_map))
-+                              cpu_relax();
-+              }
-+
-+              atomic_inc(&__ipipe_critical_count);
-+      }
-+#endif        /* CONFIG_SMP */
-+
-+      return flags;
-+}
-+
-+/* ipipe_critical_exit() -- Release the superlock. */
-+
-+void ipipe_critical_exit(unsigned long flags)
-+{
-+#ifdef CONFIG_SMP
-+      if (num_online_cpus() > 1) {    /* We might be running a SMP-kernel on 
a UP box... */
-+              ipipe_declare_cpuid;
-+
-+              ipipe_load_cpuid();
-+
-+              if (atomic_dec_and_test(&__ipipe_critical_count)) {
-+                      spin_unlock_hw(&__ipipe_cpu_barrier);
-+
-+                      while (!cpus_empty(__ipipe_cpu_sync_map))
-+                              cpu_relax();
-+
-+                      cpu_clear(cpuid, __ipipe_cpu_lock_map);
-+                      cpu_clear(BITS_PER_LONG - 1, __ipipe_cpu_lock_map);
-+              }
-+      }
-+#endif        /* CONFIG_SMP */
-+
-+      local_irq_restore_hw(flags);
-+}
-+
-+void __ipipe_init_platform(void)
-+{
-+      unsigned timer_virq;
-+
-+      /*
-+       * Allocate a virtual IRQ for the decrementer trap early to
-+       * get it mapped to IPIPE_VIRQ_BASE
-+       */
-+
-+      timer_virq = ipipe_alloc_virq();
-+
-+      if (timer_virq != IPIPE_TIMER_VIRQ)
-+              panic("I-pipe: cannot reserve timer virq #%d (got #%d)",
-+                    IPIPE_TIMER_VIRQ, timer_virq);
-+
-+      __ipipe_decr_ticks = tb_ticks_per_jiffy;
-+}
-+
-+/*
-+ * __ipipe_sync_stage() -- Flush the pending IRQs for the current
-+ * domain (and processor). This routine flushes the interrupt log
-+ * (see "Optimistic interrupt protection" from D. Stodolsky et al. for
-+ * more on the deferred interrupt scheme). Every interrupt that
-+ * occurred while the pipeline was stalled gets played. WARNING:
-+ * callers on SMP boxen should always check for CPU migration on
-+ * return of this routine. One can control the kind of interrupts
-+ * which are going to be sync'ed using the syncmask
-+ * parameter. IPIPE_IRQMASK_ANY plays them all, IPIPE_IRQMASK_VIRT
-+ * plays virtual interrupts only. This routine must be called with hw
-+ * interrupts off.
-+ */
-+void __ipipe_sync_stage(unsigned long syncmask)
-+{
-+      unsigned long mask, submask;
-+      struct ipcpudata *cpudata;
-+      struct ipipe_domain *ipd;
-+      ipipe_declare_cpuid;
-+      int level, rank;
-+      unsigned irq;
-+
-+      ipipe_load_cpuid();
-+      ipd = ipipe_percpu_domain[cpuid];
-+      cpudata = &ipd->cpudata[cpuid];
-+
-+      if (__test_and_set_bit(IPIPE_SYNC_FLAG, &cpudata->status))
-+              return;
-+
-+      /*
-+       * The policy here is to keep the dispatching code interrupt-free
-+       * by stalling the current stage. If the upper domain handler
-+       * (which we call) wants to re-enable interrupts while in a safe
-+       * portion of the code (e.g. SA_INTERRUPT flag unset for Linux's
-+       * sigaction()), it will have to unstall (then stall again before
-+       * returning to us!) the stage when it sees fit.
-+       */
-+      while ((mask = (cpudata->irq_pending_hi & syncmask)) != 0) {
-+              level = ffnz(mask);
-+              __clear_bit(level, &cpudata->irq_pending_hi);
-+
-+              while ((submask = cpudata->irq_pending_lo[level]) != 0) {
-+                      rank = ffnz(submask);
-+                      irq = (level << IPIPE_IRQ_ISHIFT) + rank;
-+
-+                      if (test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control)) 
{
-+                              __clear_bit(rank,
-+                                          &cpudata->irq_pending_lo[level]);
-+                              continue;
-+                      }
-+
-+                      if (--cpudata->irq_hits[irq] == 0) {
-+                              __clear_bit(rank,
-+                                          &cpudata->irq_pending_lo[level]);
-+                              ipipe_mark_irq_delivery(ipd,irq,cpuid);
-+                      }
-+
-+                      __set_bit(IPIPE_STALL_FLAG, &cpudata->status);
-+                      ipipe_mark_domain_stall(ipd, cpuid);
-+
-+                      if (ipd == ipipe_root_domain) {
-+                              /*
-+                               * Linux handlers are called w/ hw
-+                               * interrupts on so that they could
-+                               * not defer interrupts for higher
-+                               * priority domains.
-+                               */
-+                              local_irq_enable_hw();
-+                              ((void (*)(unsigned, struct pt_regs *))
-+                               ipd->irqs[irq].handler) (irq, 
__ipipe_tick_regs + cpuid);
-+                              local_irq_disable_hw();
-+                      } else {
-+                              __clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
-+                              ipd->irqs[irq].handler(irq);
-+                              __set_bit(IPIPE_SYNC_FLAG, &cpudata->status);
-+                      }
-+#ifdef CONFIG_SMP
-+                      {
-+                              int _cpuid = ipipe_processor_id();
-+
-+                              if (_cpuid != cpuid) {  /* Handle CPU migration. */
-+                                      /*
-+                                       * We expect any domain to clear the SYNC bit each
-+                                       * time it switches in a new task, so that preemptions
-+                                       * and/or CPU migrations (in the SMP case) over the
-+                                       * ISR do not lock out the log syncer for some
-+                                       * indefinite amount of time. In the Linux case,
-+                                       * schedule() handles this (see kernel/sched.c). For
-+                                       * this reason, we don't bother clearing it here for
-+                                       * the source CPU in the migration handling case,
-+                                       * since it must have scheduled another task in by
-+                                       * now.
-+                                       */
-+                                      cpuid = _cpuid;
-+                                      cpudata = &ipd->cpudata[cpuid];
-+                                      __set_bit(IPIPE_SYNC_FLAG, &cpudata->status);
-+                              }
-+                      }
-+#endif        /* CONFIG_SMP */
-+
-+                      __clear_bit(IPIPE_STALL_FLAG, &cpudata->status);
-+                      ipipe_mark_domain_unstall(ipd, cpuid);
-+              }
-+      }
-+
-+      __clear_bit(IPIPE_SYNC_FLAG, &cpudata->status);
-+}
-+
-+/*
-+ * ipipe_virtualize_irq() -- Attach a handler (and optionally a hw
-+ * acknowledge routine) to an interrupt for the given domain.
-+ */
-+int ipipe_virtualize_irq(struct ipipe_domain *ipd,
-+                       unsigned irq,
-+                       void (*handler) (unsigned irq),
-+                       int (*acknowledge) (unsigned irq), unsigned modemask)
-+{
-+      unsigned long flags;
-+      int err;
-+
-+      if (irq >= IPIPE_NR_IRQS)
-+              return -EINVAL;
-+
-+      if (ipd->irqs[irq].control & IPIPE_SYSTEM_MASK)
-+              return -EPERM;
-+
-+      spin_lock_irqsave_hw(&__ipipe_pipelock, flags);
-+
-+      if (handler != NULL) {
-+              /*
-+               * A bit of hack here: if we are re-virtualizing an IRQ just
-+               * to change the acknowledge routine by passing the special
-+               * IPIPE_SAME_HANDLER value, then allow recycling the current
-+               * handler for the IRQ. This allows Linux device drivers
-+               * managing shared IRQ lines to call ipipe_virtualize_irq() in
-+               * addition to request_irq() just for the purpose of
-+               * interposing their own shared acknowledge routine.
-+               */
-+
-+              if (handler == IPIPE_SAME_HANDLER) {
-+                      handler = ipd->irqs[irq].handler;
-+
-+                      if (handler == NULL) {
-+                              err = -EINVAL;
-+                              goto unlock_and_exit;
-+                      }
-+              } else if ((modemask & IPIPE_EXCLUSIVE_MASK) != 0 &&
-+                         ipd->irqs[irq].handler != NULL) {
-+                      err = -EBUSY;
-+                      goto unlock_and_exit;
-+              }
-+
-+              if ((modemask & (IPIPE_SHARED_MASK | IPIPE_PASS_MASK)) ==
-+                      IPIPE_SHARED_MASK) {
-+                      err = -EINVAL;
-+                      goto unlock_and_exit;
-+              }
-+
-+              if ((modemask & IPIPE_STICKY_MASK) != 0)
-+                      modemask |= IPIPE_HANDLE_MASK;
-+      } else
-+              modemask &=
-+                      ~(IPIPE_HANDLE_MASK | IPIPE_STICKY_MASK |
-+                      IPIPE_SHARED_MASK);
-+
-+      if (acknowledge == NULL) {
-+              if ((modemask & IPIPE_SHARED_MASK) == 0)
-+                      /*
-+                       * Acknowledge handler unspecified -- this is ok in
-+                       * non-shared management mode, but we will force the
-+                       * use of the Linux-defined handler instead.
-+                       */
-+                      acknowledge = ipipe_root_domain->irqs[irq].acknowledge;
-+              else {
-+                      /*
-+                       * A valid acknowledge handler to be called in shared
-+                       * mode is required when declaring a shared IRQ.
-+                       */
-+                      err = -EINVAL;
-+                      goto unlock_and_exit;
-+              }
-+      }
-+
-+      ipd->irqs[irq].handler = handler;
-+      ipd->irqs[irq].acknowledge = acknowledge;
-+      ipd->irqs[irq].control = modemask;
-+
-+      if (irq < NR_IRQS &&
-+          handler != NULL &&
-+          !ipipe_virtual_irq_p(irq) && (modemask & IPIPE_ENABLE_MASK) != 0) {
-+              if (ipd != ipipe_current_domain) {
-+                      /*
-+                       * IRQ enable/disable state is domain-sensitive, so
-+                       * we may not change it for another domain. What is
-+                       * allowed however is forcing some domain to handle
-+                       * an interrupt source, by passing the proper 'ipd'
-+                       * descriptor which thus may be different from
-+                       * ipipe_current_domain.
-+                       */
-+                      err = -EPERM;
-+                      goto unlock_and_exit;
-+              }
-+
-+              enable_irq(irq);
-+      }
-+
-+      err = 0;
-+
-+unlock_and_exit:
-+
-+      spin_unlock_irqrestore_hw(&__ipipe_pipelock, flags);
-+
-+      return err;
-+}
-+
-+/* ipipe_control_irq() -- Change modes of a pipelined interrupt for
-+ * the current domain. */
-+
-+int ipipe_control_irq(unsigned irq, unsigned clrmask, unsigned setmask)
-+{
-+      irq_desc_t *desc;
-+      unsigned long flags;
-+
-+      if (irq >= IPIPE_NR_IRQS)
-+              return -EINVAL;
-+
-+      if (ipipe_current_domain->irqs[irq].control & IPIPE_SYSTEM_MASK)
-+              return -EPERM;
-+
-+      if (((setmask | clrmask) & IPIPE_SHARED_MASK) != 0)
-+              return -EINVAL;
-+
-+      desc = irq_desc + irq;
-+
-+      if (ipipe_current_domain->irqs[irq].handler == NULL)
-+              setmask &= ~(IPIPE_HANDLE_MASK | IPIPE_STICKY_MASK);
-+
-+      if ((setmask & IPIPE_STICKY_MASK) != 0)
-+              setmask |= IPIPE_HANDLE_MASK;
-+
-+      if ((clrmask & (IPIPE_HANDLE_MASK | IPIPE_STICKY_MASK)) != 0)   /* If one goes, both go. */
-+              clrmask |= (IPIPE_HANDLE_MASK | IPIPE_STICKY_MASK);
-+
-+      spin_lock_irqsave_hw(&__ipipe_pipelock, flags);
-+
-+      ipipe_current_domain->irqs[irq].control &= ~clrmask;
-+      ipipe_current_domain->irqs[irq].control |= setmask;
-+
-+      if ((setmask & IPIPE_ENABLE_MASK) != 0)
-+              enable_irq(irq);
-+      else if ((clrmask & IPIPE_ENABLE_MASK) != 0)
-+              disable_irq(irq);
-+
-+      spin_unlock_irqrestore_hw(&__ipipe_pipelock, flags);
-+
-+      return 0;
-+}
-+
-+int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
-+{
-+      info->ncpus = num_online_cpus();
-+      info->cpufreq = ipipe_cpu_freq();
-+      info->archdep.tmirq = IPIPE_TIMER_VIRQ;
-+      info->archdep.tmfreq = info->cpufreq;
-+
-+      return 0;
-+}
-+
-+/*
-+ * ipipe_trigger_irq() -- Push the interrupt at the front of the
-+ * pipeline, just as if it had actually been received from a hw
-+ * source. Also works for virtual interrupts.
-+ */
-+int ipipe_trigger_irq(unsigned irq)
-+{
-+      struct pt_regs regs;
-+      unsigned long flags;
-+
-+      if (irq >= IPIPE_NR_IRQS ||
-+          (ipipe_virtual_irq_p(irq)
-+           && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
-+              return -EINVAL;
-+
-+      local_irq_save_hw(flags);
-+
-+      regs.msr = flags;
-+
-+      __ipipe_handle_irq(irq | IPIPE_IRQ_ACKED, &regs);
-+
-+      local_irq_restore_hw(flags);
-+
-+      return 1;
-+}
-+
-+static void __ipipe_set_decr(void)
-+{
-+      ipipe_declare_cpuid;
-+
-+      ipipe_load_cpuid();
-+
-+      disarm_decr[cpuid] = (__ipipe_decr_ticks != tb_ticks_per_jiffy);
-+#ifdef CONFIG_40x
-+      /* Enable and set auto-reload. */
-+      mtspr(SPRN_TCR, mfspr(SPRN_TCR) | TCR_ARE);
-+      mtspr(SPRN_PIT, __ipipe_decr_ticks);
-+#else /* !CONFIG_40x */
-+      __ipipe_decr_next[cpuid] = __ipipe_read_timebase() + __ipipe_decr_ticks;
-+      set_dec(__ipipe_decr_ticks);
-+#endif        /* CONFIG_40x */
-+}
-+
-+int ipipe_tune_timer(unsigned long ns, int flags)
-+{
-+      unsigned long x, ticks;
-+
-+      if (flags & IPIPE_RESET_TIMER)
-+              ticks = tb_ticks_per_jiffy;
-+      else {
-+              ticks = ns * tb_ticks_per_jiffy / (1000000000 / HZ);
-+
-+              if (ticks > tb_ticks_per_jiffy)
-+                      return -EINVAL;
-+      }
-+
-+      x = ipipe_critical_enter(&__ipipe_set_decr);    /* Sync with all CPUs */
-+      __ipipe_decr_ticks = ticks;
-+      __ipipe_set_decr();
-+      ipipe_critical_exit(x);
-+
-+      return 0;
-+}
-+
-+EXPORT_SYMBOL(__ipipe_sync_stage);
-+EXPORT_SYMBOL(__ipipe_decr_ticks);
-+EXPORT_SYMBOL(__ipipe_decr_next);
-+EXPORT_SYMBOL(ipipe_critical_enter);
-+EXPORT_SYMBOL(ipipe_critical_exit);
-+EXPORT_SYMBOL(ipipe_trigger_irq);
-+EXPORT_SYMBOL(ipipe_virtualize_irq);
-+EXPORT_SYMBOL(ipipe_control_irq);
-+EXPORT_SYMBOL(ipipe_get_sysinfo);
-+EXPORT_SYMBOL(ipipe_tune_timer);
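
A minimal usage sketch of the API exported above, seen from a client domain. Everything named my_* and MY_IRQ is hypothetical, and the domain is assumed to have been registered beforehand (the registration call is not part of this hunk):

    /* Illustrative only -- my_domain, MY_IRQ and my_irq_handler are made up. */
    static struct ipipe_domain my_domain;       /* assumed registered elsewhere */

    static void my_irq_handler(unsigned irq)
    {
            /* Runs when the domain's stage is synced, hw interrupts off. */
    }

    static int my_setup(void)
    {
            int err;

            /* Handle MY_IRQ in my_domain, keep propagating it downstream. */
            err = ipipe_virtualize_irq(&my_domain, MY_IRQ, &my_irq_handler,
                                       NULL, IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
            if (err)
                    return err;

            /* Rearm the decrementer with a 100 us period (flags == 0). */
            return ipipe_tune_timer(100000, 0);
    }
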
---- 2.6.13/arch/ppc/kernel/ipipe-root.c        1970-01-01 01:00:00.000000000 +0100
-+++ 2.6.13-ipipe/arch/ppc/kernel/ipipe-root.c  2005-10-15 22:01:18.000000000 +0200
-@@ -0,0 +1,510 @@
-+/* -*- linux-c -*-
-+ * linux/arch/ppc/kernel/ipipe-root.c
-+ *
-+ * Copyright (C) 2002-2005 Philippe Gerum (Adeos/ppc port over 2.6).
-+ * Copyright (C) 2004 Wolfgang Grandegger (Adeos/ppc port over 2.4).
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
-+ * USA; either version 2 of the License, or (at your option) any later
-+ * version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ * Architecture-dependent I-pipe support for PowerPC.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/interrupt.h>
-+#include <linux/irq.h>
-+#include <linux/errno.h>
-+#include <asm/system.h>
-+#include <asm/hardirq.h>
-+#include <asm/atomic.h>
-+#include <asm/io.h>
-+#include <asm/time.h>
-+#include <asm/mmu_context.h>
-+
-+extern irq_desc_t irq_desc[];
-+
-+static struct hw_interrupt_type __ipipe_std_irq_dtype[NR_IRQS];
-+
-+static void __ipipe_override_irq_enable(unsigned irq)
-+{
-+      unsigned long flags;
-+
-+      local_irq_save_hw(flags);
-+      ipipe_irq_unlock(irq);
-+      __ipipe_std_irq_dtype[irq].enable(irq);
-+      local_irq_restore_hw(flags);
-+}
-+
-+static void __ipipe_override_irq_disable(unsigned irq)
-+{
-+      unsigned long flags;
-+
-+      local_irq_save_hw(flags);
-+      ipipe_irq_lock(irq);
-+      __ipipe_std_irq_dtype[irq].disable(irq);
-+      local_irq_restore_hw(flags);
-+}
-+
-+static void __ipipe_override_irq_end(unsigned irq)
-+{
-+      unsigned long flags;
-+
-+      local_irq_save_hw(flags);
-+
-+      if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-+              ipipe_irq_unlock(irq);
-+
-+      __ipipe_std_irq_dtype[irq].end(irq);
-+
-+      local_irq_restore_hw(flags);
-+}
-+
-+static void __ipipe_override_irq_affinity(unsigned irq, cpumask_t mask)
-+{
-+      unsigned long flags;
-+
-+      local_irq_save_hw(flags);
-+      __ipipe_std_irq_dtype[irq].set_affinity(irq, mask);
-+      local_irq_restore_hw(flags);
-+}
-+
-+static void __ipipe_enable_sync(void)
-+{
-+      __ipipe_decr_next[ipipe_processor_id()] =
-+              __ipipe_read_timebase() + get_dec();
-+}
-+
-+/*
-+ * __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
-+ * interrupts are off, and secondary CPUs are still lost in space.
-+ */
-+void __ipipe_enable_pipeline(void)
-+{
-+      unsigned long flags;
-+      unsigned irq;
-+
-+      flags = ipipe_critical_enter(&__ipipe_enable_sync);
-+
-+      /* First, virtualize all interrupts from the root domain. */
-+
-+      for (irq = 0; irq < NR_IRQS; irq++)
-+              ipipe_virtualize_irq(ipipe_root_domain,
-+                                   irq,
-+                                   (void (*)(unsigned))&__ipipe_do_IRQ,
-+                                   &__ipipe_ack_irq,
-+                                   IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
-+
-+      /*
-+       * We use a virtual IRQ to handle the timer irq (decrementer trap)
-+       * which has been allocated early in __ipipe_init_platform().
-+       */
-+
-+      ipipe_virtualize_irq(ipipe_root_domain,
-+                           IPIPE_TIMER_VIRQ,
-+                           (void (*)(unsigned))&__ipipe_do_timer,
-+                           NULL, IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
-+
-+      /*
-+       * Interpose on the IRQ control routines so we can make them
-+       * atomic using hw masking and prevent the interrupt log from
-+       * being untimely flushed.
-+       */
-+
-+      for (irq = 0; irq < NR_IRQS; irq++) {
-+              if (irq_desc[irq].handler != NULL)
-+                      __ipipe_std_irq_dtype[irq] = *irq_desc[irq].handler;
-+      }
-+
-+      /*
-+       * The original controller structs are often shared, so we first
-+       * save them all before changing any of them. Notice that we don't
-+       * override the ack() handler since we will enforce the necessary
-+       * setup in __ipipe_ack_irq().
-+       */
-+
-+      for (irq = 0; irq < NR_IRQS; irq++) {
-+              struct hw_interrupt_type *handler = irq_desc[irq].handler;
-+
-+              if (handler == NULL)
-+                      continue;
-+
-+              if (handler->enable != NULL)
-+                      handler->enable = &__ipipe_override_irq_enable;
-+
-+              if (handler->disable != NULL)
-+                      handler->disable = &__ipipe_override_irq_disable;
-+
-+              if (handler->end != NULL)
-+                      handler->end = &__ipipe_override_irq_end;
-+
-+              if (handler->set_affinity != NULL)
-+                      handler->set_affinity = &__ipipe_override_irq_affinity;
-+      }
-+
-+      __ipipe_decr_next[ipipe_processor_id()] =
-+              __ipipe_read_timebase() + get_dec();
-+
-+      ipipe_critical_exit(flags);
-+}
-+
-+int __ipipe_ack_irq(unsigned irq)
-+{
-+      irq_desc_t *desc = irq_desc + irq;
-+      unsigned long flags;
-+      ipipe_declare_cpuid;
-+
-+      if (desc->handler->ack == NULL)
-+              return 1;
-+
-+      /*
-+       * No need to mask IRQs at hw level: we are always called from
-+       * __ipipe_handle_irq(), so interrupts are already off. We
-+       * stall the pipeline so that spin_lock_irq*() ops won't
-+       * unintentionally flush it, since this could cause infinite
-+       * recursion.
-+       */
-+
-+      ipipe_load_cpuid();
-+      flags = ipipe_test_and_stall_pipeline();
-+      preempt_disable();
-+      spin_lock_hw(&desc->lock);
-+      desc->handler->ack(irq);
-+#ifdef CONFIG_POWER4
-+      /* if it is a k2u3 cascaded irq, acknowledge it as well */
-+      if (desc->action == &k2u3_cascade_action) {
-+              struct pt_regs regs;
-+              int irq2 = openpic2_get_irq(&regs);
-+              if (irq2 != -1) {
-+                      irq_desc_t *desc2 = irq_desc + irq2;
-+                      if (desc2->handler->ack)
-+                              desc2->handler->ack(irq2);
-+              }
-+      }
-+#endif
-+      spin_unlock_hw(&desc->lock);
-+      preempt_enable_no_resched();
-+      ipipe_restore_pipeline_nosync(ipipe_percpu_domain[cpuid], flags, cpuid);
-+
-+      return 1;
-+}
-+
-+/*
-+ * __ipipe_walk_pipeline(): Plays interrupts pending in the log. Must
-+ * be called with local hw interrupts disabled.
-+ */
-+static inline void __ipipe_walk_pipeline(struct list_head *pos, int cpuid)
-+{
-+      struct ipipe_domain *this_domain = ipipe_percpu_domain[cpuid];
-+
-+      while (pos != &__ipipe_pipeline) {
-+              struct ipipe_domain *next_domain =
-+                      list_entry(pos, struct ipipe_domain, p_link);
-+
-+              if (test_bit(IPIPE_STALL_FLAG,
-+                           &next_domain->cpudata[cpuid].status))
-+                      break;  /* Stalled stage -- do not go further. */
-+
-+              if (next_domain->cpudata[cpuid].irq_pending_hi != 0) {
-+
-+                      if (next_domain == this_domain)
-+                              __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
-+                      else {
-+                              __ipipe_switch_to(this_domain, next_domain, cpuid);
-+
-+                              ipipe_load_cpuid();     /* Processor might have changed. */
-+
-+                              if (this_domain->cpudata[cpuid].irq_pending_hi != 0
-+                                  && !test_bit(IPIPE_STALL_FLAG,
-+                                               &this_domain->cpudata[cpuid].status))
-+                                      __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
-+                      }
-+
-+                      break;
-+              } else if (next_domain == this_domain)
-+                      break;
-+
-+              pos = next_domain->p_link.next;
-+      }
-+}
-+
-+/*
-+ * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
-+ * interrupt protection log is maintained here for each domain. Hw
-+ * interrupts are off on entry.
-+ */
-+void __ipipe_handle_irq(int irq, struct pt_regs *regs)
-+{
-+      struct ipipe_domain *this_domain;
-+      struct list_head *head, *pos;
-+      ipipe_declare_cpuid;
-+      int m_ack, s_ack;
-+
-+      m_ack = irq & IPIPE_IRQ_ACKED;
-+      irq &= IPIPE_IRQ_ACKED_MASK;
-+
-+      if (irq >= IPIPE_NR_IRQS) {
-+              printk(KERN_ERR "I-pipe: spurious interrupt %d\n", irq);
-+              return;
-+      }
-+
-+      ipipe_load_cpuid();
-+
-+      this_domain = ipipe_percpu_domain[cpuid];
-+
-+      s_ack = m_ack;
-+
-+      if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
-+              head = &this_domain->p_link;
-+      else
-+              head = __ipipe_pipeline.next;
-+
-+      /* Ack the interrupt. */
-+
-+      pos = head;
-+
-+      while (pos != &__ipipe_pipeline) {
-+              struct ipipe_domain *next_domain =
-+                      list_entry(pos, struct ipipe_domain, p_link);
-+
-+              /*
-+               * For each domain handling the incoming IRQ, mark it as
-+               * pending in its log.
-+               */
-+              if (test_bit(IPIPE_HANDLE_FLAG,
-+                           &next_domain->irqs[irq].control)) {
-+                      /*
-+                       * Domains that handle this IRQ are polled for
-+                       * acknowledging it by decreasing priority order. The
-+                       * interrupt must be made pending _first_ in the
-+                       * domain's status flags before the PIC is unlocked.
-+                       */
-+
-+                      next_domain->cpudata[cpuid].irq_hits[irq]++;
-+                      __ipipe_set_irq_bit(next_domain, cpuid, irq);
-+                      ipipe_mark_irq_receipt(next_domain, irq, cpuid);
-+
-+                      /*
-+                       * Always get the first master acknowledge available.
-+                       * Once we've got it, allow slave acknowledge
-+                       * handlers to run (until one of them stops us).
-+                       */
-+                      if (next_domain->irqs[irq].acknowledge != NULL) {
-+                              if (!m_ack)
-+                                      m_ack = next_domain->irqs[irq].acknowledge(irq);
-+                              else if (test_bit
-+                                       (IPIPE_SHARED_FLAG,
-+                                        &next_domain->irqs[irq].control) && !s_ack)
-+                                      s_ack = next_domain->irqs[irq].acknowledge(irq);
-+                      }
-+              }
-+
-+              /*
-+               * If the domain does not want the IRQ to be passed down the
-+               * interrupt pipe, exit the loop now.
-+               */
-+
-+              if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
-+                      break;
-+
-+              pos = next_domain->p_link.next;
-+      }
-+
-+      /*
-+       * Now walk the pipeline, yielding control to the highest
-+       * priority domain that has pending interrupt(s) or
-+       * immediately to the current domain if the interrupt has been
-+       * marked as 'sticky'. This search does not go beyond the
-+       * current domain in the pipeline.
-+       */
-+
-+      __ipipe_walk_pipeline(head, cpuid);
-+}
-+
-+asmlinkage int __ipipe_grab_irq(struct pt_regs *regs)
-+{
-+      extern int ppc_spurious_interrupts;
-+      ipipe_declare_cpuid;
-+      int irq, first = 1;
-+
-+      if ((irq = ppc_md.get_irq(regs)) >= 0) {
-+              __ipipe_handle_irq(irq, regs);
-+              first = 0;
-+      } else if (irq != -2 && first)
-+              ppc_spurious_interrupts++;
-+
-+      ipipe_load_cpuid();
-+
-+      return (ipipe_percpu_domain[cpuid] == ipipe_root_domain &&
-+              !test_bit(IPIPE_STALL_FLAG,
-+                        &ipipe_root_domain->cpudata[cpuid].status));
-+}
-+
-+void __ipipe_do_IRQ(int irq, struct pt_regs *regs)
-+{
-+      irq_enter();
-+      __do_IRQ(irq, regs);
-+      irq_exit();
-+}
-+
-+asmlinkage int __ipipe_grab_timer(struct pt_regs *regs)
-+{
-+      ipipe_declare_cpuid;
-+
-+#ifdef CONFIG_POWER4
-+      /* On 970 CPUs the DEC cannot be disabled, and without setting it
-+       * here, the DEC interrupt would trigger again as soon as interrupts
-+       * are re-enabled in __ipipe_sync_stage().
-+       */
-+      set_dec(0x7fffffff);
-+#endif
-+
-+      __ipipe_tick_regs[cpuid].msr = regs->msr; /* for do_timer() */
-+
-+      __ipipe_handle_irq(IPIPE_TIMER_VIRQ, regs);
-+
-+      ipipe_load_cpuid();
-+
-+#ifndef CONFIG_40x
-+      if (__ipipe_decr_ticks != tb_ticks_per_jiffy) {
-+              unsigned long long next_date, now;
-+
-+              next_date = __ipipe_decr_next[cpuid];
-+
-+              while ((now = __ipipe_read_timebase()) >= next_date)
-+                      next_date += __ipipe_decr_ticks;
-+
-+              set_dec(next_date - now);
-+
-+              __ipipe_decr_next[cpuid] = next_date;
-+      }
-+#endif        /* !CONFIG_40x */
-+
-+      return (ipipe_percpu_domain[cpuid] == ipipe_root_domain &&
-+              !test_bit(IPIPE_STALL_FLAG,
-+                        &ipipe_root_domain->cpudata[cpuid].status));
-+}
-+
-+void __ipipe_do_timer(int irq, struct pt_regs *regs)
-+{
-+      timer_interrupt(regs);
-+}
-+
-+asmlinkage int __ipipe_check_root(struct pt_regs *regs)
-+{
-+      ipipe_declare_cpuid;
-+      /*
-+       * This routine is called with hw interrupts off, so no migration
-+       * can occur while checking the identity of the current domain.
-+       */
-+      ipipe_load_cpuid();
-+      return (ipipe_percpu_domain[cpuid] == ipipe_root_domain &&
-+              !test_bit(IPIPE_STALL_FLAG,
-+                        &ipipe_root_domain->cpudata[cpuid].status));
-+}
-+
-+asmlinkage void __ipipe_stall_root_raw(void)
-+{
-+      ipipe_declare_cpuid;
-+
-+      ipipe_load_cpuid();     /* hw IRQs are off on entry. */
-+
-+      __set_bit(IPIPE_STALL_FLAG,
-+                &ipipe_root_domain->cpudata[cpuid].status);
-+
-+      ipipe_mark_domain_stall(ipipe_root_domain, cpuid);
-+
-+      local_irq_enable_hw();
-+}
-+
-+asmlinkage void __ipipe_unstall_root_raw(void)
-+{
-+      ipipe_declare_cpuid;
-+
-+      local_irq_disable_hw();
-+
-+      ipipe_load_cpuid();
-+
-+      __clear_bit(IPIPE_STALL_FLAG,
-+                  &ipipe_root_domain->cpudata[cpuid].status);
-+
-+      ipipe_mark_domain_unstall(ipipe_root_domain, cpuid);
-+}
-+
-+int __ipipe_syscall_root(struct pt_regs *regs)
-+{
-+      ipipe_declare_cpuid;
-+      unsigned long flags;
-+
-+      /*
-+       * This routine either returns:
-+       * 0 -- if the syscall is to be passed to Linux;
-+       * >0 -- if the syscall should not be passed to Linux, and no
-+       * tail work should be performed;
-+       * <0 -- if the syscall should not be passed to Linux but the
-+       * tail work has to be performed (for handling signals etc).
-+       */
-+
-+      if (__ipipe_event_monitors[IPIPE_EVENT_SYSCALL] > 0 &&
-+          __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL,regs) > 0) {
-+              /*
-+               * We might enter here over a non-root domain and exit
-+               * over the root one as a result of the syscall
-+               * (i.e. by recycling the register set of the current
-+               * context across the migration), so we need to fixup
-+               * the interrupt flag upon return too, so that
-+               * __ipipe_unstall_iret_root() resets the correct
-+               * stall bit on exit.
-+               */
-+              if (ipipe_current_domain == ipipe_root_domain && !in_atomic()) {
-+                      /*
-+                       * Sync pending VIRQs before _TIF_NEED_RESCHED
-+                       * is tested.
-+                       */
-+                      ipipe_lock_cpu(flags);
-+                      if ((ipipe_root_domain->cpudata[cpuid].irq_pending_hi & IPIPE_IRQMASK_VIRT) != 0)
-+                              __ipipe_sync_stage(IPIPE_IRQMASK_VIRT);
-+                      ipipe_unlock_cpu(flags);
-+                      return -1;
-+              }
-+              return 1;
-+      }
-+
-+      return 0;
-+}
-+
-+void atomic_set_mask(unsigned long mask,
-+                   unsigned long *ptr);
-+
-+void atomic_clear_mask(unsigned long mask,
-+                     unsigned long *ptr);
-+
-+extern unsigned long context_map[];
-+
-+EXPORT_SYMBOL(__switch_to);
-+EXPORT_SYMBOL(show_stack);
-+EXPORT_SYMBOL(atomic_set_mask);
-+EXPORT_SYMBOL(atomic_clear_mask);
-+EXPORT_SYMBOL(context_map);
-+EXPORT_SYMBOL(_switch);
-+EXPORT_SYMBOL(last_task_used_math);
-+#ifdef FEW_CONTEXTS
-+EXPORT_SYMBOL(nr_free_contexts);
-+EXPORT_SYMBOL(context_mm);
-+EXPORT_SYMBOL(steal_context);
-+#endif
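
Both __ipipe_grab_irq() and __ipipe_grab_timer() return non-zero only when the root domain is current and unstalled. Presumably the low-level exception glue (not part of this hunk) uses that value to decide whether Linux's usual interrupt exit work may run; a hedged C rendering of that contract, where do_linux_irq_exit() is a made-up stand-in for the real assembly epilogue:

    /* Pseudo-C sketch of the assumed calling convention. */
    void hypothetical_external_irq_entry(struct pt_regs *regs)
    {
            if (__ipipe_grab_irq(regs))
                    do_linux_irq_exit(regs);    /* root domain active and unstalled */
            /* Otherwise return straight to the interrupted context; the IRQ
             * stays logged until the root stage is synced later on. */
    }
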
---- 2.6.13/arch/ppc/kernel/traps.c     2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/arch/ppc/kernel/traps.c       2005-09-07 14:02:09.000000000 +0200
-@@ -214,6 +214,9 @@ void MachineCheckException(struct pt_reg
- {
-       unsigned long reason = get_mc_reason(regs);
- 
-+      if (ipipe_trap_notify(IPIPE_TRAP_MCE,regs))
-+              return;
-+
-       if (user_mode(regs)) {
-               regs->msr |= MSR_RI;
-               _exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
-@@ -373,6 +376,8 @@ void SMIException(struct pt_regs *regs)
- 
- void UnknownException(struct pt_regs *regs)
- {
-+      if (ipipe_trap_notify(IPIPE_TRAP_UNKNOWN,regs))
-+              return;
-       printk("Bad trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
-              regs->nip, regs->msr, regs->trap, print_tainted());
-       _exception(SIGTRAP, regs, 0, 0);
-@@ -380,6 +385,8 @@ void UnknownException(struct pt_regs *re
- 
- void InstructionBreakpoint(struct pt_regs *regs)
- {
-+      if (ipipe_trap_notify(IPIPE_TRAP_IABR,regs))
-+              return;
-       if (debugger_iabr_match(regs))
-               return;
-       _exception(SIGTRAP, regs, TRAP_BRKPT, 0);
-@@ -387,6 +394,8 @@ void InstructionBreakpoint(struct pt_reg
- 
- void RunModeException(struct pt_regs *regs)
- {
-+      if (ipipe_trap_notify(IPIPE_TRAP_RM,regs))
-+              return;
-       _exception(SIGTRAP, regs, 0, 0);
- }
- 
-@@ -536,6 +545,8 @@ static void emulate_single_step(struct p
- {
-       if (single_stepping(regs)) {
-               clear_single_step(regs);
-+              if (ipipe_trap_notify(IPIPE_TRAP_SSTEP,regs))
-+                  return;
-               _exception(SIGTRAP, regs, TRAP_TRACE, 0);
-       }
- }
-@@ -605,6 +616,9 @@ void ProgramCheckException(struct pt_reg
-       unsigned int reason = get_reason(regs);
-       extern int do_mathemu(struct pt_regs *regs);
- 
-+      if (ipipe_trap_notify(IPIPE_TRAP_PCE,regs))
-+              return;
-+
- #ifdef CONFIG_MATH_EMULATION
-       /* (reason & REASON_ILLEGAL) would be the obvious thing here,
-        * but there seems to be a hardware bug on the 405GP (RevD)
-@@ -682,6 +696,8 @@ void ProgramCheckException(struct pt_reg
- void SingleStepException(struct pt_regs *regs)
- {
-       regs->msr &= ~(MSR_SE | MSR_BE);  /* Turn off 'trace' bits */
-+      if (ipipe_trap_notify(IPIPE_TRAP_SSTEP,regs))
-+              return;
-       if (debugger_sstep(regs))
-               return;
-       _exception(SIGTRAP, regs, TRAP_TRACE, 0);
-@@ -697,6 +713,8 @@ void AlignmentException(struct pt_regs *
-               emulate_single_step(regs);
-               return;
-       }
-+      if (ipipe_trap_notify(IPIPE_TRAP_ALIGNMENT,regs))
-+              return;
-       if (fixed == -EFAULT) {
-               /* fixed == -EFAULT means the operand address was bad */
-               if (user_mode(regs))
-@@ -719,6 +737,8 @@ void StackOverflow(struct pt_regs *regs)
- 
- void nonrecoverable_exception(struct pt_regs *regs)
- {
-+      if (ipipe_trap_notify(IPIPE_TRAP_NREC,regs))
-+              return;
-       printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
-              regs->nip, regs->msr);
-       debugger(regs);
-@@ -739,6 +759,9 @@ void SoftwareEmulation(struct pt_regs *r
-       extern int Soft_emulate_8xx(struct pt_regs *);
-       int errcode;
- 
-+      if (ipipe_trap_notify(IPIPE_TRAP_SOFTEMU,regs))
-+              return;
-+
-       CHECK_FULL_REGS(regs);
- 
-       if (!user_mode(regs)) {
-@@ -767,6 +790,8 @@ void SoftwareEmulation(struct pt_regs *r
- 
- void DebugException(struct pt_regs *regs, unsigned long debug_status)
- {
-+      if (ipipe_trap_notify(IPIPE_TRAP_DEBUG,regs))
-+              return;
-       if (debug_status & DBSR_IC) {   /* instruction completion */
-               regs->msr &= ~MSR_DE;
-               if (user_mode(regs)) {
-@@ -796,6 +821,9 @@ void AltivecUnavailException(struct pt_r
- {
-       static int kernel_altivec_count;
- 
-+      if (ipipe_trap_notify(IPIPE_TRAP_ALTUNAVAIL,regs))
-+              return;
-+
- #ifndef CONFIG_ALTIVEC
-       if (user_mode(regs)) {
-               /* A user program has executed an altivec instruction,
-@@ -817,6 +845,8 @@ void AltivecAssistException(struct pt_re
- {
-       int err;
- 
-+      if (ipipe_trap_notify(IPIPE_TRAP_ALTASSIST,regs))
-+              return;
-       preempt_disable();
-       if (regs->msr & MSR_VEC)
-               giveup_altivec(current);
-@@ -875,6 +905,9 @@ void SPEFloatingPointException(struct pt
-       int fpexc_mode;
-       int code = 0;
- 
-+      if (ipipe_trap_notify(IPIPE_TRAP_SPE,regs))
-+              return;
-+
-       spefscr = current->thread.spefscr;
-       fpexc_mode = current->thread.fpexc_mode;
- 
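
The ipipe_trap_notify() hooks added to these exception handlers hand the trap to whichever domain registered an event handler for it; when that handler returns non-zero, the Linux handler bails out early. A sketch of the client side, assuming __ipipe_dispatch_event() propagates the handler's return value as these hooks rely on -- my_trap_handler and own_this_trap() are made-up names, and the signature matches the evhand[] slots of struct ipipe_domain further down in this patch:

    /* Illustrative only; handler registration is not shown here. */
    static int my_trap_handler(unsigned event, struct ipipe_domain *from, void *data)
    {
            struct pt_regs *regs = data;

            if (event == IPIPE_TRAP_SSTEP && own_this_trap(regs))
                    return 1;       /* non-zero: Linux never sees the exception */

            return 0;               /* zero: fall through to the regular handler */
    }
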
---- 2.6.13/arch/ppc/mm/fault.c 2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/arch/ppc/mm/fault.c   2005-09-07 14:02:09.000000000 +0200
-@@ -116,6 +116,9 @@ int do_page_fault(struct pt_regs *regs, 
-               is_write = error_code & 0x02000000;
- #endif /* CONFIG_4xx || CONFIG_BOOKE */
- 
-+      if (ipipe_trap_notify(IPIPE_TRAP_ACCESS,regs))
-+              return 0;
-+
- #if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
-       if (debugger_fault_handler && TRAP(regs) == 0x300) {
-               debugger_fault_handler(regs);
---- 2.6.13/arch/ppc/platforms/pmac_pic.c       2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/arch/ppc/platforms/pmac_pic.c 2005-09-06 15:46:59.000000000 +0200
-@@ -387,7 +387,7 @@ static irqreturn_t k2u3_action(int cpl, 
-       return IRQ_HANDLED;
- }
- 
--static struct irqaction k2u3_cascade_action = {
-+struct irqaction k2u3_cascade_action = {
-       .handler        = k2u3_action,
-       .flags          = 0,
-       .mask           = CPU_MASK_NONE,
---- 2.6.13/include/asm-ppc/hw_irq.h    2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/include/asm-ppc/hw_irq.h      2005-10-17 17:06:52.000000000 +0200
-@@ -12,6 +12,72 @@ extern void timer_interrupt(struct pt_re
- 
- #define INLINE_IRQS
- 
-+#ifdef CONFIG_IPIPE
-+
-+void __ipipe_stall_root(void);
-+void __ipipe_unstall_root(void);
-+unsigned long __ipipe_test_root(void);
-+unsigned long __ipipe_test_and_stall_root(void);
-+void __ipipe_restore_root(unsigned long flags);
-+
-+#define irqs_disabled()  __ipipe_test_root()
-+
-+static inline void local_irq_disable(void)
-+{
-+    __ipipe_stall_root();
-+}
-+
-+static inline void local_irq_enable(void)
-+{
-+    __ipipe_unstall_root();
-+}
-+
-+static inline void local_irq_save_ptr(unsigned long *flags)
-+{
-+    *flags = (!__ipipe_test_and_stall_root()) << 15;
-+}
-+
-+static inline void local_irq_restore(unsigned long flags)
-+{
-+    __ipipe_restore_root(!(flags & MSR_EE));
-+}
-+
-+#define local_save_flags(flags)               ((flags) = (!__ipipe_test_root()) << 15)
-+#define local_irq_save(flags)         local_irq_save_ptr(&flags)
-+
-+static inline void local_irq_disable_hw(void)
-+{
-+      unsigned long msr;
-+      msr = mfmsr();
-+      mtmsr(msr & ~MSR_EE);
-+      __asm__ __volatile__("": : :"memory");
-+}
-+
-+static inline void local_irq_enable_hw(void)
-+{
-+      unsigned long msr;
-+      __asm__ __volatile__("": : :"memory");
-+      msr = mfmsr();
-+      mtmsr(msr | MSR_EE);
-+}
-+
-+static inline void local_irq_save_ptr_hw(unsigned long *flags)
-+{
-+      unsigned long msr;
-+      msr = mfmsr();
-+      *flags = msr;
-+      mtmsr(msr & ~MSR_EE);
-+      __asm__ __volatile__("": : :"memory");
-+}
-+
-+#define local_save_flags_hw(flags)    ((flags) = mfmsr())
-+#define local_irq_save_hw(flags)      local_irq_save_ptr_hw(&flags)
-+#define local_irq_restore_hw(flags)   mtmsr(flags)
-+#define local_test_iflag_hw(x)                ((x) & MSR_EE)
-+#define irqs_disabled_hw()            ((mfmsr() & MSR_EE) == 0)
-+
-+#else /* !CONFIG_IPIPE */
-+
- #define irqs_disabled()       ((mfmsr() & MSR_EE) == 0)
- 
- #ifdef INLINE_IRQS
-@@ -45,6 +111,11 @@ static inline void local_irq_save_ptr(un
- #define local_irq_save(flags)         local_irq_save_ptr(&flags)
- #define local_irq_restore(flags)      mtmsr(flags)
- 
-+#define local_irq_save_hw(flags)      local_irq_save(flags)
-+#define local_irq_restore_hw(flags)   local_irq_restore(flags)
-+#define local_irq_enable_hw()         local_irq_enable()
-+#define local_irq_disable_hw()                local_irq_disable()
-+
- #else
- 
- extern void local_irq_enable(void);
-@@ -57,6 +128,8 @@ extern void local_save_flags_ptr(unsigne
- 
- #endif
- 
-+#endif /* CONFIG_IPIPE */
-+
- extern void do_lost_interrupts(unsigned long);
- 
-#define mask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->disable) irq_desc[irq].handler->disable(irq);})
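
Under CONFIG_IPIPE the local_irq_*() family above only stalls the root stage (a per-CPU software flag), while the *_hw() variants really toggle MSR_EE. A short sketch of what each level protects against:

    /* Sketch: the two masking levels introduced by this header. */
    static void example_masking(void)
    {
            unsigned long flags, hwflags;

            local_irq_save(flags);          /* stalls the root stage: Linux IRQs are
                                               only logged, but a higher priority
                                               domain can still preempt this section */
            /* ... code that only needs protection from Linux handlers ... */
            local_irq_restore(flags);

            local_irq_save_hw(hwflags);     /* clears MSR_EE: nothing preempts */
            /* ... very short critical section ... */
            local_irq_restore_hw(hwflags);
    }
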
---- 2.6.13/include/asm-ppc/ipipe.h     1970-01-01 01:00:00.000000000 +0100
-+++ 2.6.13-ipipe/include/asm-ppc/ipipe.h       2005-10-19 13:04:21.000000000 +0200
-@@ -0,1 +1,178 @@
-+/* -*- linux-c -*-
-+ * include/asm-ppc/ipipe.h
-+ *
-+ * Copyright (C) 2002-2005 Philippe Gerum.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
-+ * USA; either version 2 of the License, or (at your option) any later
-+ * version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ */
-+
-+#ifndef __PPC_IPIPE_H
-+#define __PPC_IPIPE_H
-+
-+#include <linux/config.h>
-+
-+#ifdef CONFIG_IPIPE
-+
-+#include <asm/ptrace.h>
-+#include <asm/irq.h>
-+#include <asm/bitops.h>
-+#include <linux/list.h>
-+#include <linux/cpumask.h>
-+#include <linux/threads.h>
-+
-+#define IPIPE_ARCH_STRING     "1.0-03"
-+#define IPIPE_MAJOR_NUMBER    1
-+#define IPIPE_MINOR_NUMBER    0
-+#define IPIPE_PATCH_NUMBER    3
-+
-+#define IPIPE_NR_XIRQS                NR_IRQS
-+#define IPIPE_IRQ_ISHIFT      5       /* 2^5 for 32bits arch. */
-+
-+/*
-+ * The first virtual interrupt is reserved for the timer (see
-+ * __ipipe_init_platform).
-+ */
-+#define IPIPE_TIMER_VIRQ      IPIPE_VIRQ_BASE
-+#define IPIPE_IRQ_ACKED               0x1000
-+#define IPIPE_IRQ_ACKED_MASK  (IPIPE_IRQ_ACKED - 1)
-+
-+#ifdef CONFIG_SMP
-+#error "I-pipe/ppc: SMP not yet implemented"
-+#define ipipe_processor_id()  (current_thread_info()->cpu)
-+#else /* !CONFIG_SMP */
-+#define ipipe_processor_id()  0
-+#endif        /* CONFIG_SMP */
-+
-+#define prepare_arch_switch(next)                             \
-+do {                                                          \
-+      __ipipe_dispatch_event(IPIPE_EVENT_SCHEDULE,next);      \
-+      local_irq_disable_hw();                                 \
-+} while(0)
-+
-+#define task_hijacked(p)                                      \
-+      ( {                                                     \
-+      int x = ipipe_current_domain != ipipe_root_domain;      \
-+      __clear_bit(IPIPE_SYNC_FLAG,                            \
-+                  &ipipe_root_domain->cpudata[task_cpu(p)].status); \
-+      local_irq_enable_hw(); x;                               \
-+      } )
-+
-+ /* PPC traps */
-+#define IPIPE_TRAP_ACCESS      0      /* Data or instruction access exception */
-+#define IPIPE_TRAP_ALIGNMENT   1      /* Alignment exception */
-+#define IPIPE_TRAP_ALTUNAVAIL  2      /* Altivec unavailable */
-+#define IPIPE_TRAP_PCE                 3      /* Program check exception */
-+#define IPIPE_TRAP_MCE                 4      /* Machine check exception */
-+#define IPIPE_TRAP_UNKNOWN     5      /* Unknown exception */
-+#define IPIPE_TRAP_IABR                6      /* Instruction breakpoint */
-+#define IPIPE_TRAP_RM          7      /* Run mode exception */
-+#define IPIPE_TRAP_SSTEP       8      /* Single-step exception */
-+#define IPIPE_TRAP_NREC                9      /* Non-recoverable exception */
-+#define IPIPE_TRAP_SOFTEMU    10      /* Software emulation */
-+#define IPIPE_TRAP_DEBUG      11      /* Debug exception */
-+#define IPIPE_TRAP_SPE                12      /* SPE exception */
-+#define IPIPE_TRAP_ALTASSIST  13      /* Altivec assist exception */
-+#define IPIPE_NR_FAULTS               14
-+/* Pseudo-vectors used for kernel events */
-+#define IPIPE_FIRST_EVENT     IPIPE_NR_FAULTS
-+#define IPIPE_EVENT_SYSCALL   (IPIPE_FIRST_EVENT)
-+#define IPIPE_EVENT_SCHEDULE  (IPIPE_FIRST_EVENT + 1)
-+#define IPIPE_EVENT_SIGWAKE   (IPIPE_FIRST_EVENT + 2)
-+#define IPIPE_EVENT_SETSCHED  (IPIPE_FIRST_EVENT + 3)
-+#define IPIPE_EVENT_EXIT      (IPIPE_FIRST_EVENT + 4)
-+#define IPIPE_LAST_EVENT      IPIPE_EVENT_EXIT
-+#define IPIPE_NR_EVENTS               (IPIPE_LAST_EVENT + 1)
-+
-+struct ipipe_domain;
-+
-+struct ipipe_sysinfo {
-+
-+      int ncpus;              /* Number of CPUs on board */
-+      u64 cpufreq;            /* CPU frequency (in Hz) */
-+
-+      /* Arch-dependent block */
-+
-+      struct {
-+              unsigned tmirq; /* Timer tick IRQ */
-+              u64 tmfreq;     /* Timer frequency */
-+      } archdep;
-+};
-+
-+#define ipipe_read_tsc(t)                                     \
-+      ({                                                      \
-+      unsigned long __tbu;                                    \
-+      __asm__ __volatile__ ("1: mftbu %0\n"                   \
-+                            "mftb %1\n"                       \
-+                            "mftbu %2\n"                      \
-+                            "cmpw %2,%0\n"                    \
-+                            "bne- 1b\n"                       \
-+                            :"=r" (((unsigned long *)&t)[0]), \
-+                            "=r" (((unsigned long *)&t)[1]),  \
-+                            "=r" (__tbu));                    \
-+      t;                                                      \
-+      })
-+
-+#define __ipipe_read_timebase()                                       \
-+      ({                                                      \
-+      unsigned long long t;                                   \
-+      ipipe_read_tsc(t);                                      \
-+      t;                                                      \
-+      })
-+
-+extern unsigned tb_ticks_per_jiffy;
-+#define ipipe_cpu_freq()      (HZ * tb_ticks_per_jiffy)
-+#define ipipe_tsc2ns(t)               (((t) * 1000) / (ipipe_cpu_freq() / 1000000))
-+
-+/* Private interface -- Internal use only */
-+
-+#define __ipipe_check_platform()      do { } while(0)
-+
-+void __ipipe_init_platform(void);
-+
-+void __ipipe_enable_pipeline(void);
-+
-+void __ipipe_sync_stage(unsigned long syncmask);
-+
-+int __ipipe_ack_irq(unsigned irq);
-+
-+void __ipipe_do_IRQ(int irq,
-+                  struct pt_regs *regs);
-+
-+void __ipipe_do_timer(int irq,
-+                    struct pt_regs *regs);
-+
-+void __ipipe_do_critical_sync(unsigned irq);
-+
-+extern unsigned long __ipipe_decr_ticks;
-+
-+extern unsigned long long __ipipe_decr_next[];
-+
-+extern struct pt_regs __ipipe_tick_regs[];
-+
-+void __ipipe_handle_irq(int irq,
-+                      struct pt_regs *regs);
-+
-+#define __ipipe_tick_irq      IPIPE_TIMER_VIRQ
-+
-+#else /* !CONFIG_IPIPE */
-+
-+#define task_hijacked(p)      0
-+#define ipipe_trap_notify(t,r)  0
-+
-+#endif /* CONFIG_IPIPE */
-+
-+#endif        /* !__PPC_IPIPE_H */
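
The timebase accessors defined above can be combined into simple latency measurements; a minimal sketch (example_measure_ns() is illustrative, not part of the patch):

    /* Sketch: timestamping with ipipe_read_tsc()/ipipe_tsc2ns(). */
    static unsigned long long example_measure_ns(void)
    {
            unsigned long long t0, t1;

            ipipe_read_tsc(t0);             /* coherent 64-bit read (mftbu/mftb loop) */
            /* ... section under measurement ... */
            ipipe_read_tsc(t1);

            return ipipe_tsc2ns(t1 - t0);   /* timebase ticks -> nanoseconds */
    }
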
---- 2.6.13/include/asm-ppc/mmu_context.h       2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/include/asm-ppc/mmu_context.h 2005-09-06 15:59:50.000000000 +0200
-@@ -149,7 +149,10 @@ static inline void get_mmu_context(struc
-  */
- static inline void destroy_context(struct mm_struct *mm)
- {
-+      unsigned long flags;
-+
-       preempt_disable();
-+      local_irq_save_hw_cond(flags);
-       if (mm->context != NO_CONTEXT) {
-               clear_bit(mm->context, context_map);
-               mm->context = NO_CONTEXT;
-@@ -157,6 +160,7 @@ static inline void destroy_context(struc
-               atomic_inc(&nr_free_contexts);
- #endif
-       }
-+      local_irq_restore_hw_cond(flags);
-       preempt_enable();
- }
- 
-@@ -191,7 +195,13 @@ static inline void switch_mm(struct mm_s
-  * After we have set current->mm to a new value, this activates
-  * the context for the new mm so we see the new mappings.
-  */
--#define activate_mm(active_mm, mm)   switch_mm(active_mm, mm, current)
-+#define activate_mm(active_mm, mm)   \
-+do { \
-+      unsigned long flags; \
-+      local_irq_save_hw_cond(flags); \
-+      switch_mm(active_mm, mm, current); \
-+      local_irq_restore_hw_cond(flags);          \
-+} while(0)
- 
- extern void mmu_context_init(void);
- 
---- 2.6.13/include/asm-ppc/pgalloc.h   2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/include/asm-ppc/pgalloc.h     2005-09-06 15:47:00.000000000 +0200
-@@ -40,5 +40,10 @@ extern void pte_free(struct page *pte);
- 
- #define check_pgt_cache()     do { } while (0)
- 
-+static inline void set_pgdir(unsigned long address, pgd_t entry)
-+{
-+    /* nop */
-+}
-+
- #endif /* _PPC_PGALLOC_H */
- #endif /* __KERNEL__ */
-diff -uNrp 2.6.13/include/linux/hardirq.h 2.6.13-ipipe/include/linux/hardirq.h
---- 2.6.13/include/linux/hardirq.h     2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/include/linux/hardirq.h       2005-09-07 13:24:50.000000000 +0200
-@@ -87,8 +87,21 @@ extern void synchronize_irq(unsigned int
- # define synchronize_irq(irq) barrier()
- #endif
- 
-+#ifdef CONFIG_IPIPE
-+#define nmi_enter() \
-+do { \
-+    if (ipipe_current_domain == ipipe_root_domain) \
-+      irq_enter(); \
-+} while(0)
-+#define nmi_exit() \
-+do { \
-+    if (ipipe_current_domain == ipipe_root_domain) \
-+      sub_preempt_count(HARDIRQ_OFFSET); \
-+} while(0)
-+#else /* !CONFIG_IPIPE */
- #define nmi_enter()           irq_enter()
- #define nmi_exit()            sub_preempt_count(HARDIRQ_OFFSET)
-+#endif /* CONFIG_IPIPE */
- 
- #ifndef CONFIG_VIRT_CPU_ACCOUNTING
- static inline void account_user_vtime(struct task_struct *tsk)
-diff -uNrp 2.6.13/include/linux/ipipe.h 2.6.13-ipipe/include/linux/ipipe.h
---- 2.6.13/include/linux/ipipe.h       1970-01-01 01:00:00.000000000 +0100
-+++ 2.6.13-ipipe/include/linux/ipipe.h 2005-10-19 15:04:12.000000000 +0200
-@@ -0,0 +1,746 @@
-+/* -*- linux-c -*-
-+ * include/linux/ipipe.h
-+ *
-+ * Copyright (C) 2002-2005 Philippe Gerum.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
-+ * USA; either version 2 of the License, or (at your option) any later
-+ * version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ */
-+
-+#ifndef __LINUX_IPIPE_H
-+#define __LINUX_IPIPE_H
-+
-+#include <linux/config.h>
-+#include <linux/spinlock.h>
-+#include <asm/ipipe.h>
-+
-+#ifdef CONFIG_IPIPE
-+
-+#define IPIPE_VERSION_STRING  IPIPE_ARCH_STRING
-+#define IPIPE_RELEASE_NUMBER  ((IPIPE_MAJOR_NUMBER << 16) | \
-+                               (IPIPE_MINOR_NUMBER <<  8) | \
-+                               (IPIPE_PATCH_NUMBER))
-+
-+#define IPIPE_ROOT_PRIO               100
-+#define IPIPE_ROOT_ID         0
-+#define IPIPE_ROOT_NPTDKEYS   4       /* Must be <= BITS_PER_LONG */
-+
-+#define IPIPE_RESET_TIMER     0x1
-+#define IPIPE_GRAB_TIMER      0x2
-+#define IPIPE_SAME_HANDLER    ((void (*)(unsigned))(-1))
-+
-+/* Global domain flags */
-+#define IPIPE_SPRINTK_FLAG    0       /* Synchronous printk() allowed */
-+#define IPIPE_PPRINTK_FLAG    1       /* Asynchronous printk() request pending */
-+
-+#define IPIPE_STALL_FLAG      0       /* Stalls a pipeline stage */
-+#define IPIPE_SYNC_FLAG               1       /* The interrupt syncer is running for the domain */
-+
-+#define IPIPE_HANDLE_FLAG     0
-+#define IPIPE_PASS_FLAG               1
-+#define IPIPE_ENABLE_FLAG     2
-+#define IPIPE_DYNAMIC_FLAG    IPIPE_HANDLE_FLAG
-+#define IPIPE_STICKY_FLAG     3
-+#define IPIPE_SYSTEM_FLAG     4
-+#define IPIPE_LOCK_FLAG               5
-+#define IPIPE_SHARED_FLAG     6
-+#define IPIPE_EXCLUSIVE_FLAG  31      /* ipipe_catch_event() is the reason why. */
-+
-+#define IPIPE_HANDLE_MASK     (1 << IPIPE_HANDLE_FLAG)
-+#define IPIPE_PASS_MASK               (1 << IPIPE_PASS_FLAG)
-+#define IPIPE_ENABLE_MASK     (1 << IPIPE_ENABLE_FLAG)
-+#define IPIPE_DYNAMIC_MASK    IPIPE_HANDLE_MASK
-+#define IPIPE_EXCLUSIVE_MASK  (1 << IPIPE_EXCLUSIVE_FLAG)
-+#define IPIPE_STICKY_MASK     (1 << IPIPE_STICKY_FLAG)
-+#define IPIPE_SYSTEM_MASK     (1 << IPIPE_SYSTEM_FLAG)
-+#define IPIPE_LOCK_MASK               (1 << IPIPE_LOCK_FLAG)
-+#define IPIPE_SHARED_MASK     (1 << IPIPE_SHARED_FLAG)
-+#define IPIPE_SYNC_MASK               (1 << IPIPE_SYNC_FLAG)
-+
-+#define IPIPE_DEFAULT_MASK    (IPIPE_HANDLE_MASK|IPIPE_PASS_MASK)
-+#define IPIPE_STDROOT_MASK    (IPIPE_HANDLE_MASK|IPIPE_PASS_MASK|IPIPE_SYSTEM_MASK)
-+
-+/* Number of virtual IRQs */
-+#define IPIPE_NR_VIRQS                BITS_PER_LONG
-+/* First virtual IRQ # */
-+#define IPIPE_VIRQ_BASE               (((IPIPE_NR_XIRQS + BITS_PER_LONG - 1) / BITS_PER_LONG) * BITS_PER_LONG)
-+/* Total number of IRQ slots */
-+#define IPIPE_NR_IRQS         (IPIPE_VIRQ_BASE + IPIPE_NR_VIRQS)
-+/* Number of indirect words needed to map the whole IRQ space. */
-+#define IPIPE_IRQ_IWORDS      ((IPIPE_NR_IRQS + BITS_PER_LONG - 1) / BITS_PER_LONG)
-+#define IPIPE_IRQ_IMASK               (BITS_PER_LONG - 1)
-+#define IPIPE_IRQMASK_ANY     (~0L)
-+#define IPIPE_IRQMASK_VIRT    (IPIPE_IRQMASK_ANY << (IPIPE_VIRQ_BASE / BITS_PER_LONG))
-+
-+#ifdef CONFIG_SMP
-+
-+#define IPIPE_NR_CPUS         NR_CPUS
-+#define ipipe_declare_cpuid   int cpuid
-+#define ipipe_load_cpuid()    do { \
-+                                      (cpuid) = ipipe_processor_id(); \
-+                              } while(0)
-+#define ipipe_lock_cpu(flags) do { \
-+                                      local_irq_save_hw(flags); \
-+                                      (cpuid) = ipipe_processor_id(); \
-+                              } while(0)
-+#define ipipe_unlock_cpu(flags)       local_irq_restore_hw(flags)
-+#define ipipe_get_cpu(flags)  ipipe_lock_cpu(flags)
-+#define ipipe_put_cpu(flags)  ipipe_unlock_cpu(flags)
-+#define ipipe_current_domain  (ipipe_percpu_domain[ipipe_processor_id()])
-+
-+#else /* !CONFIG_SMP */
-+
-+#define IPIPE_NR_CPUS         1
-+#define ipipe_declare_cpuid   const int cpuid = 0
-+#define ipipe_load_cpuid()    do { } while(0)
-+#define ipipe_lock_cpu(flags) local_irq_save_hw(flags)
-+#define ipipe_unlock_cpu(flags)       local_irq_restore_hw(flags)
-+#define ipipe_get_cpu(flags)  do { flags = 0; } while(0)
-+#define ipipe_put_cpu(flags)  do { } while(0)
-+#define ipipe_current_domain  (ipipe_percpu_domain[0])
-+
-+#endif /* CONFIG_SMP */
-+
-+#define ipipe_virtual_irq_p(irq)      ((irq) >= IPIPE_VIRQ_BASE && \
-+                                       (irq) < IPIPE_NR_IRQS)
-+
-+struct ipipe_domain {
-+
-+      struct list_head p_link;        /* Link in pipeline */
-+
-+      struct ipcpudata {
-+              unsigned long status;
-+              unsigned long irq_pending_hi;
-+              unsigned long irq_pending_lo[IPIPE_IRQ_IWORDS];
-+              unsigned char irq_hits[IPIPE_NR_IRQS];
-+      } cpudata[IPIPE_NR_CPUS];
-+
-+      struct {
-+              int (*acknowledge) (unsigned irq);
-+              void (*handler) (unsigned irq);
-+              unsigned long control;
-+      } irqs[IPIPE_NR_IRQS];
-+
-+      int (*evhand[IPIPE_NR_EVENTS])(unsigned event,
-+                                     struct ipipe_domain *from,
-+                                     void *data); /* Event handlers. */
-+      unsigned long evexcl;   /* Exclusive event bits. */
-+
-+#ifdef CONFIG_IPIPE_STATS
-+      struct ipipe_stats { /* All in timebase units. */
-+              unsigned long long last_stall_date;
-+              unsigned long last_stall_eip;
-+              unsigned long max_stall_time;
-+              unsigned long max_stall_eip;
-+              struct ipipe_irq_stats {
-+                      unsigned long long last_receipt_date;
-+                      unsigned long max_delivery_time;
-+              } irq_stats[IPIPE_NR_IRQS];
-+      } stats[IPIPE_NR_CPUS];
-+#endif /* CONFIG_IPIPE_STATS */
-+      unsigned long flags;
-+      unsigned domid;
-+      const char *name;
-+      int priority;
-+      void *pdd;
-+};
-+
-+struct ipipe_domain_attr {
-+
-+      unsigned domid;         /* Domain identifier -- Magic value set by caller */
-+      const char *name;       /* Domain name -- Warning: won't be dup'ed! */
-+      int priority;           /* Priority in interrupt pipeline */
-+      void (*entry) (void);   /* Domain entry point */
-+      void *pdd;              /* Per-domain (opaque) data pointer */
-+};
-+
-+/* The following macros must be used with hw interrupts off. */
-+
-+#define __ipipe_set_irq_bit(ipd,cpuid,irq) \
-+do { \
-+      if (!test_bit(IPIPE_LOCK_FLAG,&(ipd)->irqs[irq].control)) { \
-+              __set_bit(irq & IPIPE_IRQ_IMASK,&(ipd)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
-+              __set_bit(irq >> IPIPE_IRQ_ISHIFT,&(ipd)->cpudata[cpuid].irq_pending_hi); \
-+      } \
-+} while(0)
-+
-+#define __ipipe_clear_pend(ipd,cpuid,irq) \
-+do { \
-+      __clear_bit(irq & IPIPE_IRQ_IMASK,&(ipd)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
-+      if ((ipd)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT] == 0) \
-+              __clear_bit(irq >> IPIPE_IRQ_ISHIFT,&(ipd)->cpudata[cpuid].irq_pending_hi); \
-+} while(0)
-+
-+#define __ipipe_lock_irq(ipd,cpuid,irq) \
-+do { \
-+      if (!test_and_set_bit(IPIPE_LOCK_FLAG,&(ipd)->irqs[irq].control)) \
-+              __ipipe_clear_pend(ipd,cpuid,irq); \
-+} while(0)
-+
-+#define __ipipe_unlock_irq(ipd,irq) \
-+do { \
-+      int __cpuid, __nr_cpus = num_online_cpus(); \
-+      if (test_and_clear_bit(IPIPE_LOCK_FLAG,&(ipd)->irqs[irq].control)) \
-+              for (__cpuid = 0; __cpuid < __nr_cpus; __cpuid++) \
-+                      if ((ipd)->cpudata[__cpuid].irq_hits[irq] > 0) { /* We need atomic ops next. */ \
-+                              set_bit(irq & IPIPE_IRQ_IMASK,&(ipd)->cpudata[__cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
-+                              set_bit(irq >> IPIPE_IRQ_ISHIFT,&(ipd)->cpudata[__cpuid].irq_pending_hi); \
-+                      } \
-+} while(0)
-+
-+#define __ipipe_clear_irq(ipd,irq) \
-+do { \
-+      int __cpuid, __nr_cpus = num_online_cpus(); \
-+      clear_bit(IPIPE_LOCK_FLAG,&(ipd)->irqs[irq].control); \
-+      for (__cpuid = 0; __cpuid < __nr_cpus; __cpuid++) { \
-+              (ipd)->cpudata[__cpuid].irq_hits[irq] = 0; \
-+              __ipipe_clear_pend(ipd,__cpuid,irq); \
-+      } \
-+} while(0)
-+
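
These pending-IRQ macros spread one IRQ number across a two-level bitmap: irq_pending_lo[] holds one bit per IRQ, irq_pending_hi holds one bit per irq_pending_lo word. A worked example of the indexing (assuming 32-bit longs, i.e. IPIPE_IRQ_ISHIFT == 5):

    /* Illustrative only. */
    static inline void example_pending_index(void)
    {
            unsigned irq = 70;
            unsigned level = irq >> IPIPE_IRQ_ISHIFT;   /* 70 / 32 = 2 -> irq_pending_lo[2] */
            unsigned rank  = irq & IPIPE_IRQ_IMASK;     /* 70 % 32 = 6 -> bit 6 of that word */

            /* __ipipe_sync_stage() reverses this:
             * irq = (level << IPIPE_IRQ_ISHIFT) + rank. */
            (void)level; (void)rank;
    }
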
-+#ifdef RAW_SPIN_LOCK_UNLOCKED /* PREEMPT_RT kernel */
-+#define spin_lock_hw(x)               __raw_spin_lock(x)
-+#define spin_unlock_hw(x)     __raw_spin_unlock(x)
-+#define spin_trylock_hw(x)    __raw_spin_trylock(x)
-+#define write_lock_hw(x)      __raw_write_lock(x)
-+#define write_unlock_hw(x)    __raw_write_unlock(x)
-+#define write_trylock_hw(x)   __raw_write_trylock(x)
-+#define read_lock_hw(x)               __raw_read_lock(x)
-+#define read_unlock_hw(x)     __raw_read_unlock(x)
-+#else /* !RAW_SPIN_LOCK_UNLOCKED */
-+#define spin_lock_hw(x)               _spin_lock(x)
-+#define spin_unlock_hw(x)     _spin_unlock(x)
-+#define spin_trylock_hw(x)    _spin_trylock(x)
-+#define write_lock_hw(x)      _write_lock(x)
-+#define write_unlock_hw(x)    _write_unlock(x)
-+#define write_trylock_hw(x)   _write_trylock(x)
-+#define read_lock_hw(x)               _read_lock(x)
-+#define read_unlock_hw(x)     _read_unlock(x)
-+#define raw_spinlock_t                spinlock_t
-+#define RAW_SPIN_LOCK_UNLOCKED        SPIN_LOCK_UNLOCKED
-+#define raw_rwlock_t          rwlock_t
-+#define RAW_RW_LOCK_UNLOCKED  RW_LOCK_UNLOCKED
-+#endif        /* RAW_SPIN_LOCK_UNLOCKED */
-+
-+#define spin_lock_irqsave_hw(x,flags)         \
-+do {                                          \
-+      local_irq_save_hw(flags);               \
-+      spin_lock_hw(x);                        \
-+} while (0)
-+
-+#define spin_unlock_irqrestore_hw(x,flags)    \
-+do {                                          \
-+      spin_unlock_hw(x);                      \
-+      local_irq_restore_hw(flags);            \
-+} while (0)
-+
-+#define spin_lock_irq_hw(x)                   \
-+do {                                          \
-+      local_irq_disable_hw();                 \
-+      spin_lock_hw(x);                        \
-+} while (0)
-+
-+#define spin_unlock_irq_hw(x)                 \
-+do {                                          \
-+      spin_unlock_hw(x);                      \
-+      local_irq_enable_hw();                  \
-+} while (0)
-+
-+#define read_lock_irqsave_hw(lock, flags)     \
-+do {                                          \
-+      local_irq_save_hw(flags);               \
-+      read_lock_hw(lock);                     \
-+} while (0)
-+
-+#define read_unlock_irqrestore_hw(lock, flags)        \
-+do {                                          \
-+      read_unlock_hw(lock);                   \
-+      local_irq_restore_hw(flags);            \
-+} while (0)
-+
-+#define write_lock_irqsave_hw(lock, flags)    \
-+do {                                          \
-+      local_irq_save_hw(flags);               \
-+      write_lock_hw(lock);                    \
-+} while (0)
-+
-+#define write_unlock_irqrestore_hw(lock, flags)       \
-+do {                                          \
-+      write_unlock_hw(lock);                  \
-+      local_irq_restore_hw(flags);            \
-+} while (0)
-+
-+extern struct ipipe_domain *ipipe_percpu_domain[], *ipipe_root_domain;
-+
-+extern unsigned __ipipe_printk_virq;
-+
-+extern unsigned long __ipipe_virtual_irq_map;
-+
-+extern struct list_head __ipipe_pipeline;
-+
-+extern raw_spinlock_t __ipipe_pipelock;
-+
-+extern int __ipipe_event_monitors[];
-+
-+/* Private interface */
-+
-+void ipipe_init(void);
-+
-+#ifdef CONFIG_PROC_FS
-+void ipipe_init_proc(void);
-+#else /* !CONFIG_PROC_FS */
-+#define ipipe_init_proc()     do { } while(0)
-+#endif        /* CONFIG_PROC_FS */
-+
-+void __ipipe_init_stage(struct ipipe_domain *ipd);
-+
-+void __ipipe_cleanup_domain(struct ipipe_domain *ipd);
-+
-+void __ipipe_add_domain_proc(struct ipipe_domain *ipd);
-+
-+void __ipipe_remove_domain_proc(struct ipipe_domain *ipd);
-+
-+void __ipipe_flush_printk(unsigned irq);
-+
-+void __ipipe_stall_root(void);
-+
-+void __ipipe_unstall_root(void);
-+
-+unsigned long __ipipe_test_root(void);
-+
-+unsigned long __ipipe_test_and_stall_root(void);
-+
-+void fastcall __ipipe_restore_root(unsigned long flags);
-+
-+int fastcall __ipipe_schedule_irq(unsigned irq, struct list_head *head);
-+
-+int fastcall __ipipe_dispatch_event(unsigned event, void *data);
-+
-+#define __ipipe_pipeline_head_p(ipd) (&(ipd)->p_link == __ipipe_pipeline.next)
-+
-+#ifdef CONFIG_SMP
-+
-+cpumask_t __ipipe_set_irq_affinity(unsigned irq,
-+                                 cpumask_t cpumask);
-+
-+int fastcall __ipipe_send_ipi(unsigned ipi,
-+                            cpumask_t cpumask);
-+
-+#endif /* CONFIG_SMP */
-+
-+/* Called with hw interrupts off. */
-+static inline void __ipipe_switch_to(struct ipipe_domain *out,
-+                                   struct ipipe_domain *in, int cpuid)
-+{
-+      void ipipe_suspend_domain(void);
-+
-+      /*
-+       * "in" is guaranteed to be closer than "out" from the head of the
-+       * pipeline (and obviously different).
-+       */
-+
-+      ipipe_percpu_domain[cpuid] = in;
-+
-+      ipipe_suspend_domain(); /* Sync stage and propagate interrupts. */
-+      ipipe_load_cpuid();     /* Processor might have changed. */
-+
-+      if (ipipe_percpu_domain[cpuid] == in)
-+              /*
-+               * Otherwise, something has changed the current domain under
-+               * our feet recycling the register set; do not override.
-+               */
-+              ipipe_percpu_domain[cpuid] = out;
-+}
-+
-+static inline void ipipe_sigwake_notify(struct task_struct *p)
-+{
-+      if (__ipipe_event_monitors[IPIPE_EVENT_SIGWAKE] > 0)
-+              __ipipe_dispatch_event(IPIPE_EVENT_SIGWAKE,p);
-+}
-+
-+static inline void ipipe_setsched_notify(struct task_struct *p)
-+{
-+      if (__ipipe_event_monitors[IPIPE_EVENT_SETSCHED] > 0)
-+              __ipipe_dispatch_event(IPIPE_EVENT_SETSCHED,p);
-+}
-+
-+static inline void ipipe_exit_notify(struct task_struct *p)
-+{
-+      if (__ipipe_event_monitors[IPIPE_EVENT_EXIT] > 0)
-+              __ipipe_dispatch_event(IPIPE_EVENT_EXIT,p);
-+}
-+
-+static inline int ipipe_trap_notify(int ex, struct pt_regs *regs)
-+{
-+      return __ipipe_event_monitors[ex] ? __ipipe_dispatch_event(ex,regs) : 0;
-+}
-+
-+#ifdef CONFIG_IPIPE_STATS
-+
-+#define ipipe_mark_domain_stall(ipd, cpuid)                   \
-+do {                                                          \
-+      __label__ here;                                         \
-+      struct ipipe_stats *ips;                                \
-+here:                                                         \
-+      ips = (ipd)->stats + cpuid;                             \
-+      if (ips->last_stall_date == 0) {                        \
-+              ipipe_read_tsc(ips->last_stall_date);           \
-+              ips->last_stall_eip = (unsigned long)&&here;    \
-+      }                                                       \
-+} while(0)
-+
-+static inline void ipipe_mark_domain_unstall(struct ipipe_domain *ipd, int cpuid)
-+{ /* Called w/ hw interrupts off. */
-+      struct ipipe_stats *ips = ipd->stats + cpuid;
-+      unsigned long long t, d;
-+
-+      if (ips->last_stall_date != 0) {
-+              ipipe_read_tsc(t);
-+              d = t - ips->last_stall_date;
-+              if (d > ips->max_stall_time) {
-+                      ips->max_stall_time = d;
-+                      ips->max_stall_eip = ips->last_stall_eip;
-+              }
-+              ips->last_stall_date = 0;
-+      }
-+}
-+
-+static inline void ipipe_mark_irq_receipt(struct ipipe_domain *ipd, unsigned irq, int cpuid)
-+{
-+      struct ipipe_stats *ips = ipd->stats + cpuid;
-+
-+      if (ips->irq_stats[irq].last_receipt_date == 0) {
-+              ipipe_read_tsc(ips->irq_stats[irq].last_receipt_date);
-+      }
-+}
-+
-+static inline void ipipe_mark_irq_delivery(struct ipipe_domain *ipd, unsigned irq, int cpuid)
-+{ /* Called w/ hw interrupts off. */
-+      struct ipipe_stats *ips = ipd->stats + cpuid;
-+      unsigned long long t, d;
-+
-+      if (ips->irq_stats[irq].last_receipt_date != 0) {
-+              ipipe_read_tsc(t);
-+              d = t - ips->irq_stats[irq].last_receipt_date;
-+              ips->irq_stats[irq].last_receipt_date = 0;
-+              if (d > ips->irq_stats[irq].max_delivery_time)
-+                      ips->irq_stats[irq].max_delivery_time = d;
-+      }
-+}
-+
-+static inline void ipipe_reset_stats (void)
-+{
-+      int cpu, irq;
-+      for_each_online_cpu(cpu) {
-+              ipipe_root_domain->stats[cpu].last_stall_date = 0LL;
-+              for (irq = 0; irq < IPIPE_NR_IRQS; irq++)
-+                      ipipe_root_domain->stats[cpu].irq_stats[irq].last_receipt_date = 0LL;
-+      }
-+}
-+
-+#else /* !CONFIG_IPIPE_STATS */
-+
-+#define ipipe_mark_domain_stall(ipd,cpuid)    do { } while(0)
-+#define ipipe_mark_domain_unstall(ipd,cpuid)  do { } while(0)
-+#define ipipe_mark_irq_receipt(ipd,irq,cpuid) do { } while(0)
-+#define ipipe_mark_irq_delivery(ipd,irq,cpuid)        do { } while(0)
-+#define ipipe_reset_stats()                   do { } while(0)
-+
-+#endif /* CONFIG_IPIPE_STATS */
-+
-+/* Public interface */
-+
-+int ipipe_register_domain(struct ipipe_domain *ipd,
-+                        struct ipipe_domain_attr *attr);
-+
-+int ipipe_unregister_domain(struct ipipe_domain *ipd);
-+
-+void ipipe_suspend_domain(void);
-+
-+int ipipe_virtualize_irq(struct ipipe_domain *ipd,
-+                       unsigned irq,
-+                       void (*handler) (unsigned irq),
-+                       int (*acknowledge) (unsigned irq),
-+                       unsigned modemask);
-+
-+static inline int ipipe_share_irq(unsigned irq,
-+                                int (*acknowledge) (unsigned irq))
-+{
-+      return ipipe_virtualize_irq(ipipe_current_domain,
-+                                  irq,
-+                                  IPIPE_SAME_HANDLER,
-+                                  acknowledge,
-+                                  IPIPE_SHARED_MASK | IPIPE_HANDLE_MASK |
-+                                  IPIPE_PASS_MASK);
-+}
-+
-+int ipipe_control_irq(unsigned irq,
-+                    unsigned clrmask,
-+                    unsigned setmask);
-+
-+unsigned ipipe_alloc_virq(void);
-+
-+int ipipe_free_virq(unsigned virq);
-+
-+int fastcall ipipe_trigger_irq(unsigned irq);
-+
-+static inline int ipipe_propagate_irq(unsigned irq)
-+{
-+
-+      return __ipipe_schedule_irq(irq, ipipe_current_domain->p_link.next);
-+}
-+
-+static inline int ipipe_schedule_irq(unsigned irq)
-+{
-+
-+      return __ipipe_schedule_irq(irq, &ipipe_current_domain->p_link);
-+}
-+
-+static inline void ipipe_stall_pipeline_from(struct ipipe_domain *ipd)
-+{
-+      ipipe_declare_cpuid;
-+#ifdef CONFIG_SMP
-+      unsigned long flags;
-+
-+      ipipe_lock_cpu(flags); /* Care for migration. */
-+
-+      __set_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
-+      ipipe_mark_domain_stall(ipd, cpuid);
-+
-+      if (!__ipipe_pipeline_head_p(ipd))
-+              ipipe_unlock_cpu(flags);
-+#else /* CONFIG_SMP */
-+      set_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
-+      ipipe_mark_domain_stall(ipd, cpuid);
-+
-+      if (__ipipe_pipeline_head_p(ipd))
-+              local_irq_disable_hw();
-+#endif        /* CONFIG_SMP */
-+}
-+
-+static inline unsigned long ipipe_test_pipeline_from(struct ipipe_domain *ipd)
-+{
-+      unsigned long flags, s;
-+      ipipe_declare_cpuid;
-+
-+      ipipe_get_cpu(flags);
-+      s = test_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
-+      ipipe_put_cpu(flags);
-+
-+      return s;
-+}
-+
-+static inline unsigned long ipipe_test_and_stall_pipeline_from(struct
-+                                                             ipipe_domain
-+                                                             *ipd)
-+{
-+      ipipe_declare_cpuid;
-+      unsigned long s;
-+#ifdef CONFIG_SMP
-+      unsigned long flags;
-+
-+      ipipe_lock_cpu(flags); /* Care for migration. */
-+
-+      s = __test_and_set_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
-+      ipipe_mark_domain_stall(ipd, cpuid);
-+
-+      if (!__ipipe_pipeline_head_p(ipd))
-+              ipipe_unlock_cpu(flags);
-+#else /* CONFIG_SMP */
-+      s = test_and_set_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
-+      ipipe_mark_domain_stall(ipd, cpuid);
-+
-+      if (__ipipe_pipeline_head_p(ipd))
-+              local_irq_disable_hw();
-+#endif        /* CONFIG_SMP */
-+
-+      return s;
-+}
-+
-+void fastcall ipipe_unstall_pipeline_from(struct ipipe_domain *ipd);
-+
-+static inline unsigned long ipipe_test_and_unstall_pipeline_from(struct
-+                                                               ipipe_domain
-+                                                               *ipd)
-+{
-+      unsigned long flags, s;
-+      ipipe_declare_cpuid;
-+
-+      ipipe_get_cpu(flags);
-+      s = test_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
-+      ipipe_unstall_pipeline_from(ipd);
-+      ipipe_put_cpu(flags);
-+
-+      return s;
-+}
-+
-+static inline void ipipe_unstall_pipeline(void)
-+{
-+      ipipe_unstall_pipeline_from(ipipe_current_domain);
-+}
-+
-+static inline unsigned long ipipe_test_and_unstall_pipeline(void)
-+{
-+      return ipipe_test_and_unstall_pipeline_from(ipipe_current_domain);
-+}
-+
-+static inline unsigned long ipipe_test_pipeline(void)
-+{
-+      return ipipe_test_pipeline_from(ipipe_current_domain);
-+}
-+
-+static inline unsigned long ipipe_test_and_stall_pipeline(void)
-+{
-+      return ipipe_test_and_stall_pipeline_from(ipipe_current_domain);
-+}
-+
-+static inline void ipipe_restore_pipeline_from(struct ipipe_domain *ipd,
-+                                             unsigned long flags)
-+{
-+      if (flags)
-+              ipipe_stall_pipeline_from(ipd);
-+      else
-+              ipipe_unstall_pipeline_from(ipd);
-+}
-+
-+static inline void ipipe_stall_pipeline(void)
-+{
-+      ipipe_stall_pipeline_from(ipipe_current_domain);
-+}
-+
-+static inline void ipipe_restore_pipeline(unsigned long flags)
-+{
-+      ipipe_restore_pipeline_from(ipipe_current_domain, flags);
-+}
-+
-+static inline void ipipe_restore_pipeline_nosync(struct ipipe_domain *ipd,
-+                                               unsigned long flags, int cpuid)
-+{
-+      /*
-+       * If cpuid is current, then it must be held on entry
-+       * (ipipe_get_cpu/local_irq_save_hw/local_irq_disable_hw).
-+       */
-+
-+      if (flags) {
-+              __set_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
-+              ipipe_mark_domain_stall(ipd,cpuid);
-+      }
-+      else {
-+              __clear_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
-+              ipipe_mark_domain_unstall(ipd,cpuid);
-+      }
-+}
-+
-+void ipipe_init_attr(struct ipipe_domain_attr *attr);
-+
-+int ipipe_get_sysinfo(struct ipipe_sysinfo *sysinfo);
-+
-+int ipipe_tune_timer(unsigned long ns,
-+                   int flags);
-+
-+unsigned long ipipe_critical_enter(void (*syncfn) (void));
-+
-+void ipipe_critical_exit(unsigned long flags);
-+
-+static inline void ipipe_set_printk_sync(struct ipipe_domain *ipd)
-+{
-+      set_bit(IPIPE_SPRINTK_FLAG, &ipd->flags);
-+}
-+
-+static inline void ipipe_set_printk_async(struct ipipe_domain *ipd)
-+{
-+      clear_bit(IPIPE_SPRINTK_FLAG, &ipd->flags);
-+}
-+
-+int ipipe_catch_event(struct ipipe_domain *ipd,
-+                    unsigned event,
-+                    int (*handler)(unsigned event,
-+                                   struct ipipe_domain *ipd,
-+                                   void *data));
-+
-+cpumask_t ipipe_set_irq_affinity(unsigned irq,
-+                               cpumask_t cpumask);
-+
-+int fastcall ipipe_send_ipi(unsigned ipi,
-+                          cpumask_t cpumask);
-+
-+int ipipe_setscheduler_root(struct task_struct *p,
-+                          int policy,
-+                          int prio);
-+
-+int ipipe_reenter_root(struct task_struct *prev,
-+                     int policy,
-+                     int prio);
-+
-+int ipipe_alloc_ptdkey(void);
-+
-+int ipipe_free_ptdkey(int key);
-+
-+int fastcall ipipe_set_ptd(int key,
-+                         void *value);
-+
-+void fastcall *ipipe_get_ptd(int key);
-+
-+#define local_irq_enable_hw_cond()            local_irq_enable_hw()
-+#define local_irq_disable_hw_cond()           local_irq_disable_hw()
-+#define local_irq_save_hw_cond(flags)         local_irq_save_hw(flags)
-+#define local_irq_restore_hw_cond(flags)      local_irq_restore_hw(flags)
-+#define spin_lock_irqsave_hw_cond(lock,flags) spin_lock_irqsave_hw(lock,flags)
-+#define spin_unlock_irqrestore_hw_cond(lock,flags) spin_unlock_irqrestore_hw(lock,flags)
-+
-+#define ipipe_irq_lock(irq)                                           \
-+      do {                                                            \
-+              ipipe_declare_cpuid;                                    \
-+              ipipe_load_cpuid();                                     \
-+              __ipipe_lock_irq(ipipe_percpu_domain[cpuid], cpuid, irq);\
-+      } while(0)
-+
-+#define ipipe_irq_unlock(irq)                                         \
-+      do {                                                            \
-+              ipipe_declare_cpuid;                                    \
-+              ipipe_load_cpuid();                                     \
-+              __ipipe_unlock_irq(ipipe_percpu_domain[cpuid], irq);    \
-+      } while(0)
-+
-+#else /* !CONFIG_IPIPE */
-+
-+#define ipipe_init()                          do { } while(0)
-+#define ipipe_suspend_domain()                        do { } while(0)
-+#define ipipe_sigwake_notify(p)                       do { } while(0)
-+#define ipipe_setsched_notify(p)              do { } while(0)
-+#define ipipe_exit_notify(p)                  do { } while(0)
-+#define ipipe_init_proc()                     do { } while(0)
-+#define ipipe_reset_stats()                   do { } while(0)
-+
-+#define spin_lock_hw(lock)                    spin_lock(lock)
-+#define spin_unlock_hw(lock)                  spin_unlock(lock)
-+#define spin_lock_irq_hw(lock)                        spin_lock_irq(lock)
-+#define spin_unlock_irq_hw(lock)              spin_unlock_irq(lock)
-+#define spin_lock_irqsave_hw(lock,flags)      spin_lock_irqsave(lock, flags)
-+#define spin_unlock_irqrestore_hw(lock,flags) spin_unlock_irqrestore(lock, flags)
-+
-+#define local_irq_enable_hw_cond()            do { } while(0)
-+#define local_irq_disable_hw_cond()           do { } while(0)
-+#define local_irq_save_hw_cond(flags)         do { flags = 0; /* Optimized out */ } while(0)
-+#define local_irq_restore_hw_cond(flags)      do { } while(0)
-+#define spin_lock_irqsave_hw_cond(lock,flags) do { flags = 0; spin_lock(lock); } while(0)
-+#define spin_unlock_irqrestore_hw_cond(lock,flags)    spin_unlock(lock)
-+
-+#define ipipe_irq_lock(irq)                   do { } while(0)
-+#define ipipe_irq_unlock(irq)                 do { } while(0)
-+
-+#endif        /* CONFIG_IPIPE */
-+
-+#endif        /* !__LINUX_IPIPE_H */
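
For reference, the stall/unstall primitives declared in the header above virtualize the usual interrupt-masking pattern for the current domain. A minimal usage sketch, assuming only the <linux/ipipe.h> interface introduced by this patch (my_update_shared_state() is an illustrative name, not part of the patch):

/* Illustrative sketch -- not part of the patch. */
#include <linux/ipipe.h>

static void my_update_shared_state(void)
{
	unsigned long flags;

	/* Virtual equivalent of local_irq_save(): stall the current
	 * domain and remember whether it was already stalled. */
	flags = ipipe_test_and_stall_pipeline();

	/* ... section protected from this domain's interrupt log ... */

	/* Virtual equivalent of local_irq_restore(). */
	ipipe_restore_pipeline(flags);
}

Unlike local_irq_save_hw(), this leaves hardware interrupts unmasked for higher-priority domains unless the caller runs at the head of the pipeline.
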
-diff -uNrp 2.6.13/include/linux/preempt.h 2.6.13-ipipe/include/linux/preempt.h
---- 2.6.13/include/linux/preempt.h     2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/include/linux/preempt.h       2005-10-11 14:16:02.000000000 +0200
-@@ -13,41 +13,58 @@
-   extern void fastcall add_preempt_count(int val);
-   extern void fastcall sub_preempt_count(int val);
- #else
--# define add_preempt_count(val)       do { preempt_count() += (val); } while (0)
--# define sub_preempt_count(val)       do { preempt_count() -= (val); } while (0)
-+#define add_preempt_count(val)        do { preempt_count() += (val); } while (0)
-+#define sub_preempt_count(val)        do { preempt_count() -= (val); } while (0)
- #endif
- 
--#define inc_preempt_count() add_preempt_count(1)
--#define dec_preempt_count() sub_preempt_count(1)
-+#define inc_preempt_count()   add_preempt_count(1)
-+#define dec_preempt_count()   sub_preempt_count(1)
- 
--#define preempt_count()       (current_thread_info()->preempt_count)
-+#define preempt_count()               (current_thread_info()->preempt_count)
- 
- #ifdef CONFIG_PREEMPT
- 
- asmlinkage void preempt_schedule(void);
- 
--#define preempt_disable() \
--do { \
--      inc_preempt_count(); \
--      barrier(); \
-+#ifdef CONFIG_IPIPE
-+
-+#include <asm/ipipe.h>
-+
-+extern struct ipipe_domain *ipipe_percpu_domain[], *ipipe_root_domain;
-+
-+#define ipipe_preempt_guard() (ipipe_percpu_domain[ipipe_processor_id()] == ipipe_root_domain)
-+#else
-+#define ipipe_preempt_guard() 1
-+#endif
-+
-+#define preempt_disable()                                             \
-+do {                                                                  \
-+      if (ipipe_preempt_guard()) {                                    \
-+              inc_preempt_count();                                    \
-+              barrier();                                              \
-+      }                                                               \
- } while (0)
- 
--#define preempt_enable_no_resched() \
--do { \
--      barrier(); \
--      dec_preempt_count(); \
-+#define preempt_enable_no_resched()                                   \
-+do {                                                                  \
-+      if (ipipe_preempt_guard()) {                                    \
-+              barrier();                                              \
-+              dec_preempt_count();                                    \
-+      }                                                               \
- } while (0)
- 
--#define preempt_check_resched() \
--do { \
--      if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
--              preempt_schedule(); \
-+#define preempt_check_resched()                                               \
-+do {                                                                  \
-+      if (ipipe_preempt_guard()) {                                    \
-+              if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))       \
-+                      preempt_schedule();                             \
-+      }                                                               \
- } while (0)
- 
--#define preempt_enable() \
--do { \
--      preempt_enable_no_resched(); \
--      preempt_check_resched(); \
-+#define preempt_enable()                                              \
-+do {                                                                  \
-+      preempt_enable_no_resched();                                    \
-+      preempt_check_resched();                                        \
- } while (0)
- 
- #else
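
The net effect of the ipipe_preempt_guard() test above is that the preemption count is only touched while the root (Linux) domain owns the CPU. A sketch of what the guarded macro boils down to under CONFIG_IPIPE and CONFIG_PREEMPT (illustrative, not part of the patch; my_example() is a made-up name):

/* Illustrative expansion -- not part of the patch. */
void my_example(void)
{
	preempt_disable();
	/*
	 * Expands roughly to:
	 *
	 *   if (ipipe_percpu_domain[ipipe_processor_id()] == ipipe_root_domain) {
	 *           inc_preempt_count();
	 *           barrier();
	 *   }
	 *
	 * so code interrupted by a higher-priority domain never has its
	 * root preemption count disturbed from over that domain.
	 */
	preempt_enable();
}
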
-diff -uNrp 2.6.13/include/linux/sched.h 2.6.13-ipipe/include/linux/sched.h
---- 2.6.13/include/linux/sched.h       2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/include/linux/sched.h 2005-09-07 13:24:50.000000000 +0200
-@@ -4,6 +4,7 @@
- #include <asm/param.h>        /* for HZ */
- 
- #include <linux/config.h>
-+#include <linux/ipipe.h>
- #include <linux/capability.h>
- #include <linux/threads.h>
- #include <linux/kernel.h>
-@@ -770,6 +771,9 @@ struct task_struct {
-       int cpuset_mems_generation;
- #endif
-       atomic_t fs_excl;       /* holding fs exclusive resources */
-+#ifdef CONFIG_IPIPE
-+        void *ptd[IPIPE_ROOT_NPTDKEYS];
-+#endif
- };
- 
- static inline pid_t process_group(struct task_struct *tsk)
-diff -uNrp 2.6.13/init/Kconfig 2.6.13-ipipe/init/Kconfig
---- 2.6.13/init/Kconfig        2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/init/Kconfig  2005-09-07 13:24:50.000000000 +0200
-@@ -69,6 +69,7 @@ menu "General setup"
- 
- config LOCALVERSION
-       string "Local version - append to kernel release"
-+      default "-ipipe"
-       help
-         Append an extra string to the end of your kernel version.
-         This will show up when you type uname, for example.
-diff -uNrp 2.6.13/init/main.c 2.6.13-ipipe/init/main.c
---- 2.6.13/init/main.c 2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/init/main.c   2005-10-17 16:55:11.000000000 +0200
-@@ -389,8 +389,9 @@ static void noinline rest_init(void)
-        */
-       schedule();
- 
-+      ipipe_reset_stats();
-       cpu_idle();
--} 
-+}
- 
- /* Check for early params. */
- static int __init do_early_param(char *param, char *val)
-@@ -474,6 +475,11 @@ asmlinkage void __init start_kernel(void
-       init_timers();
-       softirq_init();
-       time_init();
-+      /*
-+       * We need to wait for the interrupt and time subsystems to be
-+       * initialized before enabling the pipeline.
-+       */
-+      ipipe_init();
- 
-       /*
-        * HACK ALERT! This is early. We're enabling the console before
-@@ -598,6 +604,7 @@ static void __init do_basic_setup(void)
- #ifdef CONFIG_SYSCTL
-       sysctl_init();
- #endif
-+      ipipe_init_proc();
- 
-       /* Networking initialization needs a process context */ 
-       sock_init();
-diff -uNrp 2.6.13/kernel/Makefile 2.6.13-ipipe/kernel/Makefile
---- 2.6.13/kernel/Makefile     2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/kernel/Makefile       2005-09-07 13:24:50.000000000 +0200
-@@ -30,6 +30,7 @@ obj-$(CONFIG_SYSFS) += ksysfs.o
- obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
- obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
- obj-$(CONFIG_SECCOMP) += seccomp.o
-+obj-$(CONFIG_IPIPE) += ipipe/
- 
- ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
- # According to Alan Modra <[EMAIL PROTECTED]>, the -fno-omit-frame-pointer is
-diff -uNrp 2.6.13/kernel/exit.c 2.6.13-ipipe/kernel/exit.c
---- 2.6.13/kernel/exit.c       2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/kernel/exit.c 2005-09-07 13:24:50.000000000 +0200
-@@ -833,6 +833,7 @@ fastcall NORET_TYPE void do_exit(long co
-               del_timer_sync(&tsk->signal->real_timer);
-               acct_process(code);
-       }
-+      ipipe_exit_notify(tsk);
-       exit_mm(tsk);
- 
-       exit_sem(tsk);
-diff -uNrp 2.6.13/kernel/fork.c 2.6.13-ipipe/kernel/fork.c
---- 2.6.13/kernel/fork.c       2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/kernel/fork.c 2005-10-14 14:36:03.000000000 +0200
-@@ -1116,6 +1116,14 @@ static task_t *copy_process(unsigned lon
-       total_forks++;
-       write_unlock_irq(&tasklist_lock);
-       retval = 0;
-+#ifdef CONFIG_IPIPE
-+      {
-+      int k;
-+
-+      for (k = 0; k < IPIPE_ROOT_NPTDKEYS; k++)
-+          p->ptd[k] = NULL;
-+      }
-+#endif /* CONFIG_IPIPE */
- 
- fork_out:
-       if (retval)
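
The ptd[] array initialized above provides per-task private data slots for secondary domains; keys into it are managed by the ptd key interface added in kernel/ipipe/generic.c further down. A minimal sketch, where my_key, my_attach_data() and the kmalloc'ed payload are illustrative, not part of the patch:

/* Illustrative sketch -- not part of the patch. */
#include <linux/ipipe.h>
#include <linux/slab.h>
#include <linux/errno.h>

static int my_key = -1;

static int my_attach_data(void)
{
	void *priv;

	if (my_key < 0) {
		my_key = ipipe_alloc_ptdkey();
		if (my_key < 0)
			return -EBUSY;	/* no key slot left */
	}

	priv = kmalloc(32, GFP_KERNEL);	/* whatever per-task state */
	if (!priv)
		return -ENOMEM;

	return ipipe_set_ptd(my_key, priv);	/* lands in current->ptd[my_key] */
}

static void *my_retrieve_data(void)
{
	return ipipe_get_ptd(my_key);	/* NULL if the key is invalid */
}
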
-diff -uNrp 2.6.13/kernel/ipipe/Kconfig 2.6.13-ipipe/kernel/ipipe/Kconfig
---- 2.6.13/kernel/ipipe/Kconfig        1970-01-01 01:00:00.000000000 +0100
-+++ 2.6.13-ipipe/kernel/ipipe/Kconfig  2005-09-07 14:30:42.000000000 +0200
-@@ -0,0 +1,18 @@
-+config IPIPE
-+      bool "Interrupt pipeline"
-+      default y
-+      ---help---
-+        Activate this option if you want the interrupt pipeline to be
-+        compiled in.
-+
-+config IPIPE_STATS
-+      bool "Collect statistics"
-+      depends on IPIPE
-+      default n
-+      ---help---
-+        Activate this option if you want runtime statistics to be collected
-+        while the I-pipe is operating. This option adds a small overhead, but
-+        is useful to detect unexpected latency points.
-+
-+config IPIPE_EXTENDED
-+      def_bool IPIPE
-diff -uNrp 2.6.13/kernel/ipipe/Makefile 2.6.13-ipipe/kernel/ipipe/Makefile
---- 2.6.13/kernel/ipipe/Makefile       1970-01-01 01:00:00.000000000 +0100
-+++ 2.6.13-ipipe/kernel/ipipe/Makefile 2005-09-07 13:17:29.000000000 +0200
-@@ -0,0 +1,2 @@
-+
-+obj-$(CONFIG_IPIPE)   += core.o generic.o
-diff -uNrp 2.6.13/kernel/ipipe/core.c 2.6.13-ipipe/kernel/ipipe/core.c
---- 2.6.13/kernel/ipipe/core.c 1970-01-01 01:00:00.000000000 +0100
-+++ 2.6.13-ipipe/kernel/ipipe/core.c   2005-10-17 16:51:47.000000000 +0200
-@@ -0,0 +1,678 @@
-+/* -*- linux-c -*-
-+ * linux/kernel/ipipe/core.c
-+ *
-+ * Copyright (C) 2002-2005 Philippe Gerum.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
-+ * USA; either version 2 of the License, or (at your option) any later
-+ * version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ * Architecture-independent I-PIPE core support.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/module.h>
-+#include <linux/kallsyms.h>
-+#ifdef CONFIG_PROC_FS
-+#include <linux/proc_fs.h>
-+#endif        /* CONFIG_PROC_FS */
-+
-+static struct ipipe_domain ipipe_root =
-+      { .cpudata = {[0 ... IPIPE_NR_CPUS-1] =
-+              { .status = (1<<IPIPE_STALL_FLAG) } } };
-+
-+struct ipipe_domain *ipipe_root_domain = &ipipe_root;
-+
-+struct ipipe_domain *ipipe_percpu_domain[IPIPE_NR_CPUS] =
-+      {[0 ... IPIPE_NR_CPUS - 1] = &ipipe_root };
-+
-+raw_spinlock_t __ipipe_pipelock = RAW_SPIN_LOCK_UNLOCKED;
-+
-+struct list_head __ipipe_pipeline;
-+
-+unsigned long __ipipe_virtual_irq_map = 0;
-+
-+unsigned __ipipe_printk_virq;
-+
-+int __ipipe_event_monitors[IPIPE_NR_EVENTS];
-+
-+/*
-+ * ipipe_init() -- Initialization routine of the IPIPE layer. Called
-+ * by the host kernel early during the boot procedure.
-+ */
-+void ipipe_init(void)
-+{
-+      struct ipipe_domain *ipd = &ipipe_root;
-+
-+      __ipipe_check_platform();       /* Do platform dependent checks first. */
-+
-+      /*
-+       * A lightweight registration code for the root domain. We are
-+       * running on the boot CPU, hw interrupts are off, and
-+       * secondary CPUs are still lost in space.
-+       */
-+
-+      INIT_LIST_HEAD(&__ipipe_pipeline);
-+
-+      ipd->name = "Linux";
-+      ipd->domid = IPIPE_ROOT_ID;
-+      ipd->priority = IPIPE_ROOT_PRIO;
-+
-+      __ipipe_init_stage(ipd);
-+
-+      INIT_LIST_HEAD(&ipd->p_link);
-+      list_add_tail(&ipd->p_link, &__ipipe_pipeline);
-+
-+      __ipipe_init_platform();
-+
-+      __ipipe_printk_virq = ipipe_alloc_virq();       /* Cannot fail here. */
-+      ipd->irqs[__ipipe_printk_virq].handler = &__ipipe_flush_printk;
-+      ipd->irqs[__ipipe_printk_virq].acknowledge = NULL;
-+      ipd->irqs[__ipipe_printk_virq].control = IPIPE_HANDLE_MASK;
-+
-+      __ipipe_enable_pipeline();
-+
-+      printk(KERN_INFO "I-pipe %s: pipeline enabled.\n",
-+             IPIPE_VERSION_STRING);
-+}
-+
-+void __ipipe_init_stage(struct ipipe_domain *ipd)
-+{
-+      int cpuid, n;
-+
-+      for (cpuid = 0; cpuid < IPIPE_NR_CPUS; cpuid++) {
-+              ipd->cpudata[cpuid].irq_pending_hi = 0;
-+
-+              for (n = 0; n < IPIPE_IRQ_IWORDS; n++)
-+                      ipd->cpudata[cpuid].irq_pending_lo[n] = 0;
-+
-+              for (n = 0; n < IPIPE_NR_IRQS; n++)
-+                      ipd->cpudata[cpuid].irq_hits[n] = 0;
-+      }
-+
-+      for (n = 0; n < IPIPE_NR_IRQS; n++) {
-+              ipd->irqs[n].acknowledge = NULL;
-+              ipd->irqs[n].handler = NULL;
-+              ipd->irqs[n].control = IPIPE_PASS_MASK; /* Pass but don't handle */
-+      }
-+
-+      for (n = 0; n < IPIPE_NR_EVENTS; n++)
-+              ipd->evhand[n] = NULL;
-+
-+      ipd->evexcl = 0;
-+
-+#ifdef CONFIG_SMP
-+      ipd->irqs[IPIPE_CRITICAL_IPI].acknowledge = &__ipipe_ack_system_irq;
-+      ipd->irqs[IPIPE_CRITICAL_IPI].handler = &__ipipe_do_critical_sync;
-+      /* Immediately handle in the current domain but *never* pass */
-+      ipd->irqs[IPIPE_CRITICAL_IPI].control =
-+              IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK|IPIPE_SYSTEM_MASK;
-+#endif        /* CONFIG_SMP */
-+}
-+
-+void __ipipe_stall_root(void)
-+{
-+      ipipe_declare_cpuid;
-+      unsigned long flags;
-+
-+      ipipe_get_cpu(flags); /* Care for migration. */
-+
-+      set_bit(IPIPE_STALL_FLAG, &ipipe_root_domain->cpudata[cpuid].status);
-+
-+#ifdef CONFIG_SMP
-+      if (!__ipipe_pipeline_head_p(ipipe_root_domain))
-+              ipipe_put_cpu(flags);
-+#else /* CONFIG_SMP */
-+      if (__ipipe_pipeline_head_p(ipipe_root_domain))
-+              local_irq_disable_hw();
-+#endif /* CONFIG_SMP */
-+      ipipe_mark_domain_stall(ipipe_root_domain,cpuid);
-+}
-+
-+void __ipipe_cleanup_domain(struct ipipe_domain *ipd)
-+{
-+      ipipe_unstall_pipeline_from(ipd);
-+
-+#ifdef CONFIG_SMP
-+      {
-+              int cpu;
-+
-+              for_each_online_cpu(cpu) {
-+                      while (ipd->cpudata[cpu].irq_pending_hi != 0)
-+                              cpu_relax();
-+              }
-+      }
-+#endif        /* CONFIG_SMP */
-+}
-+
-+void __ipipe_unstall_root(void)
-+{
-+      ipipe_declare_cpuid;
-+
-+      local_irq_disable_hw();
-+
-+      ipipe_load_cpuid();
-+
-+      __clear_bit(IPIPE_STALL_FLAG, &ipipe_root_domain->cpudata[cpuid].status);
-+
-+      ipipe_mark_domain_unstall(ipipe_root_domain, cpuid);
-+
-+      if (ipipe_root_domain->cpudata[cpuid].irq_pending_hi != 0)
-+              __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
-+
-+      local_irq_enable_hw();
-+}
-+
-+unsigned long __ipipe_test_root(void)
-+{
-+      unsigned long flags, s;
-+      ipipe_declare_cpuid;
-+
-+      ipipe_get_cpu(flags); /* Care for migration. */
-+      s = test_bit(IPIPE_STALL_FLAG, &ipipe_root_domain->cpudata[cpuid].status);
-+      ipipe_put_cpu(flags);
-+
-+      return s;
-+}
-+
-+unsigned long __ipipe_test_and_stall_root(void)
-+{
-+      unsigned long flags, s;
-+      ipipe_declare_cpuid;
-+
-+      ipipe_get_cpu(flags); /* Care for migration. */
-+      s = test_and_set_bit(IPIPE_STALL_FLAG,
-+                           &ipipe_root_domain->cpudata[cpuid].status);
-+      ipipe_mark_domain_stall(ipipe_root_domain,cpuid);
-+      ipipe_put_cpu(flags);
-+
-+      return s;
-+}
-+
-+void fastcall __ipipe_restore_root(unsigned long flags)
-+{
-+      if (flags)
-+              __ipipe_stall_root();
-+      else
-+              __ipipe_unstall_root();
-+}
-+
-+/*
-+ * ipipe_unstall_pipeline_from() -- Unstall the pipeline and
-+ * synchronize pending interrupts for a given domain. See
-+ * __ipipe_walk_pipeline() for more information.
-+ */
-+void fastcall ipipe_unstall_pipeline_from(struct ipipe_domain *ipd)
-+{
-+      struct ipipe_domain *this_domain;
-+      struct list_head *pos;
-+      unsigned long flags;
-+      ipipe_declare_cpuid;
-+
-+      ipipe_lock_cpu(flags);
-+
-+      __clear_bit(IPIPE_STALL_FLAG, &ipd->cpudata[cpuid].status);
-+
-+      ipipe_mark_domain_unstall(ipd, cpuid);
-+
-+      this_domain = ipipe_percpu_domain[cpuid];
-+
-+      if (ipd == this_domain) {
-+              if (ipd->cpudata[cpuid].irq_pending_hi != 0)
-+                      __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
-+
-+              goto release_cpu_and_exit;
-+      }
-+
-+      list_for_each(pos, &__ipipe_pipeline) {
-+
-+              struct ipipe_domain *next_domain =
-+                      list_entry(pos, struct ipipe_domain, p_link);
-+
-+              if (test_bit(IPIPE_STALL_FLAG,
-+                           &next_domain->cpudata[cpuid].status))
-+                      break;  /* Stalled stage -- do not go further. */
-+
-+              if (next_domain->cpudata[cpuid].irq_pending_hi != 0) {
-+
-+                      if (next_domain == this_domain)
-+                              __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
-+                      else {
-+                              __ipipe_switch_to(this_domain, next_domain,
-+                                                cpuid);
-+
-+                              ipipe_load_cpuid();     /* Processor might have changed. */
-+
-+                              if (this_domain->cpudata[cpuid].
-+                                  irq_pending_hi != 0
-+                                  && !test_bit(IPIPE_STALL_FLAG,
-+                                               &this_domain->cpudata[cpuid].
-+                                               status))
-+                                      __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
-+                      }
-+
-+                      break;
-+              } else if (next_domain == this_domain)
-+                      break;
-+      }
-+
-+release_cpu_and_exit:
-+
-+      if (__ipipe_pipeline_head_p(ipd))
-+              local_irq_enable_hw();
-+      else
-+              ipipe_unlock_cpu(flags);
-+}
-+
-+/*
-+ * ipipe_suspend_domain() -- Suspend the current domain, switching to
-+ * the next one which has pending work down the pipeline.
-+ */
-+void ipipe_suspend_domain(void)
-+{
-+      struct ipipe_domain *this_domain, *next_domain;
-+      struct list_head *ln;
-+      unsigned long flags;
-+      ipipe_declare_cpuid;
-+
-+      ipipe_lock_cpu(flags);
-+
-+      this_domain = next_domain = ipipe_percpu_domain[cpuid];
-+
-+      __clear_bit(IPIPE_STALL_FLAG, &this_domain->cpudata[cpuid].status);
-+
-+      ipipe_mark_domain_unstall(this_domain, cpuid);
-+
-+      if (this_domain->cpudata[cpuid].irq_pending_hi != 0)
-+              goto sync_stage;
-+
-+      for (;;) {
-+              ln = next_domain->p_link.next;
-+
-+              if (ln == &__ipipe_pipeline)
-+                      break;
-+
-+              next_domain = list_entry(ln, struct ipipe_domain, p_link);
-+
-+              if (test_bit(IPIPE_STALL_FLAG,
-+                           &next_domain->cpudata[cpuid].status))
-+                      break;
-+
-+              if (next_domain->cpudata[cpuid].irq_pending_hi == 0)
-+                      continue;
-+
-+              ipipe_percpu_domain[cpuid] = next_domain;
-+
-+sync_stage:
-+
-+              __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
-+
-+              ipipe_load_cpuid();     /* Processor might have changed. */
-+
-+              if (ipipe_percpu_domain[cpuid] != next_domain)
-+                      /*
-+                       * Something has changed the current domain under our
-+                       * feet, recycling the register set; take note.
-+                       */
-+                      this_domain = ipipe_percpu_domain[cpuid];
-+      }
-+
-+      ipipe_percpu_domain[cpuid] = this_domain;
-+
-+      ipipe_unlock_cpu(flags);
-+}
-+
-+/* ipipe_alloc_virq() -- Allocate a pipelined virtual/soft interrupt.
-+ * Virtual interrupts are handled in exactly the same way than their
-+ * hw-generated counterparts wrt pipelining.
-+ */
-+unsigned ipipe_alloc_virq(void)
-+{
-+      unsigned long flags, irq = 0;
-+      int ipos;
-+
-+      spin_lock_irqsave_hw(&__ipipe_pipelock, flags);
-+
-+      if (__ipipe_virtual_irq_map != ~0) {
-+              ipos = ffz(__ipipe_virtual_irq_map);
-+              set_bit(ipos, &__ipipe_virtual_irq_map);
-+              irq = ipos + IPIPE_VIRQ_BASE;
-+      }
-+
-+      spin_unlock_irqrestore_hw(&__ipipe_pipelock, flags);
-+
-+      return irq;
-+}
-+
-+/* __ipipe_dispatch_event() -- Low-level event dispatcher. */
-+
-+int fastcall __ipipe_dispatch_event (unsigned event, void *data)
-+{
-+      struct ipipe_domain *start_domain, *this_domain, *next_domain;
-+      struct list_head *pos, *npos;
-+      unsigned long flags;
-+      ipipe_declare_cpuid;
-+      int propagate = 1;
-+
-+      ipipe_lock_cpu(flags);
-+
-+      start_domain = this_domain = ipipe_percpu_domain[cpuid];
-+
-+      list_for_each_safe(pos,npos,&__ipipe_pipeline) {
-+
-+              next_domain = list_entry(pos,struct ipipe_domain,p_link);
-+
-+              /*
-+               * Note: Domain migration may occur while running
-+               * event or interrupt handlers, in which case the
-+               * current register set is going to be recycled for a
-+               * different domain than the initiating one. We do
-+               * care for that, always tracking the current domain
-+               * descriptor upon return from those handlers.
-+               */
-+              if (next_domain->evhand[event] != NULL) {
-+                      ipipe_percpu_domain[cpuid] = next_domain;
-+                      ipipe_unlock_cpu(flags);
-+                      propagate = !next_domain->evhand[event](event,start_domain,data);
-+                      ipipe_lock_cpu(flags);
-+                      if (ipipe_percpu_domain[cpuid] != next_domain)
-+                              this_domain = ipipe_percpu_domain[cpuid];
-+              }
-+
-+              if (next_domain != ipipe_root_domain && /* NEVER sync the root stage here. */
-+                  next_domain->cpudata[cpuid].irq_pending_hi != 0 &&
-+                  !test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status)) {
-+                      ipipe_percpu_domain[cpuid] = next_domain;
-+                      __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
-+                      ipipe_load_cpuid();
-+                      if (ipipe_percpu_domain[cpuid] != next_domain)
-+                              this_domain = ipipe_percpu_domain[cpuid];
-+              }
-+
-+              ipipe_percpu_domain[cpuid] = this_domain;
-+
-+              if (next_domain == this_domain || !propagate)
-+                      break;
-+      }
-+
-+      ipipe_unlock_cpu(flags);
-+
-+      return !propagate;
-+}
-+
-+#ifdef CONFIG_PROC_FS
-+
-+#include <linux/proc_fs.h>
-+
-+static struct proc_dir_entry *ipipe_proc_root;
-+
-+static int __ipipe_version_info_proc(char *page,
-+                                   char **start,
-+                                   off_t off, int count, int *eof, void *data)
-+{
-+      int len = sprintf(page, "%s\n", IPIPE_VERSION_STRING);
-+
-+      len -= off;
-+
-+      if (len <= off + count)
-+              *eof = 1;
-+
-+      *start = page + off;
-+
-+      if(len > count)
-+              len = count;
-+
-+      if(len < 0)
-+              len = 0;
-+
-+      return len;
-+}
-+
-+static int __ipipe_common_info_proc(char *page,
-+                                  char **start,
-+                                  off_t off, int count, int *eof, void *data)
-+{
-+      struct ipipe_domain *ipd = (struct ipipe_domain *)data;
-+      unsigned long ctlbits;
-+      unsigned irq, _irq;
-+      char *p = page;
-+      int len;
-+
-+      spin_lock(&__ipipe_pipelock);
-+
-+      p += sprintf(p, "Priority=%d, Id=0x%.8x\n",
-+                   ipd->priority, ipd->domid);
-+      irq = 0;
-+
-+      while (irq < IPIPE_NR_IRQS) {
-+              ctlbits =
-+                      (ipd->irqs[irq].
-+                       control & (IPIPE_HANDLE_MASK | IPIPE_PASS_MASK |
-+                                  IPIPE_STICKY_MASK));
-+              if (irq >= IPIPE_NR_XIRQS && !ipipe_virtual_irq_p(irq)) {
-+                      /*
-+                       * There might be a hole between the last external
-+                       * IRQ and the first virtual one; skip it.
-+                       */
-+                      irq++;
-+                      continue;
-+              }
-+
-+              if (ipipe_virtual_irq_p(irq)
-+                  && !test_bit(irq - IPIPE_VIRQ_BASE,
-+                               &__ipipe_virtual_irq_map)) {
-+                      /* Non-allocated virtual IRQ; skip it. */
-+                      irq++;
-+                      continue;
-+              }
-+
-+              /*
-+               * Attempt to group consecutive IRQ numbers having the
-+               * same virtualization settings in a single line.
-+               */
-+
-+              _irq = irq;
-+
-+              while (++_irq < IPIPE_NR_IRQS) {
-+                      if (ipipe_virtual_irq_p(_irq) !=
-+                          ipipe_virtual_irq_p(irq)
-+                          || (ipipe_virtual_irq_p(_irq)
-+                              && !test_bit(_irq - IPIPE_VIRQ_BASE,
-+                                           &__ipipe_virtual_irq_map))
-+                          || ctlbits != (ipd->irqs[_irq].
-+                           control & (IPIPE_HANDLE_MASK |
-+                                      IPIPE_PASS_MASK |
-+                                      IPIPE_STICKY_MASK)))
-+                              break;
-+              }
-+
-+              if (_irq == irq + 1)
-+                      p += sprintf(p, "irq%u: ", irq);
-+              else
-+                      p += sprintf(p, "irq%u-%u: ", irq, _irq - 1);
-+
-+              /*
-+               * Statuses are as follows:
-+               * o "accepted" means handled _and_ passed down the pipeline.
-+               * o "grabbed" means handled, but the interrupt might be
-+               * terminated _or_ passed down the pipeline depending on
-+               * what the domain handler asks for to the I-pipe.
-+               * o "passed" means unhandled by the domain but passed
-+               * down the pipeline.
-+               * o "discarded" means unhandled and _not_ passed down the
-+               * pipeline. The interrupt merely disappears from the
-+               * current domain down to the end of the pipeline.
-+               */
-+              if (ctlbits & IPIPE_HANDLE_MASK) {
-+                      if (ctlbits & IPIPE_PASS_MASK)
-+                              p += sprintf(p, "accepted");
-+                      else
-+                              p += sprintf(p, "grabbed");
-+              } else if (ctlbits & IPIPE_PASS_MASK)
-+                      p += sprintf(p, "passed");
-+              else
-+                      p += sprintf(p, "discarded");
-+
-+              if (ctlbits & IPIPE_STICKY_MASK)
-+                      p += sprintf(p, ", sticky");
-+
-+              if (ipipe_virtual_irq_p(irq))
-+                      p += sprintf(p, ", virtual");
-+
-+              p += sprintf(p, "\n");
-+
-+              irq = _irq;
-+      }
-+
-+      spin_unlock(&__ipipe_pipelock);
-+
-+      len = p - page;
-+
-+      if (len <= off + count)
-+              *eof = 1;
-+
-+      *start = page + off;
-+
-+      len -= off;
-+
-+      if (len > count)
-+              len = count;
-+
-+      if (len < 0)
-+              len = 0;
-+
-+      return len;
-+}
-+
-+#ifdef CONFIG_IPIPE_STATS
-+
-+static int __ipipe_stat_info_proc(char *page,
-+                                char **start,
-+                                off_t off, int count, int *eof, void *data)
-+{
-+      struct ipipe_domain *ipd = (struct ipipe_domain *)data;
-+      int len = 0, cpu, irq;
-+      char *p = page;
-+
-+      p += sprintf(p,"> STALL TIME:\n");
-+
-+      for_each_online_cpu(cpu) {
-+              unsigned long eip = ipd->stats[cpu].max_stall_eip;
-+              char namebuf[KSYM_NAME_LEN+1];
-+              unsigned long offset, size, t;
-+              const char *name;
-+              char *modname;
-+
-+              name = kallsyms_lookup(eip, &size, &offset, &modname, namebuf);
-+              t = ipipe_tsc2ns(ipd->stats[cpu].max_stall_time);
-+
-+              if (name) {
-+                      if (modname)
-+                              p += sprintf(p,"CPU%d  %12lu  (%s+%#lx [%s])\n",
-+                                           cpu,t,name,offset,modname);
-+                      else
-+                              p += sprintf(p,"CPU%d  %12lu  (%s+%#lx)\n",
-+                                           cpu,t,name,offset);
-+              }
-+              else
-+                      p += sprintf(p,"CPU%d  %12lu  (%lx)\n",
-+                                   cpu,t,eip);
-+      }
-+
-+      p += sprintf(p,"> PROPAGATION TIME:\nIRQ");
-+
-+      for_each_online_cpu(cpu) {
-+              p += sprintf(p,"         CPU%d",cpu);
-+      }
-+
-+      for (irq = 0; irq < IPIPE_NR_IRQS; irq++) {
-+
-+              unsigned long long t = 0;
-+
-+              for_each_online_cpu(cpu) {
-+                      t += ipd->stats[cpu].irq_stats[irq].max_delivery_time;
-+              }
-+
-+              if (!t)
-+                      continue;
-+
-+              p += sprintf(p,"\n%3d:",irq);
-+
-+              for_each_online_cpu(cpu) {
-+                      p += sprintf(p,"%13lu",
-+                                   ipipe_tsc2ns(ipd->stats[cpu].irq_stats[irq].max_delivery_time));
-+              }
-+      }
-+
-+      p += sprintf(p,"\n");
-+
-+      len = p - page - off;
-+      if (len <= off + count) *eof = 1;
-+      *start = page + off;
-+      if (len > count) len = count;
-+      if (len < 0) len = 0;
-+
-+      return len;
-+}
-+
-+#endif /* CONFIG_IPIPE_STATS */
-+
-+void __ipipe_add_domain_proc(struct ipipe_domain *ipd)
-+{
-+
-+      create_proc_read_entry(ipd->name,0444,ipipe_proc_root,&__ipipe_common_info_proc,ipd);
-+#ifdef CONFIG_IPIPE_STATS
-+      {
-+              char name[64];
-+              snprintf(name,sizeof(name),"%s_stats",ipd->name);
-+              create_proc_read_entry(name,0444,ipipe_proc_root,&__ipipe_stat_info_proc,ipd);
-+      }
-+#endif /* CONFIG_IPIPE_STATS */
-+}
-+
-+void __ipipe_remove_domain_proc(struct ipipe_domain *ipd)
-+{
-+      remove_proc_entry(ipd->name,ipipe_proc_root);
-+#ifdef CONFIG_IPIPE_STATS
-+      {
-+              char name[64];
-+              snprintf(name,sizeof(name),"%s_stats",ipd->name);
-+              remove_proc_entry(name,ipipe_proc_root);
-+      }
-+#endif /* CONFIG_IPIPE_STATS */
-+}
-+
-+void ipipe_init_proc(void)
-+{
-+      ipipe_proc_root = create_proc_entry("ipipe",S_IFDIR, 0);
-+      create_proc_read_entry("version",0444,ipipe_proc_root,&__ipipe_version_info_proc,NULL);
-+      __ipipe_add_domain_proc(ipipe_root_domain);
-+}
-+
-+#endif        /* CONFIG_PROC_FS */
-+
-+EXPORT_SYMBOL(ipipe_suspend_domain);
-+EXPORT_SYMBOL(ipipe_alloc_virq);
-+EXPORT_SYMBOL(ipipe_unstall_pipeline_from);
-+EXPORT_SYMBOL(ipipe_percpu_domain);
-+EXPORT_SYMBOL(ipipe_root_domain);
-+EXPORT_SYMBOL(__ipipe_unstall_root);
-+EXPORT_SYMBOL(__ipipe_stall_root);
-+EXPORT_SYMBOL(__ipipe_restore_root);
-+EXPORT_SYMBOL(__ipipe_test_and_stall_root);
-+EXPORT_SYMBOL(__ipipe_test_root);
-+EXPORT_SYMBOL(__ipipe_dispatch_event);
-+EXPORT_SYMBOL(__ipipe_pipeline);
-+EXPORT_SYMBOL(__ipipe_pipelock);
-+EXPORT_SYMBOL(__ipipe_virtual_irq_map);
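
As a usage reference for the virtual IRQ support implemented in core.c above, here is a minimal sketch that allocates a virq in the root domain, hooks a handler on it and triggers it once; the my_virq* names are illustrative, not part of the patch:

/* Illustrative sketch -- not part of the patch. */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ipipe.h>

static unsigned my_virq;

static void my_virq_handler(unsigned irq)
{
	printk(KERN_INFO "virq %u delivered\n", irq);
}

static int __init my_virq_init(void)
{
	my_virq = ipipe_alloc_virq();
	if (!my_virq)
		return -EBUSY;	/* virq map exhausted */

	/* Handle the virq in the root domain and let it pass down. */
	ipipe_virtualize_irq(ipipe_root_domain, my_virq,
			     &my_virq_handler, NULL,
			     IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);

	return ipipe_trigger_irq(my_virq);	/* pend and deliver it */
}

static void __exit my_virq_exit(void)
{
	ipipe_free_virq(my_virq);
}

module_init(my_virq_init);
module_exit(my_virq_exit);
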
-diff -uNrp 2.6.13/kernel/ipipe/generic.c 2.6.13-ipipe/kernel/ipipe/generic.c
---- 2.6.13/kernel/ipipe/generic.c      1970-01-01 01:00:00.000000000 +0100
-+++ 2.6.13-ipipe/kernel/ipipe/generic.c        2005-10-16 23:28:19.000000000 +0200
-@@ -0,0 +1,390 @@
-+/* -*- linux-c -*-
-+ * linux/kernel/ipipe/generic.c
-+ *
-+ * Copyright (C) 2002-2005 Philippe Gerum.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
-+ * USA; either version 2 of the License, or (at your option) any later
-+ * version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ * Architecture-independent I-PIPE services.
-+ */
-+
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#ifdef CONFIG_PROC_FS
-+#include <linux/proc_fs.h>
-+#endif        /* CONFIG_PROC_FS */
-+
-+MODULE_DESCRIPTION("I-pipe");
-+MODULE_LICENSE("GPL");
-+
-+static int __ipipe_ptd_key_count;
-+
-+static unsigned long __ipipe_ptd_key_map;
-+
-+/* ipipe_register_domain() -- Link a new domain to the pipeline. */
-+
-+int ipipe_register_domain(struct ipipe_domain *ipd,
-+                        struct ipipe_domain_attr *attr)
-+{
-+      struct list_head *pos;
-+      unsigned long flags;
-+
-+      if (ipipe_current_domain != ipipe_root_domain) {
-+              printk(KERN_WARNING
-+                     "I-pipe: Only the root domain may register a new domain.\n");
-+              return -EPERM;
-+      }
-+
-+      flags = ipipe_critical_enter(NULL);
-+
-+      list_for_each(pos, &__ipipe_pipeline) {
-+              struct ipipe_domain *_ipd =
-+                      list_entry(pos, struct ipipe_domain, p_link);
-+              if (_ipd->domid == attr->domid)
-+                      break;
-+      }
-+
-+      ipipe_critical_exit(flags);
-+
-+      if (pos != &__ipipe_pipeline)
-+              /* A domain with the given id already exists -- fail. */
-+              return -EBUSY;
-+
-+      ipd->name = attr->name;
-+      ipd->priority = attr->priority;
-+      ipd->domid = attr->domid;
-+      ipd->pdd = attr->pdd;
-+      ipd->flags = 0;
-+
-+#ifdef CONFIG_IPIPE_STATS
-+      {
-+              int cpu, irq;
-+              for_each_online_cpu(cpu) {
-+                      ipd->stats[cpu].last_stall_date = 0LL;
-+                      for (irq = 0; irq < IPIPE_NR_IRQS; irq++)
-+                              ipd->stats[cpu].irq_stats[irq].last_receipt_date = 0LL;
-+              }
-+      }
-+#endif /* CONFIG_IPIPE_STATS */
-+
-+      __ipipe_init_stage(ipd);
-+
-+      INIT_LIST_HEAD(&ipd->p_link);
-+
-+#ifdef CONFIG_PROC_FS
-+      __ipipe_add_domain_proc(ipd);
-+#endif /* CONFIG_PROC_FS */
-+
-+      flags = ipipe_critical_enter(NULL);
-+
-+      list_for_each(pos, &__ipipe_pipeline) {
-+              struct ipipe_domain *_ipd =
-+                      list_entry(pos, struct ipipe_domain, p_link);
-+              if (ipd->priority > _ipd->priority)
-+                      break;
-+      }
-+
-+      list_add_tail(&ipd->p_link, pos);
-+
-+      ipipe_critical_exit(flags);
-+
-+      printk(KERN_WARNING "I-pipe: Domain %s registered.\n", ipd->name);
-+
-+      /*
-+       * Finally, allow the new domain to perform its initialization
-+       * chores.
-+       */
-+
-+      if (attr->entry != NULL) {
-+              ipipe_declare_cpuid;
-+
-+              ipipe_lock_cpu(flags);
-+
-+              ipipe_percpu_domain[cpuid] = ipd;
-+              attr->entry();
-+              ipipe_percpu_domain[cpuid] = ipipe_root_domain;
-+
-+              ipipe_load_cpuid();     /* Processor might have changed. */
-+
-+              if (ipipe_root_domain->cpudata[cpuid].irq_pending_hi != 0 &&
-+                  !test_bit(IPIPE_STALL_FLAG,
-+                            &ipipe_root_domain->cpudata[cpuid].status))
-+                      __ipipe_sync_stage(IPIPE_IRQMASK_ANY);
-+
-+              ipipe_unlock_cpu(flags);
-+      }
-+
-+      return 0;
-+}
-+
-+/* ipipe_unregister_domain() -- Remove a domain from the pipeline. */
-+
-+int ipipe_unregister_domain(struct ipipe_domain *ipd)
-+{
-+      unsigned long flags;
-+
-+      if (ipipe_current_domain != ipipe_root_domain) {
-+              printk(KERN_WARNING
-+                     "I-pipe: Only the root domain may unregister a domain.\n");
-+              return -EPERM;
-+      }
-+
-+      if (ipd == ipipe_root_domain) {
-+              printk(KERN_WARNING
-+                     "I-pipe: Cannot unregister the root domain.\n");
-+              return -EPERM;
-+      }
-+#ifdef CONFIG_SMP
-+      {
-+              int nr_cpus = num_online_cpus(), _cpuid;
-+              unsigned irq;
-+
-+              /*
-+               * In the SMP case, wait for the logged events to drain on
-+               * other processors before eventually removing the domain
-+               * from the pipeline.
-+               */
-+
-+              ipipe_unstall_pipeline_from(ipd);
-+
-+              flags = ipipe_critical_enter(NULL);
-+
-+              for (irq = 0; irq < IPIPE_NR_IRQS; irq++) {
-+                      clear_bit(IPIPE_HANDLE_FLAG, &ipd->irqs[irq].control);
-+                      clear_bit(IPIPE_STICKY_FLAG, &ipd->irqs[irq].control);
-+                      set_bit(IPIPE_PASS_FLAG, &ipd->irqs[irq].control);
-+              }
-+
-+              ipipe_critical_exit(flags);
-+
-+              for (_cpuid = 0; _cpuid < nr_cpus; _cpuid++)
-+                      for (irq = 0; irq < IPIPE_NR_IRQS; irq++)
-+                              while (ipd->cpudata[_cpuid].irq_hits[irq] > 0)
-+                                      cpu_relax();
-+      }
-+#endif        /* CONFIG_SMP */
-+
-+#ifdef CONFIG_PROC_FS
-+      __ipipe_remove_domain_proc(ipd);
-+#endif /* CONFIG_PROC_FS */
-+
-+      /*
-+       * Simply remove the domain from the pipeline and we are almost done.
-+       */
-+
-+      flags = ipipe_critical_enter(NULL);
-+      list_del_init(&ipd->p_link);
-+      ipipe_critical_exit(flags);
-+
-+      __ipipe_cleanup_domain(ipd);
-+
-+      printk(KERN_WARNING "I-pipe: Domain %s unregistered.\n", ipd->name);
-+
-+      return 0;
-+}
-+
-+/*
-+ * ipipe_propagate_irq() -- Force a given IRQ propagation on behalf of
-+ * a running interrupt handler to the next domain down the pipeline.
-+ * ipipe_schedule_irq() -- Does almost the same as above, but attempts
-+ * to pend the interrupt for the current domain first.
-+ */
-+int fastcall __ipipe_schedule_irq(unsigned irq, struct list_head *head)
-+{
-+      struct list_head *ln;
-+      unsigned long flags;
-+      ipipe_declare_cpuid;
-+
-+      if (irq >= IPIPE_NR_IRQS ||
-+          (ipipe_virtual_irq_p(irq)
-+           && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
-+              return -EINVAL;
-+
-+      ipipe_lock_cpu(flags);
-+
-+      ln = head;
-+
-+      while (ln != &__ipipe_pipeline) {
-+              struct ipipe_domain *ipd =
-+                      list_entry(ln, struct ipipe_domain, p_link);
-+
-+              if (test_bit(IPIPE_HANDLE_FLAG, &ipd->irqs[irq].control)) {
-+                      ipd->cpudata[cpuid].irq_hits[irq]++;
-+                      __ipipe_set_irq_bit(ipd, cpuid, irq);
-+                      ipipe_mark_irq_receipt(ipd, irq, cpuid);
-+                      ipipe_unlock_cpu(flags);
-+                      return 1;
-+              }
-+
-+              ln = ipd->p_link.next;
-+      }
-+
-+      ipipe_unlock_cpu(flags);
-+
-+      return 0;
-+}
-+
-+/* ipipe_free_virq() -- Release a virtual/soft interrupt. */
-+
-+int ipipe_free_virq(unsigned virq)
-+{
-+      if (!ipipe_virtual_irq_p(virq))
-+              return -EINVAL;
-+
-+      clear_bit(virq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map);
-+
-+      return 0;
-+}
-+
-+void ipipe_init_attr(struct ipipe_domain_attr *attr)
-+{
-+      attr->name = "anon";
-+      attr->domid = 1;
-+      attr->entry = NULL;
-+      attr->priority = IPIPE_ROOT_PRIO;
-+      attr->pdd = NULL;
-+}
-+
-+/*
-+ * ipipe_catch_event() -- Interpose or remove an event handler for a
-+ * given domain.
-+ */
-+int ipipe_catch_event(struct ipipe_domain *ipd,
-+                    unsigned event,
-+                    int (*handler)(unsigned event, struct ipipe_domain *ipd, void *data))
-+{
-+      if (event >= IPIPE_NR_EVENTS)
-+              return -EINVAL;
-+
-+      if (!xchg(&ipd->evhand[event],handler)) {
-+              if (handler)
-+                      __ipipe_event_monitors[event]++;
-+      }
-+      else if (!handler)
-+              __ipipe_event_monitors[event]--;
-+
-+      return 0;
-+}
-+
-+cpumask_t ipipe_set_irq_affinity (unsigned irq, cpumask_t cpumask)
-+{
-+#ifdef CONFIG_SMP
-+      if (irq >= IPIPE_NR_XIRQS)
-+              /* Allow changing affinity of external IRQs only. */
-+              return CPU_MASK_NONE;
-+
-+      if (num_online_cpus() > 1)
-+              /* Allow changing affinity of external IRQs only. */
-+              return __ipipe_set_irq_affinity(irq,cpumask);
-+#endif /* CONFIG_SMP */
-+
-+      return CPU_MASK_NONE;
-+}
-+
-+int fastcall ipipe_send_ipi (unsigned ipi, cpumask_t cpumask)
-+
-+{
-+#ifdef CONFIG_SMP
-+      switch (ipi) {
-+
-+      case IPIPE_SERVICE_IPI0:
-+      case IPIPE_SERVICE_IPI1:
-+      case IPIPE_SERVICE_IPI2:
-+      case IPIPE_SERVICE_IPI3:
-+
-+              break;
-+
-+      default:
-+
-+              return -EINVAL;
-+      }
-+
-+      return __ipipe_send_ipi(ipi,cpumask);
-+#endif /* CONFIG_SMP */
-+
-+      return -EINVAL;
-+}
-+
-+int ipipe_alloc_ptdkey (void)
-+{
-+      unsigned long flags;
-+      int key = -1;
-+
-+      spin_lock_irqsave_hw(&__ipipe_pipelock,flags);
-+
-+      if (__ipipe_ptd_key_count < IPIPE_ROOT_NPTDKEYS) {
-+              key = ffz(__ipipe_ptd_key_map);
-+              set_bit(key,&__ipipe_ptd_key_map);
-+              __ipipe_ptd_key_count++;
-+      }
-+
-+      spin_unlock_irqrestore_hw(&__ipipe_pipelock,flags);
-+
-+      return key;
-+}
-+
-+int ipipe_free_ptdkey (int key)
-+{
-+      unsigned long flags;
-+
-+      if (key < 0 || key >= IPIPE_ROOT_NPTDKEYS)
-+              return -EINVAL;
-+
-+      spin_lock_irqsave_hw(&__ipipe_pipelock,flags);
-+
-+      if (test_and_clear_bit(key,&__ipipe_ptd_key_map))
-+              __ipipe_ptd_key_count--;
-+
-+      spin_unlock_irqrestore_hw(&__ipipe_pipelock,flags);
-+
-+      return 0;
-+}
-+
-+int fastcall ipipe_set_ptd (int key, void *value)
-+
-+{
-+      if (key < 0 || key >= IPIPE_ROOT_NPTDKEYS)
-+              return -EINVAL;
-+
-+      current->ptd[key] = value;
-+
-+      return 0;
-+}
-+
-+void fastcall *ipipe_get_ptd (int key)
-+
-+{
-+      if (key < 0 || key >= IPIPE_ROOT_NPTDKEYS)
-+              return NULL;
-+
-+      return current->ptd[key];
-+}
-+
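Illustration only: the intended usage pattern of the per-task data (ptd) keys exported above -- allocate a key once, then attach and retrieve a private pointer for the current task. struct my_priv and the function names are placeholders.

static int my_ptdkey = -1;

static int my_skin_init(void)
{
        my_ptdkey = ipipe_alloc_ptdkey();
        return my_ptdkey < 0 ? -EBUSY : 0;
}

static void my_attach_priv(struct my_priv *priv)
{
        ipipe_set_ptd(my_ptdkey, priv);         /* binds to 'current' */
}

static struct my_priv *my_get_priv(void)
{
        return (struct my_priv *)ipipe_get_ptd(my_ptdkey);
}

static void my_skin_exit(void)
{
        ipipe_free_ptdkey(my_ptdkey);
}
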
-+EXPORT_SYMBOL(ipipe_register_domain);
-+EXPORT_SYMBOL(ipipe_unregister_domain);
-+EXPORT_SYMBOL(ipipe_free_virq);
-+EXPORT_SYMBOL(ipipe_init_attr);
-+EXPORT_SYMBOL(ipipe_catch_event);
-+EXPORT_SYMBOL(ipipe_alloc_ptdkey);
-+EXPORT_SYMBOL(ipipe_free_ptdkey);
-+EXPORT_SYMBOL(ipipe_set_ptd);
-+EXPORT_SYMBOL(ipipe_get_ptd);
-+EXPORT_SYMBOL(ipipe_set_irq_affinity);
-+EXPORT_SYMBOL(ipipe_send_ipi);
-+EXPORT_SYMBOL(__ipipe_schedule_irq);
-diff -uNrp 2.6.13/kernel/irq/handle.c 2.6.13-ipipe/kernel/irq/handle.c
---- 2.6.13/kernel/irq/handle.c 2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/kernel/irq/handle.c   2005-10-11 14:12:16.000000000 +0200
-@@ -81,6 +81,17 @@ fastcall int handle_IRQ_event(unsigned i
- {
-       int ret, retval = 0, status = 0;
- 
-+#ifdef CONFIG_IPIPE
-+      /*
-+       * If processing a timer tick, pass the original regs as
-+       * collected during preemption and not our phony - always
-+       * kernel-originated - frame, so that we don't wreck the
-+       * profiling code.
-+       */
-+      if (__ipipe_tick_irq == irq)
-+              regs = __ipipe_tick_regs + smp_processor_id();
-+#endif /* CONFIG_IPIPE */
-+
-       if (!(action->flags & SA_INTERRUPT))
-               local_irq_enable();
- 
-@@ -117,14 +128,18 @@ fastcall unsigned int __do_IRQ(unsigned 
-               /*
-                * No locking required for CPU-local interrupts:
-                */
-+#ifndef CONFIG_IPIPE
-               desc->handler->ack(irq);
-+#endif /* CONFIG_IPIPE */
-               action_ret = handle_IRQ_event(irq, regs, desc->action);
-               desc->handler->end(irq);
-               return 1;
-       }
- 
-       spin_lock(&desc->lock);
-+#ifndef CONFIG_IPIPE
-       desc->handler->ack(irq);
-+#endif /* CONFIG_IPIPE */
-       /*
-        * REPLAY is when Linux resends an IRQ that was dropped earlier
-        * WAITING is used by probe to mark irqs that are being tested
-diff -uNrp 2.6.13/kernel/printk.c 2.6.13-ipipe/kernel/printk.c
---- 2.6.13/kernel/printk.c     2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/kernel/printk.c       2005-09-07 13:24:50.000000000 +0200
-@@ -502,6 +502,66 @@ __setup("time", printk_time_setup);
-  * is inspected when the actual printing occurs.
-  */
- 
-+#ifdef CONFIG_IPIPE
-+
-+static raw_spinlock_t __ipipe_printk_lock = RAW_SPIN_LOCK_UNLOCKED;
-+
-+static int __ipipe_printk_fill;
-+
-+static char __ipipe_printk_buf[__LOG_BUF_LEN];
-+
-+void __ipipe_flush_printk (unsigned virq)
-+{
-+      char *p = __ipipe_printk_buf;
-+      int out = 0, len;
-+
-+      clear_bit(IPIPE_PPRINTK_FLAG,&ipipe_root_domain->flags);
-+
-+      while (out < __ipipe_printk_fill) {
-+              len = strlen(p) + 1;
-+              printk("%s",p);
-+              p += len;
-+              out += len;
-+      }
-+      __ipipe_printk_fill = 0;
-+}
-+
-+asmlinkage int printk(const char *fmt, ...)
-+{
-+      unsigned long flags;
-+      int r, fbytes;
-+      va_list args;
-+
-+      va_start(args, fmt);
-+
-+      if (ipipe_current_domain == ipipe_root_domain ||
-+          test_bit(IPIPE_SPRINTK_FLAG,&ipipe_current_domain->flags) ||
-+          oops_in_progress) {
-+              r = vprintk(fmt, args);
-+              goto out;
-+      }
-+
-+      spin_lock_irqsave_hw(&__ipipe_printk_lock,flags);
-+
-+      fbytes = __LOG_BUF_LEN - __ipipe_printk_fill;
-+
-+      if (fbytes > 1) {
-+              r = vscnprintf(__ipipe_printk_buf + __ipipe_printk_fill,
-+                             fbytes, fmt, args) + 1; /* account for the null byte */
-+              __ipipe_printk_fill += r;
-+      } else
-+              r = 0;
-+
-+      spin_unlock_irqrestore_hw(&__ipipe_printk_lock,flags);
-+
-+      if (!test_and_set_bit(IPIPE_PPRINTK_FLAG,&ipipe_root_domain->flags))
-+              ipipe_trigger_irq(__ipipe_printk_virq);
-+out: 
-+      va_end(args);
-+
-+      return r;
-+}
-+#else /* !CONFIG_IPIPE */
- asmlinkage int printk(const char *fmt, ...)
- {
-       va_list args;
-@@ -513,6 +573,7 @@ asmlinkage int printk(const char *fmt, .
- 
-       return r;
- }
-+#endif /* CONFIG_IPIPE */
- 
- asmlinkage int vprintk(const char *fmt, va_list args)
- {
-diff -uNrp 2.6.13/kernel/sched.c 2.6.13-ipipe/kernel/sched.c
---- 2.6.13/kernel/sched.c      2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/kernel/sched.c        2005-10-11 18:17:29.000000000 +0200
-@@ -2907,6 +2907,8 @@ switch_tasks:
-               prepare_task_switch(rq, next);
-               prev = context_switch(rq, prev, next);
-               barrier();
-+              if (task_hijacked(prev))
-+                  return;
-               /*
-                * this_rq must be evaluated again because prev may have moved
-                * CPUs since it called schedule(), thus the 'rq' on its stack
-@@ -2939,6 +2941,11 @@ asmlinkage void __sched preempt_schedule
-       struct task_struct *task = current;
-       int saved_lock_depth;
- #endif
-+#ifdef CONFIG_IPIPE
-+      /* Do not reschedule over non-Linux domains. */
-+      if (ipipe_current_domain != ipipe_root_domain)
-+              return;
-+#endif /* CONFIG_IPIPE */
-       /*
-        * If there is a non-zero preempt_count or interrupts are disabled,
-        * we do not want to preempt the current task.  Just return..
-@@ -3563,6 +3570,7 @@ recheck:
-               deactivate_task(p, rq);
-       oldprio = p->prio;
-       __setscheduler(p, policy, param->sched_priority);
-+      ipipe_setsched_notify(p);
-       if (array) {
-               __activate_task(p, rq);
-               /*
-@@ -5263,3 +5271,53 @@ void normalize_rt_tasks(void)
- }
- 
- #endif /* CONFIG_MAGIC_SYSRQ */
-+
-+#ifdef CONFIG_IPIPE
-+
-+int ipipe_setscheduler_root (struct task_struct *p, int policy, int prio)
-+{
-+      prio_array_t *array;
-+      unsigned long flags;
-+      runqueue_t *rq;
-+      int oldprio;
-+
-+      if (prio < 1 || prio > MAX_RT_PRIO-1)
-+              return -EINVAL;
-+
-+      rq = task_rq_lock(p, &flags);
-+      array = p->array;
-+      if (array)
-+              deactivate_task(p, rq);
-+      oldprio = p->prio;
-+      __setscheduler(p, policy, prio);
-+      if (array) {
-+              __activate_task(p, rq);
-+              if (task_running(rq, p)) {
-+                      if (p->prio > oldprio)
-+                              resched_task(rq->curr);
-+              } else if (TASK_PREEMPTS_CURR(p, rq))
-+                      resched_task(rq->curr);
-+      }
-+      task_rq_unlock(rq, &flags);
-+
-+      return 0;
-+}
-+
-+EXPORT_SYMBOL(ipipe_setscheduler_root);
-+
-+int ipipe_reenter_root (struct task_struct *prev, int policy, int prio)
-+{
-+      finish_task_switch(this_rq(), prev);
-+      if (reacquire_kernel_lock(current) < 0)
-+              ;
-+      preempt_enable_no_resched();
-+
-+      if (current->policy != policy || current->rt_priority != prio)
-+              return ipipe_setscheduler_root(current,policy,prio);
-+
-+      return 0;
-+}
-+
-+EXPORT_SYMBOL(ipipe_reenter_root);
-+
-+#endif /* CONFIG_IPIPE */
-diff -uNrp 2.6.13/kernel/signal.c 2.6.13-ipipe/kernel/signal.c
---- 2.6.13/kernel/signal.c     2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/kernel/signal.c       2005-09-07 13:24:50.000000000 +0200
-@@ -612,6 +612,7 @@ void signal_wake_up(struct task_struct *
-       unsigned int mask;
- 
-       set_tsk_thread_flag(t, TIF_SIGPENDING);
-+      ipipe_sigwake_notify(t); /* TIF_SIGPENDING must be set first. */
- 
-       /*
-        * For SIGKILL, we want to wake it up in the stopped/traced case.
-diff -uNrp 2.6.13/lib/smp_processor_id.c 2.6.13-ipipe/lib/smp_processor_id.c
---- 2.6.13/lib/smp_processor_id.c      2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/lib/smp_processor_id.c        2005-09-07 13:24:50.000000000 +0200
-@@ -12,6 +12,11 @@ unsigned int debug_smp_processor_id(void
-       int this_cpu = raw_smp_processor_id();
-       cpumask_t this_mask;
- 
-+#ifdef CONFIG_IPIPE
-+      if (ipipe_current_domain != ipipe_root_domain)
-+          return this_cpu;
-+#endif /* CONFIG_IPIPE */
-+
-       if (likely(preempt_count))
-               goto out;
- 
-diff -uNrp 2.6.13/mm/vmalloc.c 2.6.13-ipipe/mm/vmalloc.c
---- 2.6.13/mm/vmalloc.c        2005-08-29 01:41:01.000000000 +0200
-+++ 2.6.13-ipipe/mm/vmalloc.c  2005-10-14 14:25:36.000000000 +0200
-@@ -18,6 +18,7 @@
- 
- #include <asm/uaccess.h>
- #include <asm/tlbflush.h>
-+#include <asm/pgalloc.h>
- 
- 
- DEFINE_RWLOCK(vmlist_lock);
-@@ -148,10 +149,14 @@ int map_vm_area(struct vm_struct *area, 
-       pgd = pgd_offset_k(addr);
-       spin_lock(&init_mm.page_table_lock);
-       do {
-+              pgd_t oldpgd;
-+              memcpy(&oldpgd,pgd,sizeof(pgd_t));
-               next = pgd_addr_end(addr, end);
-               err = vmap_pud_range(pgd, addr, next, prot, pages);
-               if (err)
-                       break;
-+              if (pgd_val(oldpgd) != pgd_val(*pgd))
-+                      set_pgdir(addr, *pgd);
-       } while (pgd++, addr = next, addr != end);
-       spin_unlock(&init_mm.page_table_lock);
-       flush_cache_vmap((unsigned long) area->addr, end);
diff -Nru --exclude=.svn xenomai-orig/arch/ppc/patches/adeos-linux-2.6.10-ppc-r8c4.patch xenomai-devel/arch/ppc/patches/adeos-linux-2.6.10-ppc-r8c4.patch
--- xenomai-orig/arch/ppc/patches/adeos-linux-2.6.10-ppc-r8c4.patch     2005-10-11 10:32:16.000000000 +0300
+++ xenomai-devel/arch/ppc/patches/adeos-linux-2.6.10-ppc-r8c4.patch    1970-01-01 02:00:00.000000000 +0200
@@ -1,5262 +0,0 @@
-diff -uNrp linux-2.6.10/Documentation/adeos.txt linux-2.6.10-ppc-adeos/Documentation/adeos.txt
---- linux-2.6.10/Documentation/adeos.txt       1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/Documentation/adeos.txt     2004-11-24 12:33:17.000000000 +0100
-@@ -0,0 +1,176 @@
-+
-+The Adeos nanokernel is based on research and publications made in the
-+early '90s on the subject of nanokernels. Our basic method was to
-+reverse the approach described in most of the papers on the subject.
-+Instead of first building the nanokernel and then building the client
-+OSes, we started from a live and known-to-be-functional OS, Linux, and
-+inserted a nanokernel beneath it. Starting from Adeos, other client
-+OSes can now be put side-by-side with the Linux kernel.
-+
-+To this end, Adeos enables multiple domains to exist simultaneously on
-+the same hardware. None of these domains see each other, but all of
-+them see Adeos. A domain is most probably a complete OS, but there is
-+no assumption being made regarding the sophistication of what's in
-+a domain.
-+
-+To share the hardware among the different OSes, Adeos implements an
-+interrupt pipeline (ipipe). Every OS domain has an entry in the ipipe.
-+Each interrupt that comes in the ipipe is passed on to every domain
-+in the ipipe. Instead of disabling/enabling interrupts, each domain
-+in the pipeline only needs to stall/unstall its pipeline stage. If
-+an ipipe stage is stalled, then the interrupts do not progress in the
-+ipipe until that stage has been unstalled. Each stage of the ipipe
-+can, of course, decide to do a number of things with an interrupt.
-+Among other things, it can decide that it's the last recipient of the
-+interrupt. In that case, the ipipe does not propagate the interrupt
-+to the rest of the domains in the ipipe.
-+
-+Regardless of the operations being done in the ipipe, the Adeos code
-+does __not__ play with the interrupt masks. The only case where the
-+hardware masks are altered is during the addition/removal of a domain
-+from the ipipe. This also means that no OS is allowed to use the real
-+hardware cli/sti. But this is OK, since the stall/unstall calls
-+achieve the same functionality.
-+
-+Our approach is based on the following papers (links to these
-+papers are provided at the bottom of this message):
-+[1] D. Probert, J. Bruno, and M. Karzaorman. "Space: a new approach to
-+operating system abstraction." In: International Workshop on Object
-+Orientation in Operating Systems, pages 133-137, October 1991.
-+[2] D. Probert, J. Bruno. "Building fundamentally extensible application-
-+specific operating systems in Space", March 1995.
-+[3] D. Cheriton, K. Duda. "A caching model of operating system kernel
-+functionality". In: Proc. Symp. on Operating Systems Design and
-+Implementation, pages 179-194, Monterey CA (USA), 1994.
-+[4] D. Engler, M. Kaashoek, and J. O'Toole Jr. "Exokernel: an operating
-+system architecture for application-specific resource management",
-+December 1995.
-+
-+If you don't want to go fetch the complete papers, here's a summary.
-+The first 2 discuss the Space nanokernel, the 3rd discusses the cache
-+nanokernel, and the last discusses exokernel.
-+
-+The complete Adeos approach has been thoroughly documented in a whitepaper
-+published more than a year ago entitled "Adaptive Domain Environment
-+for Operating Systems" and available here: http://www.opersys.com/adeos
-+The current implementation is slightly different. Mainly, we do not
-+implement the functionality to move Linux out of ring 0. Although of
-+interest, this approach is not very portable.
-+
-+Instead, our patch taps right into Linux's main source of control
-+over the hardware, the interrupt dispatching code, and inserts an
-+interrupt pipeline which can then serve all the nanokernel's clients,
-+including Linux.
-+
-+This is not a novelty in itself. Other OSes have been modified in such
-+a way for a wide range of purposes. One of the most interesting
-+examples is described by Stodolsky, Chen, and Bershad in a paper
-+entitled "Fast Interrupt Priority Management in Operating System
-+Kernels" published in 1993 as part of the Usenix Microkernels and
-+Other Kernel Architectures Symposium. In that case, cli/sti were
-+replaced by virtual cli/sti which did not modify the real interrupt
-+mask in any way. Instead, interrupts were deferred and delivered to
-+the OS upon a call to the virtualized sti.
-+
-+Mainly, this resulted in increased performance for the OS. Although
-+we haven't done any measurements on Linux's interrupt handling
-+performance with Adeos, our nanokernel includes by definition the
-+code implementing the technique described in the abovementioned
-+Stodolsky paper, which we use to redirect the hardware interrupt flow
-+to the pipeline.
-+
-+i386 and armnommu are currently supported. Most of the
-+architecture-dependent code is easily portable to other architectures.
-+
-+Aside from adding the Adeos module (driver/adeos), we also modified some
-+files to tap into Linux interrupt and system event dispatching (all
-+the modifications are encapsulated in #ifdef CONFIG_ADEOS_*/#endif).
-+
-+We modified the idle task so it gives control back to Adeos in order for
-+the ipipe to continue propagation.
-+
-+We modified init/main.c to initialize Adeos very early in the startup.
-+
-+Of course, we also added the appropriate makefile modifications and
-+config options so that you can choose to enable/disable Adeos as
-+part of the kernel build configuration.
-+
-+Adeos' public API is fully documented here:
-+http://www.freesoftware.fsf.org/adeos/doc/api/index.html.
-+
-+In Linux's case, adeos_register_domain() is called very early during
-+system startup.
-+
-+To add your domain to the ipipe, you need to:
-+1) Register your domain with Adeos using adeos_register_domain()
-+2) Call adeos_virtualize_irq() for all the IRQs you wish to be
-+notified about in the ipipe.
-+
-+That's it. Provided you gave Adeos appropriate handlers in step
-+#2, your interrupts will be delivered via the ipipe.
-+
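For illustration only, a minimal sketch of steps 1) and 2) above. The attribute
fields are the ones initialized by adeos_init_attr(); the domain id, the
priority offset, MY_IRQ and my_irq_handler are placeholders.

static adomain_t my_domain;

static void my_domain_entry(int iflag)
{
        if (iflag)      /* First entry, boot CPU. */
                adeos_virtualize_irq(MY_IRQ,
                                     &my_irq_handler,
                                     NULL,      /* reuse Linux's ack routine */
                                     IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
}

static int __init my_init(void)
{
        adattr_t attr;

        adeos_init_attr(&attr);
        attr.name = "MyDomain";
        attr.domid = 0x4d594456;
        attr.entry = &my_domain_entry;
        attr.priority = ADEOS_ROOT_PRI + 100;   /* ahead of Linux */

        return adeos_register_domain(&my_domain, &attr);
}
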
-+During runtime, you may change your position in the ipipe using
-+adeos_renice_domain(). You may also stall/unstall the pipeline
-+and change the ipipe's handling of the interrupts according to your
-+needs.
-+
-+Adeos supports SMP, and also supports the APIC on UP systems.
-+
-+Here are some of the possible uses for Adeos (this list is far
-+from complete):
-+1) Much like User-Mode Linux, it should now be possible to have 2
-+Linux kernels living side-by-side on the same hardware. In contrast
-+to UML, this would not be 2 kernels one on top of the other, but
-+really side-by-side. Since Linux can be told at boot time to use
-+only one portion of the available RAM, on a 128MB machine this
-+would mean that the first could be made to use the 0-64MB space and
-+the second would use the 64-128MB space. We realize that many
-+modifications are required. Among other things, one of the 2 kernels
-+will not need to conduct hardware initialization. Nevertheless, this
-+possibility should be studied closer.
-+
-+2) It follows from #1 that adding other kernels beside Linux should
-+be feasible. BSD is a prime candidate, but it would also be nice to
-+see what virtualizers such as VMWare and Plex86 could do with Adeos.
-+Proprietary operating systems could potentially also be accommodated.
-+
-+3) All the previous work that has been done on nanokernels should now
-+be easily ported to Linux. Mainly, we would be very interested to
-+hear about extensions to Adeos. Primarily, we have no mechanisms
-+currently enabling multiple domains to share information. The papers
-+mentioned earlier provide such mechanisms, but we'd like to see
-+actual practical examples.
-+
-+4) Kernel debuggers' main problem (tapping into the kernel's
-+interrupts) is solved and it should then be possible to provide
-+patchless kernel debuggers. They would then become loadable kernel
-+modules.
-+
-+5) Drivers that require absolute priority and dislike other kernel
-+portions that use cli/sti can now create a domain of their own
-+and place themselves before Linux in the ipipe. This provides a
-+mechanism for the implementation of systems that can provide guaranteed
-+realtime response.
-+
-+Philippe Gerum <[EMAIL PROTECTED]>
-+Karim Yaghmour <[EMAIL PROTECTED]>
-+
-+----------------------------------------------------------------------
-+Links to papers:
-+1-
-+http://citeseer.nj.nec.com/probert91space.html
-+ftp://ftp.cs.ucsb.edu/pub/papers/space/iwooos91.ps.gz (not working)
-+http://www4.informatik.uni-erlangen.de/~tsthiel/Papers/Space-iwooos91.ps.gz
-+
-+2-
-+http://www.cs.ucsb.edu/research/trcs/abstracts/1995-06.shtml
-+http://www4.informatik.uni-erlangen.de/~tsthiel/Papers/Space-trcs95-06.ps.gz
-+
-+3-
-+http://citeseer.nj.nec.com/kenneth94caching.html
-+http://guir.cs.berkeley.edu/projects/osprelims/papers/cachmodel-OSkernel.ps.gz
-+
-+4-
-+http://citeseer.nj.nec.com/engler95exokernel.html
-+ftp://ftp.cag.lcs.mit.edu/multiscale/exokernel.ps.Z
-+----------------------------------------------------------------------
-diff -uNrp linux-2.6.10/Makefile linux-2.6.10-ppc-adeos/Makefile
---- linux-2.6.10/Makefile      2004-12-24 22:35:01.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/Makefile    2005-03-27 16:35:18.000000000 +0200
-@@ -558,6 +558,8 @@ export MODLIB
- ifeq ($(KBUILD_EXTMOD),)
- core-y                += kernel/ mm/ fs/ ipc/ security/ crypto/
- 
-+core-$(CONFIG_ADEOS) += adeos/
-+
- vmlinux-dirs  := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
-                    $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
-                    $(net-y) $(net-m) $(libs-y) $(libs-m)))
-diff -uNrp linux-2.6.10/adeos/Kconfig linux-2.6.10-ppc-adeos/adeos/Kconfig
---- linux-2.6.10/adeos/Kconfig 1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/adeos/Kconfig       2005-09-07 15:02:32.000000000 +0200
-@@ -0,0 +1,36 @@
-+menu "Adeos support"
-+
-+config ADEOS
-+      tristate "Adeos support"
-+      default y
-+      ---help---
-+        Activate this option if you want the Adeos nanokernel to be
-+        compiled in.
-+
-+config ADEOS_CORE
-+      def_bool ADEOS
-+
-+config ADEOS_THREADS
-+      bool "Threaded domains"
-+      depends on ADEOS
-+      default n
-+      ---help---
-+        This option causes the domains to run as lightweight
-+        threads, which is useful for having separate stacks
-+        for them. If disabled, interrupts/events are directly
-+        processed on behalf of the preempted context. Say N if
-+        unsure.
-+
-+config ADEOS_NOTHREADS
-+      def_bool !ADEOS_THREADS
-+
-+config ADEOS_PROFILING
-+      bool "Pipeline profiling"
-+      depends on ADEOS
-+      default n
-+      ---help---
-+        This option activates the profiling code which collects the
-+        timestamps needed to measure the propagation time of
-+        interrupts through the pipeline. Say N if unsure.
-+
-+endmenu
-diff -uNrp linux-2.6.10/adeos/Makefile linux-2.6.10-ppc-adeos/adeos/Makefile
---- linux-2.6.10/adeos/Makefile        1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/adeos/Makefile      2005-05-27 19:17:37.000000000 +0200
-@@ -0,0 +1,13 @@
-+#
-+# Makefile for the Adeos layer.
-+#
-+
-+obj-$(CONFIG_ADEOS)   += adeos.o
-+
-+adeos-objs            := generic.o
-+
-+adeos-$(CONFIG_X86)   += x86.o
-+
-+adeos-$(CONFIG_IA64)  += ia64.o
-+
-+adeos-$(CONFIG_PPC)   += ppc.o
-diff -uNrp linux-2.6.10/adeos/generic.c linux-2.6.10-ppc-adeos/adeos/generic.c
---- linux-2.6.10/adeos/generic.c       1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/adeos/generic.c     2005-07-24 19:34:54.000000000 +0200
-@@ -0,0 +1,640 @@
-+/*
-+ *   linux/adeos/generic.c
-+ *
-+ *   Copyright (C) 2002 Philippe Gerum.
-+ *
-+ *   This program is free software; you can redistribute it and/or modify
-+ *   it under the terms of the GNU General Public License as published by
-+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
-+ *   USA; either version 2 of the License, or (at your option) any later
-+ *   version.
-+ *
-+ *   This program is distributed in the hope that it will be useful,
-+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ *   GNU General Public License for more details.
-+ *
-+ *   You should have received a copy of the GNU General Public License
-+ *   along with this program; if not, write to the Free Software
-+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ *   Architecture-independent ADEOS services.
-+ */
-+
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/irq.h>
-+
-+MODULE_DESCRIPTION("Adeos nanokernel");
-+MODULE_AUTHOR("Philippe Gerum");
-+MODULE_LICENSE("GPL");
-+
-+/* adeos_register_domain() -- Add a new domain to the system. All
-+   client domains must call this routine to register themselves to
-+   ADEOS before using its services. */
-+
-+int adeos_register_domain (adomain_t *adp, adattr_t *attr)
-+
-+{
-+    struct list_head *pos;
-+    unsigned long flags;
-+    int n;
-+
-+    if (adp_current != adp_root)
-+      {
-+      printk(KERN_WARNING "Adeos: Only the root domain may register a new domain.\n");
-+      return -EPERM;
-+      }
-+
-+    flags = adeos_critical_enter(NULL);
-+
-+    list_for_each(pos,&__adeos_pipeline) {
-+      adomain_t *_adp = list_entry(pos,adomain_t,p_link);
-+      if (_adp->domid == attr->domid)
-+            break;
-+    }
-+
-+    adeos_critical_exit(flags);
-+
-+    if (pos != &__adeos_pipeline)
-+      /* A domain with the given id already exists -- fail. */
-+      return -EBUSY;
-+
-+    for (n = 0; n < ADEOS_NR_CPUS; n++)
-+      {
-+      /* Each domain starts in sleeping state on every CPU. */
-+      adp->cpudata[n].status = (1 << IPIPE_SLEEP_FLAG);
-+#ifdef CONFIG_ADEOS_THREADS
-+      adp->estackbase[n] = 0;
-+#endif /* CONFIG_ADEOS_THREADS */
-+      }
-+
-+    adp->name = attr->name;
-+    adp->priority = attr->priority;
-+    adp->domid = attr->domid;
-+    adp->dswitch = attr->dswitch;
-+    adp->flags = 0;
-+    adp->ptd_setfun = attr->ptdset;
-+    adp->ptd_getfun = attr->ptdget;
-+    adp->ptd_keymap = 0;
-+    adp->ptd_keycount = 0;
-+    adp->ptd_keymax = attr->nptdkeys;
-+
-+    for (n = 0; n < ADEOS_NR_EVENTS; n++)
-+      /* Event handlers must be cleared before the i-pipe stage is
-+         inserted since an exception may occur on behalf of the new
-+         emerging domain. */
-+      adp->events[n].handler = NULL;
-+
-+    if (attr->entry != NULL)
-+      __adeos_init_domain(adp,attr);
-+
-+    /* Insert the domain in the interrupt pipeline last, so it won't
-+       be resumed for processing interrupts until it has a valid stack
-+       context. */
-+
-+    __adeos_init_stage(adp);
-+
-+    INIT_LIST_HEAD(&adp->p_link);
-+
-+    flags = adeos_critical_enter(NULL);
-+
-+    list_for_each(pos,&__adeos_pipeline) {
-+      adomain_t *_adp = list_entry(pos,adomain_t,p_link);
-+      if (adp->priority > _adp->priority)
-+            break;
-+    }
-+
-+    list_add_tail(&adp->p_link,pos);
-+
-+    adeos_critical_exit(flags);
-+
-+    printk(KERN_WARNING "Adeos: Domain %s registered.\n",adp->name);
-+
-+    /* Finally, allow the new domain to perform its initialization
-+       chores. */
-+
-+    if (attr->entry != NULL)
-+      {
-+      adeos_declare_cpuid;
-+
-+      adeos_lock_cpu(flags);
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+      __adeos_switch_to(adp_root,adp,cpuid);
-+#else /* !CONFIG_ADEOS_THREADS */
-+      adp_cpu_current[cpuid] = adp;
-+      attr->entry(1);
-+      adp_cpu_current[cpuid] = adp_root;
-+#endif /* CONFIG_ADEOS_THREADS */
-+
-+      adeos_load_cpuid();     /* Processor might have changed. */
-+
-+      if (adp_root->cpudata[cpuid].irq_pending_hi != 0 &&
-+          !test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status))
-+          __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+
-+      adeos_unlock_cpu(flags);
-+      }
-+
-+    return 0;
-+}
-+
-+/* adeos_unregister_domain() -- Remove a domain from the system. All
-+   client domains must call this routine to unregister themselves from
-+   the ADEOS layer. */
-+
-+int adeos_unregister_domain (adomain_t *adp)
-+
-+{
-+    unsigned long flags;
-+    unsigned event;
-+
-+    if (adp_current != adp_root)
-+      {
-+      printk(KERN_WARNING "Adeos: Only the root domain may unregister a domain.\n");
-+      return -EPERM;
-+      }
-+
-+    if (adp == adp_root)
-+      {
-+      printk(KERN_WARNING "Adeos: Cannot unregister the root domain.\n");
-+      return -EPERM;
-+      }
-+
-+    for (event = 0; event < ADEOS_NR_EVENTS; event++)
-+      /* Need this to update the monitor count. */
-+      adeos_catch_event_from(adp,event,NULL);
-+
-+#ifdef CONFIG_SMP
-+    {
-+    int nr_cpus = num_online_cpus(), _cpuid;
-+    unsigned irq;
-+
-+    /* In the SMP case, wait for the logged events to drain on other
-+       processors before eventually removing the domain from the
-+       pipeline. */
-+
-+    adeos_unstall_pipeline_from(adp);
-+
-+    flags = adeos_critical_enter(NULL);
-+
-+    for (irq = 0; irq < IPIPE_NR_IRQS; irq++)
-+      {
-+      clear_bit(IPIPE_HANDLE_FLAG,&adp->irqs[irq].control);
-+      clear_bit(IPIPE_STICKY_FLAG,&adp->irqs[irq].control);
-+      set_bit(IPIPE_PASS_FLAG,&adp->irqs[irq].control);
-+      }
-+
-+    adeos_critical_exit(flags);
-+
-+    for (_cpuid = 0; _cpuid < nr_cpus; _cpuid++)
-+      {
-+      for (irq = 0; irq < IPIPE_NR_IRQS; irq++)
-+          while (adp->cpudata[_cpuid].irq_hits[irq] > 0)
-+              cpu_relax();
-+
-+      while (test_bit(IPIPE_XPEND_FLAG,&adp->cpudata[_cpuid].status))
-+          cpu_relax();
-+
-+      while (!test_bit(IPIPE_SLEEP_FLAG,&adp->cpudata[_cpuid].status))
-+           cpu_relax();
-+      }
-+    }
-+#endif /* CONFIG_SMP */
-+
-+    /* Simply remove the domain from the pipeline and we are almost
-+       done. */
-+
-+    flags = adeos_critical_enter(NULL);
-+    list_del_init(&adp->p_link);
-+    adeos_critical_exit(flags);
-+
-+    __adeos_cleanup_domain(adp);
-+
-+    printk(KERN_WARNING "Adeos: Domain %s unregistered.\n",adp->name);
-+
-+    return 0;
-+}
-+
-+/* adeos_propagate_irq() -- Force a given IRQ propagation on behalf of
-+   a running interrupt handler to the next domain down the pipeline.
-+   Returns non-zero if a domain has received the interrupt
-+   notification, zero otherwise.
-+   This call is useful for handling shared interrupts among domains.
-+   e.g. pipeline = [domain-A]---[domain-B]...
-+   Both domains share IRQ #X.
-+   - domain-A handles IRQ #X but does not pass it down (i.e. Terminate
-+   or Dynamic interrupt control mode)
-+   - domain-B handles IRQ #X (i.e. Terminate or Accept interrupt
-+   control modes).
-+   When IRQ #X is raised, domain-A's handler determines whether it
-+   should process the interrupt by identifying its source. If not,
-+   adeos_propagate_irq() is called so that the next domain down the
-+   pipeline which handles IRQ #X is given a chance to process it. This
-+   process can be repeated until the end of the pipeline is
-+   reached. */
-+
-+/* adeos_schedule_irq() -- Almost the same as adeos_propagate_irq(),
-+   but attempts to pend the interrupt for the current domain first. */
-+
-+int fastcall __adeos_schedule_irq (unsigned irq, struct list_head *head)
-+
-+{
-+    struct list_head *ln;
-+    unsigned long flags;
-+    adeos_declare_cpuid;
-+
-+    if (irq >= IPIPE_NR_IRQS ||
-+      (adeos_virtual_irq_p(irq) && !test_bit(irq - IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map)))
-+      return -EINVAL;
-+
-+    adeos_lock_cpu(flags);
-+
-+    ln = head;
-+
-+    while (ln != &__adeos_pipeline)
-+      {
-+      adomain_t *adp = list_entry(ln,adomain_t,p_link);
-+
-+      if (test_bit(IPIPE_HANDLE_FLAG,&adp->irqs[irq].control))
-+          {
-+          adp->cpudata[cpuid].irq_hits[irq]++;
-+          __adeos_set_irq_bit(adp,cpuid,irq);
-+          adeos_unlock_cpu(flags);
-+          return 1;
-+          }
-+
-+      ln = adp->p_link.next;
-+      }
-+
-+    adeos_unlock_cpu(flags);
-+
-+    return 0;
-+}
-+
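Illustration only: domain-A's side of the shared IRQ #X scenario sketched in the comment above. my_board_raised_irq() stands for whatever source check the driver performs; adeos_propagate_irq() is the documented wrapper named there.

static void domain_a_irq_handler(unsigned irq)
{
        if (my_board_raised_irq(irq)) {
                /* Ours: terminate the interrupt in domain-A. */
                /* ... acknowledge and service the device ... */
                return;
        }

        /* Not ours: let domain-B (and Linux) have a look at it. */
        adeos_propagate_irq(irq);
}
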
-+/* adeos_free_irq() -- Return a previously allocated virtual/soft
-+   pipelined interrupt to the pool of allocatable interrupts. */
-+
-+int adeos_free_irq (unsigned irq)
-+
-+{
-+    if (irq >= IPIPE_NR_IRQS)
-+      return -EINVAL;
-+
-+    clear_bit(irq - IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map);
-+
-+    return 0;
-+}
-+
-+cpumask_t adeos_set_irq_affinity (unsigned irq, cpumask_t cpumask)
-+
-+{
-+#ifdef CONFIG_SMP
-+     if (irq >= IPIPE_NR_XIRQS)
-+       /* Allow changing affinity of external IRQs only. */
-+       return CPU_MASK_NONE;
-+
-+     if (num_online_cpus() > 1)
-+       /* Only meaningful with more than one online CPU. */
-+       return __adeos_set_irq_affinity(irq,cpumask);
-+#endif /* CONFIG_SMP */
-+
-+    return CPU_MASK_NONE;
-+}
-+
-+/* adeos_catch_event_from() -- Interpose an event handler starting
-+   from a given domain. */
-+
-+adevhand_t adeos_catch_event_from (adomain_t *adp, unsigned event, adevhand_t handler)
-+
-+{
-+    adevhand_t oldhandler;
-+
-+    if (event >= ADEOS_NR_EVENTS)
-+      return NULL;
-+
-+    if ((oldhandler = (adevhand_t)xchg(&adp->events[event].handler,handler)) == NULL)
-+      {
-+      if (handler)
-+          __adeos_event_monitors[event]++;
-+      }
-+    else if (!handler)
-+      __adeos_event_monitors[event]--;
-+
-+    return oldhandler;
-+}
-+
-+void adeos_init_attr (adattr_t *attr)
-+
-+{
-+    attr->name = "Anonymous";
-+    attr->domid = 1;
-+    attr->entry = NULL;
-+    attr->estacksz = 0;       /* Let ADEOS choose a reasonable stack size */
-+    attr->priority = ADEOS_ROOT_PRI;
-+    attr->dswitch = NULL;
-+    attr->nptdkeys = 0;
-+    attr->ptdset = NULL;
-+    attr->ptdget = NULL;
-+}
-+
-+int adeos_alloc_ptdkey (void)
-+
-+{
-+    unsigned long flags;
-+    int key = -1;
-+
-+    spin_lock_irqsave_hw(&__adeos_pipelock,flags);
-+
-+    if (adp_current->ptd_keycount < adp_current->ptd_keymax)
-+      {
-+      key = ffz(adp_current->ptd_keymap);
-+      set_bit(key,&adp_current->ptd_keymap);
-+      adp_current->ptd_keycount++;
-+      }
-+
-+    spin_unlock_irqrestore_hw(&__adeos_pipelock,flags);
-+
-+    return key;
-+}
-+
-+int adeos_free_ptdkey (int key)
-+
-+{
-+    unsigned long flags; 
-+
-+    if (key < 0 || key >= adp_current->ptd_keymax)
-+      return -EINVAL;
-+
-+    spin_lock_irqsave_hw(&__adeos_pipelock,flags);
-+
-+    if (test_and_clear_bit(key,&adp_current->ptd_keymap))
-+      adp_current->ptd_keycount--;
-+
-+    spin_unlock_irqrestore_hw(&__adeos_pipelock,flags);
-+
-+    return 0;
-+}
-+
-+int adeos_set_ptd (int key, void *value)
-+
-+{
-+    if (key < 0 || key >= adp_current->ptd_keymax)
-+      return -EINVAL;
-+
-+    if (!adp_current->ptd_setfun)
-+      {
-+      printk(KERN_WARNING "Adeos: No ptdset hook for %s\n",adp_current->name);
-+      return -EINVAL;
-+      }
-+
-+    adp_current->ptd_setfun(key,value);
-+
-+    return 0;
-+}
-+
-+void *adeos_get_ptd (int key)
-+
-+{
-+    if (key < 0 || key >= adp_current->ptd_keymax)
-+      return NULL;
-+
-+    if (!adp_current->ptd_getfun)
-+      {
-+      printk(KERN_WARNING "Adeos: No ptdget hook for %s\n",adp_current->name);
-+      return NULL;
-+      }
-+
-+    return adp_current->ptd_getfun(key);
-+}
-+
-+int adeos_init_mutex (admutex_t *mutex)
-+
-+{
-+    admutex_t initm = ADEOS_MUTEX_UNLOCKED;
-+    *mutex = initm;
-+    return 0;
-+}
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+
-+int adeos_destroy_mutex (admutex_t *mutex)
-+
-+{
-+    if (!adeos_spin_trylock(&mutex->lock) &&
-+      adp_current != adp_root &&
-+      mutex->owner != adp_current)
-+      return -EBUSY;
-+
-+    return 0;
-+}
-+
-+static inline void __adeos_sleepon_mutex (admutex_t *mutex, adomain_t *sleeper, int cpuid)
-+
-+{
-+    adomain_t *owner = mutex->owner;
-+
-+    /* Make the current domain (== sleeper) wait for the mutex to be
-+       released. Adeos' pipelined scheme guarantees that the new
-+       sleeper _is_ higher priority than any sleeping domain since we
-+       have stalled each sleeper's stage. Must be called with local hw
-+       interrupts off. */
-+
-+    sleeper->m_link = mutex->sleepq;
-+    mutex->sleepq = sleeper;
-+    __adeos_switch_to(adp_cpu_current[cpuid],owner,cpuid);
-+    mutex->owner = sleeper;
-+    adeos_spin_unlock(&mutex->lock);
-+}
-+
-+unsigned long fastcall adeos_lock_mutex (admutex_t *mutex)
-+
-+{
-+    unsigned long flags, hwflags;
-+    adeos_declare_cpuid;
-+    adomain_t *adp;
-+
-+    if (!adp_pipelined)
-+      {
-+      adeos_hw_local_irq_save(hwflags);
-+      flags = !adeos_hw_test_iflag(hwflags);
-+      adeos_spin_lock(&mutex->lock);
-+      return flags;
-+      }
-+
-+    adeos_lock_cpu(hwflags);
-+
-+    adp = adp_cpu_current[cpuid];
-+
-+    flags = __test_and_set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+
-+    /* Two cases to handle here on SMP systems, only one for UP: 1) in
-+       case of a conflicting access from a higher priority domain
-+       running on the same cpu, make this domain sleep on the mutex,
-+       and resume the current owner so it can release the lock asap.
-+       2) in case of a conflicting access from any domain on a
-+       different cpu than the current owner's, simply enter a spinning
-+       loop. Note that testing mutex->owncpu is safe since it is only
-+       changed by the current owner, and set to -1 when the mutex is
-+       unlocked. */
-+
-+#ifdef CONFIG_SMP
-+    while (!adeos_spin_trylock(&mutex->lock))
-+      {
-+      if (mutex->owncpu == cpuid)
-+          {
-+          __adeos_sleepon_mutex(mutex,adp,cpuid);
-+          adeos_load_cpuid();
-+          }
-+      }
-+
-+    mutex->owncpu = cpuid;
-+#else  /* !CONFIG_SMP */
-+    while (mutex->owner != NULL && mutex->owner != adp)
-+      __adeos_sleepon_mutex(mutex,adp,cpuid);
-+#endif /* CONFIG_SMP */
-+
-+    mutex->owner = adp;
-+
-+    adeos_unlock_cpu(hwflags);
-+
-+    return flags;
-+}
-+
-+void fastcall adeos_unlock_mutex (admutex_t *mutex, unsigned long flags)
-+
-+{
-+    unsigned long hwflags;
-+    adeos_declare_cpuid;
-+    adomain_t *adp;
-+
-+    if (!adp_pipelined)
-+      {
-+      adeos_spin_unlock(&mutex->lock);
-+
-+      if (flags)
-+          adeos_hw_cli();
-+      else
-+          adeos_hw_sti();
-+
-+      return;
-+      }
-+
-+#ifdef CONFIG_SMP
-+    mutex->owncpu = -1;
-+#endif /* CONFIG_SMP */
-+
-+    if (!flags)
-+      adeos_hw_sti(); /* Absolutely needed. */
-+      
-+    adeos_lock_cpu(hwflags);
-+
-+    if (mutex->sleepq != NULL)
-+      {
-+      adomain_t *sleeper = mutex->sleepq;
-+      /* Wake up the highest priority sleeper. */
-+      mutex->sleepq = sleeper->m_link;
-+      __adeos_switch_to(adp_cpu_current[cpuid],sleeper,cpuid);
-+      adeos_load_cpuid();
-+      }
-+    else
-+      {
-+      mutex->owner = NULL;
-+      adeos_spin_unlock(&mutex->lock);
-+      }
-+
-+    adp = adp_cpu_current[cpuid];
-+
-+    if (flags)
-+      __set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+    else
-+      {
-+      __clear_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+      
-+      if (adp->cpudata[cpuid].irq_pending_hi != 0)
-+          __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+      }
-+
-+    adeos_unlock_cpu(hwflags);
-+}
-+
-+#else /* !CONFIG_ADEOS_THREADS */
-+
-+int adeos_destroy_mutex (admutex_t *mutex)
-+
-+{
-+    if (!adeos_spin_trylock(&mutex->lock) &&
-+      adp_current != adp_root)
-+      return -EBUSY;
-+
-+    return 0;
-+}
-+
-+unsigned long fastcall adeos_lock_mutex (admutex_t *mutex)
-+
-+{
-+    unsigned long flags; /* FIXME: won't work on SPARC */
-+    spin_lock_irqsave_hw(&mutex->lock,flags);
-+    return flags;
-+}
-+
-+void fastcall adeos_unlock_mutex (admutex_t *mutex, unsigned long flags)
-+
-+{
-+    spin_unlock_irqrestore_hw(&mutex->lock,flags);
-+}
-+
-+#endif /* CONFIG_ADEOS_THREADS */
-+
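Illustration only: the basic usage pattern for the mutex primitives defined above, with the flags value returned by adeos_lock_mutex() handed back on unlock. The shared data and function names are placeholders.

static admutex_t my_mutex;

static void my_setup(void)
{
        adeos_init_mutex(&my_mutex);
}

static void my_touch_shared_data(void)
{
        unsigned long flags;

        flags = adeos_lock_mutex(&my_mutex);
        /* ... update data shared with another domain ... */
        adeos_unlock_mutex(&my_mutex, flags);
}
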
-+void __adeos_takeover (void)
-+
-+{
-+    __adeos_enable_pipeline();
-+    printk(KERN_WARNING "Adeos: Pipelining started.\n");
-+}
-+
-+#ifdef MODULE
-+
-+static int __init adeos_init_module (void)
-+
-+{
-+    __adeos_takeover();
-+    return 0;
-+}
-+
-+static void __exit adeos_exit_module (void)
-+
-+{
-+    __adeos_disable_pipeline();
-+    printk(KERN_WARNING "Adeos: Pipelining stopped.\n");
-+}
-+
-+module_init(adeos_init_module);
-+module_exit(adeos_exit_module);
-+
-+#endif /* MODULE */
-+
-+EXPORT_SYMBOL(adeos_register_domain);
-+EXPORT_SYMBOL(adeos_unregister_domain);
-+EXPORT_SYMBOL(adeos_virtualize_irq_from);
-+EXPORT_SYMBOL(adeos_control_irq);
-+EXPORT_SYMBOL(__adeos_schedule_irq);
-+EXPORT_SYMBOL(adeos_free_irq);
-+EXPORT_SYMBOL(adeos_send_ipi);
-+EXPORT_SYMBOL(adeos_catch_event_from);
-+EXPORT_SYMBOL(adeos_init_attr);
-+EXPORT_SYMBOL(adeos_get_sysinfo);
-+EXPORT_SYMBOL(adeos_tune_timer);
-+EXPORT_SYMBOL(adeos_alloc_ptdkey);
-+EXPORT_SYMBOL(adeos_free_ptdkey);
-+EXPORT_SYMBOL(adeos_set_ptd);
-+EXPORT_SYMBOL(adeos_get_ptd);
-+EXPORT_SYMBOL(adeos_set_irq_affinity);
-+EXPORT_SYMBOL(adeos_init_mutex);
-+EXPORT_SYMBOL(adeos_destroy_mutex);
-+EXPORT_SYMBOL(adeos_lock_mutex);
-+EXPORT_SYMBOL(adeos_unlock_mutex);
-diff -uNrp linux-2.6.10/adeos/ppc.c linux-2.6.10-ppc-adeos/adeos/ppc.c
---- linux-2.6.10/adeos/ppc.c   1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/adeos/ppc.c 2005-03-13 09:33:35.000000000 +0100
-@@ -0,0 +1,514 @@
-+/*
-+ *   linux/adeos/ppc.c
-+ *
-+ *   Copyright (C) 2004 Philippe Gerum.
-+ *
-+ *   Adeos/PPC port over 2.6 based on the previous 2.4 implementation by:
-+ *
-+ *   Copyright (C) 2004 Wolfgang Grandegger.
-+ *
-+ *   It follows closely the ARM and x86 ports of ADEOS.
-+ *
-+ *   Copyright (C) 2003 Philippe Gerum.
-+ *
-+ *   This program is free software; you can redistribute it and/or modify
-+ *   it under the terms of the GNU General Public License as published by
-+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
-+ *   USA; either version 2 of the License, or (at your option) any later
-+ *   version.
-+ *
-+ *   This program is distributed in the hope that it will be useful,
-+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ *   GNU General Public License for more details.
-+ *
-+ *   You should have received a copy of the GNU General Public License
-+ *   along with this program; if not, write to the Free Software
-+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ *   Architecture-dependent ADEOS support for PowerPC.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/interrupt.h>
-+#include <linux/irq.h>
-+#include <linux/errno.h>
-+#include <asm/system.h>
-+#include <asm/hw_irq.h>
-+#include <asm/irq.h>
-+#include <asm/atomic.h>
-+#include <asm/io.h>
-+#include <asm/time.h>
-+
-+extern spinlock_t __adeos_pipelock;
-+
-+extern unsigned long __adeos_virtual_irq_map;
-+
-+extern struct list_head __adeos_pipeline;
-+
-+extern irq_desc_t irq_desc[];
-+
-+static struct hw_interrupt_type __adeos_std_irq_dtype[NR_IRQS];
-+
-+static void __adeos_override_irq_enable (unsigned irq)
-+
-+{
-+    unsigned long adflags, hwflags;
-+    adeos_declare_cpuid;
-+
-+    adeos_hw_local_irq_save(hwflags);
-+    adflags = adeos_test_and_stall_pipeline();
-+    preempt_disable();
-+    __adeos_unlock_irq(adp_cpu_current[cpuid],irq);
-+    __adeos_std_irq_dtype[irq].enable(irq);
-+    preempt_enable_no_resched();
-+    adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
-+    adeos_hw_local_irq_restore(hwflags);
-+}
-+
-+static void __adeos_override_irq_disable (unsigned irq)
-+
-+{
-+    unsigned long adflags, hwflags;
-+    adeos_declare_cpuid;
-+
-+    adeos_hw_local_irq_save(hwflags);
-+    adflags = adeos_test_and_stall_pipeline();
-+    preempt_disable();
-+    __adeos_std_irq_dtype[irq].disable(irq);
-+    __adeos_lock_irq(adp_cpu_current[cpuid],cpuid,irq);
-+    preempt_enable_no_resched();
-+    adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
-+    adeos_hw_local_irq_restore(hwflags);
-+}
-+
-+static void __adeos_override_irq_end (unsigned irq)
-+
-+{
-+    unsigned long adflags, hwflags;
-+    adeos_declare_cpuid;
-+
-+    adeos_hw_local_irq_save(hwflags);
-+    adflags = adeos_test_and_stall_pipeline();
-+    preempt_disable();
-+
-+    if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-+      __adeos_unlock_irq(adp_cpu_current[cpuid],irq);
-+
-+    __adeos_std_irq_dtype[irq].end(irq);
-+
-+    preempt_enable_no_resched();
-+    adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
-+    adeos_hw_local_irq_restore(hwflags);
-+}
-+
-+static void __adeos_override_irq_affinity (unsigned irq, cpumask_t mask)
-+
-+{
-+    unsigned long adflags, hwflags;
-+    adeos_declare_cpuid;
-+
-+    adeos_hw_local_irq_save(hwflags);
-+    adflags = adeos_test_and_stall_pipeline();
-+    preempt_disable();
-+    __adeos_std_irq_dtype[irq].set_affinity(irq,mask);
-+    preempt_enable_no_resched();
-+    adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
-+    adeos_hw_local_irq_restore(hwflags);
-+}
-+
-+static void  __adeos_enable_sync (void)
-+
-+{
-+    __adeos_decr_next[adeos_processor_id()] = __adeos_read_timebase() + get_dec();
-+}
-+
-+/* __adeos_enable_pipeline() -- Take over the interrupt control from
-+   the root domain (i.e. Linux). After this routine has returned, all
-+   interrupts go through the pipeline. */
-+
-+void __adeos_enable_pipeline (void)
-+
-+{
-+    unsigned long flags;
-+    unsigned irq;
-+
-+    flags = adeos_critical_enter(&__adeos_enable_sync);
-+
-+    /* First, virtualize all interrupts from the root domain. */
-+
-+    for (irq = 0; irq < NR_IRQS; irq++)
-+      adeos_virtualize_irq(irq,
-+                           (void (*)(unsigned))&__adeos_do_IRQ,
-+                           &__adeos_ack_irq,
-+                           IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
-+
-+    /* We use a virtual IRQ to handle the timer irq (decrementer trap)
-+       which has been allocated early in __adeos_init_platform(). */
-+
-+    adeos_virtualize_irq(ADEOS_TIMER_VIRQ,
-+                       (void (*)(unsigned))&__adeos_do_timer,
-+                       NULL,
-+                       IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
-+  
-+
-+    /* Interpose on the IRQ control routines so we can make them
-+       atomic using hw masking and prevent the interrupt log from
-+       being untimely flushed. */
-+
-+    for (irq = 0; irq < NR_IRQS; irq++) 
-+      {
-+      if (irq_desc[irq].handler != NULL)
-+          __adeos_std_irq_dtype[irq] = *irq_desc[irq].handler;
-+      }
-+
-+    /* The original controller structs are often shared, so we first
-+       save them all before changing any of them. Notice that we don't
-+       override the ack() handler since we will enforce the necessary
-+       setup in __adeos_ack_irq(). */
-+
-+    for (irq = 0; irq < NR_IRQS; irq++)
-+      {
-+      struct hw_interrupt_type *handler = irq_desc[irq].handler;
-+
-+      if (handler == NULL)
-+          continue;
-+
-+      if (handler->enable != NULL)
-+          handler->enable = &__adeos_override_irq_enable;
-+
-+      if (handler->disable != NULL)
-+          handler->disable = &__adeos_override_irq_disable;
-+
-+      if (handler->end != NULL)
-+          handler->end = &__adeos_override_irq_end;
-+
-+      if (handler->set_affinity != NULL)
-+          handler->set_affinity = &__adeos_override_irq_affinity;
-+      }
-+
-+    __adeos_decr_next[adeos_processor_id()] = __adeos_read_timebase() + get_dec();
-+
-+    adp_pipelined = 1;
-+
-+    adeos_critical_exit(flags);
-+}
-+
-+/* __adeos_disable_pipeline() -- Disengage the pipeline. */
-+
-+void __adeos_disable_pipeline (void)
-+
-+{
-+    unsigned long flags;
-+    unsigned irq;
-+
-+    flags = adeos_critical_enter(NULL);
-+
-+    /* Restore interrupt controllers. */
-+
-+    for (irq = 0; irq < NR_IRQS; irq++)
-+      {
-+      if (irq_desc[irq].handler != NULL)
-+          *irq_desc[irq].handler = __adeos_std_irq_dtype[irq];
-+      }
-+
-+    adp_pipelined = 0;
-+
-+    adeos_critical_exit(flags);
-+}
-+
-+/* adeos_virtualize_irq_from() -- Attach a handler (and optionally a
-+   hw acknowledge routine) to an interrupt for the given domain. */
-+
-+int adeos_virtualize_irq_from (adomain_t *adp,
-+                             unsigned irq,
-+                             void (*handler)(unsigned irq),
-+                             int (*acknowledge)(unsigned irq),
-+                             unsigned modemask)
-+{
-+    unsigned long flags;
-+    int err;
-+
-+    if (irq >= IPIPE_NR_IRQS)
-+      return -EINVAL;
-+
-+    if (adp->irqs[irq].control & IPIPE_SYSTEM_MASK)
-+      return -EPERM;
-+      
-+    adeos_spin_lock_irqsave(&__adeos_pipelock,flags);
-+
-+    if (handler != NULL)
-+      {
-+      /* A bit of hack here: if we are re-virtualizing an IRQ just
-+         to change the acknowledge routine by passing the special
-+         ADEOS_SAME_HANDLER value, then allow recycling the current
-+         handler for the IRQ. This allows Linux device drivers
-+         managing shared IRQ lines to call adeos_virtualize_irq() in
-+         addition to request_irq() just for the purpose of
-+         interposing their own shared acknowledge routine. */
-+
-+      if (handler == ADEOS_SAME_HANDLER)
-+          {
-+          handler = adp->irqs[irq].handler;
-+
-+          if (handler == NULL)
-+              {
-+              err = -EINVAL;
-+              goto unlock_and_exit;
-+              }
-+          }
-+      else if ((modemask & IPIPE_EXCLUSIVE_MASK) != 0 &&
-+               adp->irqs[irq].handler != NULL)
-+          {
-+          err = -EBUSY;
-+          goto unlock_and_exit;
-+          }
-+      
-+      if ((modemask & (IPIPE_SHARED_MASK|IPIPE_PASS_MASK)) == IPIPE_SHARED_MASK)
-+          {
-+          err = -EINVAL;
-+          goto unlock_and_exit;
-+          }
-+
-+      if ((modemask & IPIPE_STICKY_MASK) != 0)
-+          modemask |= IPIPE_HANDLE_MASK;
-+      }
-+    else
-+      modemask &= ~(IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK|IPIPE_SHARED_MASK);
-+
-+    if (acknowledge == NULL)
-+      {
-+      if ((modemask & IPIPE_SHARED_MASK) == 0)
-+          /* Acknowledge handler unspecified -- this is ok in
-+             non-shared management mode, but we will force the use
-+             of the Linux-defined handler instead. */
-+          acknowledge = adp_root->irqs[irq].acknowledge;
-+      else
-+          {
-+          /* A valid acknowledge handler to be called in shared mode
-+             is required when declaring a shared IRQ. */
-+          err = -EINVAL;
-+          goto unlock_and_exit;
-+          }
-+      }
-+
-+    adp->irqs[irq].handler = handler;
-+    adp->irqs[irq].acknowledge = acknowledge;
-+    adp->irqs[irq].control = modemask;
-+
-+    if (irq < NR_IRQS &&
-+      handler != NULL &&
-+      !adeos_virtual_irq_p(irq) &&
-+      (modemask & IPIPE_ENABLE_MASK) != 0)
-+      {
-+      if (adp != adp_current)
-+          {
-+          /* IRQ enable/disable state is domain-sensitive, so we may
-+             not change it for another domain. What is allowed
-+             however is forcing some domain to handle an interrupt
-+             source, by passing the proper 'adp' descriptor which
-+             thus may be different from adp_current. */
-+          err = -EPERM;
-+          goto unlock_and_exit;
-+          }
-+
-+      enable_irq(irq);
-+      }
-+
-+    err = 0;
-+
-+unlock_and_exit:
-+
-+    adeos_spin_unlock_irqrestore(&__adeos_pipelock,flags);
-+
-+    return err;
-+}
-+
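Illustration only: the ADEOS_SAME_HANDLER pattern mentioned in the comment above, as a Linux driver might use it to interpose a shared acknowledge routine next to its usual request_irq() registration. my_shared_ack() is hypothetical and the mode mask is only a plausible choice, not taken from the patch.

static int my_shared_ack(unsigned irq)
{
        /* Quiesce the device early, before the line is re-enabled. */
        return 0;
}

static int my_driver_hook_irq(unsigned irq)
{
        return adeos_virtualize_irq(irq,
                                    ADEOS_SAME_HANDLER, /* keep Linux's handler */
                                    &my_shared_ack,
                                    IPIPE_SHARED_MASK|IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
}
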
-+/* adeos_control_irq() -- Change an interrupt mode. This affects the
-+   way a given interrupt is handled by ADEOS for the current
-+   domain. setmask is a bitmask telling whether:
-+   - the interrupt should be passed to the domain (IPIPE_HANDLE_MASK),
-+     and/or
-+   - the interrupt should be passed down to the lower priority domain(s)
-+     in the pipeline (IPIPE_PASS_MASK).
-+   This leads to four possibilities:
-+   - PASS only => Ignore the interrupt
-+   - HANDLE only => Terminate the interrupt (process but don't pass down)
-+   - PASS + HANDLE => Accept the interrupt (process and pass down)
-+   - <none> => Discard the interrupt
-+   - DYNAMIC is currently an alias of HANDLE since it marks an interrupt
-+   which is processed by the current domain but not implicitly passed
-+   down to the pipeline, letting the domain's handler choose on a case-
-+   by-case basis whether the interrupt propagation should be forced
-+   using adeos_propagate_irq().
-+   clrmask clears the corresponding bits from the control field before
-+   setmask is applied.
-+*/
-+
-+int adeos_control_irq (unsigned irq,
-+                     unsigned clrmask,
-+                     unsigned setmask)
-+{
-+    irq_desc_t *desc;
-+    unsigned long flags;
-+
-+    if (irq >= IPIPE_NR_IRQS)
-+      return -EINVAL;
-+
-+    if (adp_current->irqs[irq].control & IPIPE_SYSTEM_MASK)
-+      return -EPERM;
-+      
-+    if (((setmask|clrmask) & IPIPE_SHARED_MASK) != 0)
-+      return -EINVAL;
-+      
-+    desc = irq_desc + irq;
-+
-+    if (adp_current->irqs[irq].handler == NULL)
-+      setmask &= ~(IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK);
-+
-+    if ((setmask & IPIPE_STICKY_MASK) != 0)
-+      setmask |= IPIPE_HANDLE_MASK;
-+
-+    if ((clrmask & (IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK)) != 0)       /* If one goes, both go. */
-+      clrmask |= (IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK);
-+
-+    adeos_spin_lock_irqsave(&__adeos_pipelock,flags);
-+
-+    adp_current->irqs[irq].control &= ~clrmask;
-+    adp_current->irqs[irq].control |= setmask;
-+
-+    if ((setmask & IPIPE_ENABLE_MASK) != 0)
-+      enable_irq(irq);
-+    else if ((clrmask & IPIPE_ENABLE_MASK) != 0)
-+      disable_irq(irq);
-+
-+    adeos_spin_unlock_irqrestore(&__adeos_pipelock,flags);
-+
-+    return 0;
-+}
-+
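Illustration only: switching an IRQ between two of the modes listed in the comment above, for the calling domain. The IRQ number is whatever the caller owns; the wrapper names are placeholders.

static void my_terminate_irq(unsigned irq)
{
        /* HANDLE only => Terminate: process here, don't pass it down. */
        adeos_control_irq(irq, IPIPE_PASS_MASK, IPIPE_HANDLE_MASK);
}

static void my_accept_irq(unsigned irq)
{
        /* HANDLE + PASS => Accept: process here and pass it down. */
        adeos_control_irq(irq, 0, IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
}
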
-+#ifdef CONFIG_ADEOS_THREADS
-+
-+void __adeos_init_domain (adomain_t *adp, adattr_t *attr)
-+
-+{
-+    int estacksz = attr->estacksz > 0 ? attr->estacksz : 8192, _cpuid;
-+    unsigned long flags, *ksp;
-+    adeos_declare_cpuid;
-+
-+    adeos_hw_local_irq_flags(flags);
-+
-+    for (_cpuid = 0; _cpuid < num_online_cpus(); _cpuid++)
-+      {
-+      adp->estackbase[_cpuid] = (unsigned long)kmalloc(estacksz,GFP_KERNEL);
-+    
-+      if (adp->estackbase[_cpuid] == 0)
-+          panic("Adeos: No memory for domain stack on CPU #%d",_cpuid);
-+
-+      adp->esp[_cpuid] = adp->estackbase[_cpuid];
-+      *((unsigned long *)adp->esp[_cpuid]) = 0;
-+      ksp = (unsigned long *)(((adp->esp[_cpuid] + estacksz - 16) & ~0xf) - 108);
-+      adp->esp[_cpuid] = (unsigned long)ksp - STACK_FRAME_OVERHEAD;
-+      ksp[19] = (_cpuid == cpuid); /* r3 */
-+      ksp[25] = (unsigned long)attr->entry; /* lr */
-+      ksp[26] = flags & ~MSR_EE; /* msr */
-+      }
-+}
-+
-+#else /* !CONFIG_ADEOS_THREADS */
-+
-+void __adeos_init_domain (adomain_t *adp, adattr_t *attr)
-+
-+{}
-+
-+#endif /* CONFIG_ADEOS_THREADS */
-+
-+void __adeos_cleanup_domain (adomain_t *adp)
-+
-+{
-+    int nr_cpus = num_online_cpus();
-+    int _cpuid;
-+
-+    adeos_unstall_pipeline_from(adp);
-+
-+    for (_cpuid = 0; _cpuid < nr_cpus; _cpuid++)
-+      {
-+#ifdef CONFIG_SMP
-+      while (adp->cpudata[_cpuid].irq_pending_hi != 0)
-+          cpu_relax();
-+
-+      while (test_bit(IPIPE_XPEND_FLAG,&adp->cpudata[_cpuid].status))
-+          cpu_relax();
-+#endif /* CONFIG_SMP */
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+      if (adp->estackbase[_cpuid] != 0)
-+          kfree((void *)adp->estackbase[_cpuid]);
-+#endif /* CONFIG_ADEOS_THREADS */
-+      }
-+}
-+
-+int adeos_get_sysinfo (adsysinfo_t *info)
-+
-+{
-+    info->ncpus = num_online_cpus();
-+    info->cpufreq = adeos_cpu_freq();
-+    info->archdep.tmirq = ADEOS_TIMER_VIRQ;
-+    info->archdep.tmfreq = info->cpufreq;
-+
-+    return 0;
-+}
-+
-+static void __adeos_set_decr (void)
-+
-+{
-+    adeos_declare_cpuid;
-+
-+    adeos_load_cpuid();
-+
-+    disarm_decr[cpuid] = (__adeos_decr_ticks != tb_ticks_per_jiffy);
-+#ifdef CONFIG_40x
-+    /* Enable and set auto-reload. */
-+    mtspr(SPRN_TCR,mfspr(SPRN_TCR) | TCR_ARE);
-+    mtspr(SPRN_PIT,__adeos_decr_ticks);
-+#else /* !CONFIG_40x */
-+    __adeos_decr_next[cpuid] = __adeos_read_timebase() + __adeos_decr_ticks;
-+    set_dec(__adeos_decr_ticks);
-+#endif /* CONFIG_40x */
-+}
-+
-+int adeos_tune_timer (unsigned long ns, int flags)
-+
-+{
-+    unsigned long x, ticks;
-+
-+    if (flags & ADEOS_RESET_TIMER)
-+      ticks = tb_ticks_per_jiffy;
-+    else
-+      {
-+      ticks = ns * tb_ticks_per_jiffy / (1000000000 / HZ);
-+
-+      if (ticks > tb_ticks_per_jiffy)
-+          return -EINVAL;
-+      }
-+
-+    x = adeos_critical_enter(&__adeos_set_decr); /* Sync with all CPUs */
-+    __adeos_decr_ticks = ticks;
-+    __adeos_set_decr();
-+    adeos_critical_exit(x);
-+
-+    return 0;
-+}
-+
-+/* adeos_send_ipi() -- Send a specified service IPI to a set of
-+   processors. */
-+
-+int adeos_send_ipi (unsigned ipi, cpumask_t cpumask)
-+
-+{
-+    printk(KERN_WARNING "Adeos: Call to unimplemented adeos_send_ipi() from %s\n",adp_current->name);
-+    return 0;
-+}
-diff -uNrp linux-2.6.10/arch/ppc/Kconfig linux-2.6.10-ppc-adeos/arch/ppc/Kconfig
---- linux-2.6.10/arch/ppc/Kconfig      2004-12-24 22:35:40.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/arch/ppc/Kconfig    2005-03-27 16:35:18.000000000 +0200
-@@ -1207,6 +1207,8 @@ endmenu
- 
- source "lib/Kconfig"
- 
-+source "adeos/Kconfig"
-+
- source "arch/ppc/oprofile/Kconfig"
- 
- source "arch/ppc/Kconfig.debug"
-diff -uNrp linux-2.6.10/arch/ppc/kernel/Makefile linux-2.6.10-ppc-adeos/arch/ppc/kernel/Makefile
---- linux-2.6.10/arch/ppc/kernel/Makefile      2004-12-24 22:35:28.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/arch/ppc/kernel/Makefile    2005-03-27 16:35:18.000000000 +0200
-@@ -29,3 +29,4 @@ ifndef CONFIG_MATH_EMULATION
- obj-$(CONFIG_8xx)             += softemu8xx.o
- endif
- 
-+obj-$(CONFIG_ADEOS_CORE)      += adeos.o
-diff -uNrp linux-2.6.10/arch/ppc/kernel/adeos.c linux-2.6.10-ppc-adeos/arch/ppc/kernel/adeos.c
---- linux-2.6.10/arch/ppc/kernel/adeos.c       1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/arch/ppc/kernel/adeos.c     2005-08-17 13:54:27.000000000 +0200
-@@ -0,0 +1,716 @@
-+/*
-+ *   linux/arch/ppc/kernel/adeos.c
-+ *
-+ *   Copyright (C) 2004 Philippe Gerum.
-+ *
-+ *   Adeos/PPC port over 2.6 based on the previous 2.4 implementation by:
-+ *
-+ *   Copyright (C) 2004 Wolfgang Grandegger.
-+ *
-+ *   It follows closely the ARM and x86 ports of ADEOS.
-+ *
-+ *   Copyright (C) 2003 Philippe Gerum.
-+ *
-+ *   This program is free software; you can redistribute it and/or modify
-+ *   it under the terms of the GNU General Public License as published by
-+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
-+ *   USA; either version 2 of the License, or (at your option) any later
-+ *   version.
-+ *
-+ *   This program is distributed in the hope that it will be useful,
-+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ *   GNU General Public License for more details.
-+ *
-+ *   You should have received a copy of the GNU General Public License
-+ *   along with this program; if not, write to the Free Software
-+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ *   Architecture-dependent ADEOS core support for PowerPC
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <linux/smp.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/bitops.h>
-+#include <linux/interrupt.h>
-+#include <linux/irq.h>
-+#include <linux/module.h>
-+#include <asm/system.h>
-+#include <asm/atomic.h>
-+#include <asm/hw_irq.h>
-+#include <asm/irq.h>
-+#include <asm/io.h>
-+#include <asm/time.h>
-+
-+#ifdef CONFIG_SMP
-+
-+static cpumask_t __adeos_cpu_sync_map;
-+
-+static cpumask_t __adeos_cpu_lock_map;
-+
-+static spinlock_t __adeos_cpu_barrier = SPIN_LOCK_UNLOCKED;
-+
-+static atomic_t __adeos_critical_count = ATOMIC_INIT(0);
-+
-+static void (*__adeos_cpu_sync)(void);
-+
-+#endif /* CONFIG_SMP */
-+
-+int do_IRQ(struct pt_regs *regs);
-+
-+extern struct list_head __adeos_pipeline;
-+
-+struct pt_regs __adeos_irq_regs;
-+
-+#ifdef CONFIG_POWER4
-+extern struct irqaction k2u3_cascade_action;
-+extern int openpic2_get_irq(struct pt_regs *regs);
-+#endif
-+
-+/* Current reload value for the decrementer. */
-+unsigned long __adeos_decr_ticks;
-+
-+/* Next tick date (timebase value). */
-+unsigned long long __adeos_decr_next[ADEOS_NR_CPUS];
-+
-+static inline unsigned long ffnz (unsigned long ul) {
-+
-+    __asm__ __volatile__ ("cntlzw %0, %1" : "=r" (ul) : "r" (ul & (-ul)));
-+    return 31 - ul;
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+/* Always called with hw interrupts off. */
-+
-+static void __adeos_do_critical_sync (unsigned irq)
-+
-+{
-+    adeos_declare_cpuid;
-+
-+    adeos_load_cpuid();
-+
-+    cpu_set(cpuid,__adeos_cpu_sync_map);
-+
-+    /* Now we are in sync with the lock requestor running on another
-+       CPU. Enter a spinning wait until he releases the global
-+       lock. */
-+    adeos_spin_lock(&__adeos_cpu_barrier);
-+
-+    /* Got it. Now get out. */
-+
-+    if (__adeos_cpu_sync)
-+      /* Call the sync routine if any. */
-+      __adeos_cpu_sync();
-+
-+    adeos_spin_unlock(&__adeos_cpu_barrier);
-+
-+    cpu_clear(cpuid,__adeos_cpu_sync_map);
-+}
-+
-+#endif /* CONFIG_SMP */
-+
-+/* adeos_critical_enter() -- Grab the superlock for entering a global
-+   critical section. On this uniprocessor-only arch, this is identical
-+   to hw cli(). */
-+
-+unsigned long adeos_critical_enter (void (*syncfn)(void))
-+
-+{
-+    unsigned long flags;
-+
-+    adeos_hw_local_irq_save(flags);
-+
-+#ifdef CONFIG_SMP
-+    if (num_online_cpus() > 1) /* We might be running a SMP-kernel on a UP box... */
-+      {
-+      adeos_declare_cpuid;
-+      cpumask_t lock_map;
-+
-+      adeos_load_cpuid();
-+
-+      if (!cpu_test_and_set(cpuid,__adeos_cpu_lock_map))
-+          {
-+          while (cpu_test_and_set(BITS_PER_LONG - 1,__adeos_cpu_lock_map))
-+              {
-+              /* Refer to the explanations found in
-+                 linux/arch/asm-i386/irq.c about
-+                 SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND for more about
-+                 this strange loop. */
-+              int n = 0;
-+              do { cpu_relax(); } while (++n < cpuid);
-+              }
-+
-+          adeos_spin_lock(&__adeos_cpu_barrier);
-+
-+          __adeos_cpu_sync = syncfn;
-+
-+          /* Send the sync IPI to all processors but the current one. */
-+          __adeos_send_IPI_allbutself(ADEOS_CRITICAL_VECTOR);
-+
-+          cpus_andnot(lock_map,cpu_online_map,__adeos_cpu_lock_map);
-+
-+          while (!cpus_equal(__adeos_cpu_sync_map,lock_map))
-+              cpu_relax();
-+          }
-+
-+      atomic_inc(&__adeos_critical_count);
-+      }
-+#endif /* CONFIG_SMP */
-+
-+    return flags;
-+}
-+
-+/* adeos_critical_exit() -- Release the superlock. */
-+
-+void adeos_critical_exit (unsigned long flags)
-+
-+{
-+#ifdef CONFIG_SMP
-+    if (num_online_cpus() > 1) /* We might be running a SMP-kernel on a UP box... */
-+      {
-+      adeos_declare_cpuid;
-+
-+      adeos_load_cpuid();
-+
-+      if (atomic_dec_and_test(&__adeos_critical_count))
-+          {
-+          adeos_spin_unlock(&__adeos_cpu_barrier);
-+
-+          while (!cpus_empty(__adeos_cpu_sync_map))
-+              cpu_relax();
-+
-+          cpu_clear(cpuid,__adeos_cpu_lock_map);
-+          cpu_clear(BITS_PER_LONG - 1,__adeos_cpu_lock_map);
-+          }
-+      }
-+#endif /* CONFIG_SMP */
-+
-+    adeos_hw_local_irq_restore(flags);
-+}
-+
-+void __adeos_init_platform (void)
-+
-+{
-+    unsigned timer_virq;
-+
-+    /* Allocate a virtual IRQ for the decrementer trap early to get it
-+       mapped to IPIPE_VIRQ_BASE */
-+
-+    timer_virq = adeos_alloc_irq();
-+
-+    if (timer_virq != ADEOS_TIMER_VIRQ)
-+      panic("Adeos: cannot reserve timer virq #%d (got #%d)",
-+            ADEOS_TIMER_VIRQ,
-+            timer_virq);
-+
-+    __adeos_decr_ticks = tb_ticks_per_jiffy;
-+}
-+
-+void __adeos_init_stage (adomain_t *adp)
-+
-+{
-+    int cpuid, n;
-+
-+    for (cpuid = 0; cpuid < ADEOS_NR_CPUS; cpuid++)
-+      {
-+      adp->cpudata[cpuid].irq_pending_hi = 0;
-+
-+      for (n = 0; n < IPIPE_IRQ_IWORDS; n++)
-+          adp->cpudata[cpuid].irq_pending_lo[n] = 0;
-+
-+      for (n = 0; n < IPIPE_NR_IRQS; n++)
-+          adp->cpudata[cpuid].irq_hits[n] = 0;
-+      }
-+
-+    for (n = 0; n < IPIPE_NR_IRQS; n++)
-+      {
-+      adp->irqs[n].acknowledge = NULL;
-+      adp->irqs[n].handler = NULL;
-+      adp->irqs[n].control = IPIPE_PASS_MASK; /* Pass but don't handle */
-+      }
-+
-+#ifdef CONFIG_SMP
-+    adp->irqs[ADEOS_CRITICAL_IPI].acknowledge = &__adeos_ack_irq;
-+    adp->irqs[ADEOS_CRITICAL_IPI].handler = &__adeos_do_critical_sync;
-+    /* Immediately handle in the current domain but *never* pass */
-+    adp->irqs[ADEOS_CRITICAL_IPI].control = IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK|IPIPE_SYSTEM_MASK;
-+#endif /* CONFIG_SMP */
-+}
-+
-+/* __adeos_sync_stage() -- Flush the pending IRQs for the current
-+   domain (and processor).  This routine flushes the interrupt log
-+   (see "Optimistic interrupt protection" from D. Stodolsky et al. for
-+   more on the deferred interrupt scheme). Every interrupt that
-+   occurred while the pipeline was stalled gets played.  WARNING:
-+   callers on SMP boxen should always check for CPU migration on
-+   return of this routine. One can control the kind of interrupts
-+   which are going to be sync'ed using the syncmask
-+   parameter. IPIPE_IRQMASK_ANY plays them all, IPIPE_IRQMASK_VIRT
-+   plays virtual interrupts only. This routine must be called with hw
-+   interrupts off. */
-+
-+void __adeos_sync_stage (unsigned long syncmask)
-+
-+{
-+    unsigned long mask, submask;
-+    struct adcpudata *cpudata;
-+    adeos_declare_cpuid;
-+    int level, rank;
-+    adomain_t *adp;
-+    unsigned irq;
-+
-+    adeos_load_cpuid();
-+    adp = adp_cpu_current[cpuid];
-+    cpudata = &adp->cpudata[cpuid];
-+
-+    if (__test_and_set_bit(IPIPE_SYNC_FLAG,&cpudata->status))
-+      return;
-+
-+    /* The policy here is to keep the dispatching code interrupt-free
-+       by stalling the current stage. If the upper domain handler
-+       (which we call) wants to re-enable interrupts while in a safe
-+       portion of the code (e.g. SA_INTERRUPT flag unset for Linux's
-+       sigaction()), it will have to unstall (then stall again before
-+       returning to us!) the stage when it sees fit. */
-+
-+    while ((mask = (cpudata->irq_pending_hi & syncmask)) != 0)
-+      {
-+      /* Give a slight priority advantage to high-numbered IRQs
-+         like the virtual ones. */
-+      level = ffnz(mask);
-+      __clear_bit(level,&cpudata->irq_pending_hi);
-+
-+      while ((submask = cpudata->irq_pending_lo[level]) != 0)
-+          {
-+          rank = ffnz(submask);
-+          irq = (level << IPIPE_IRQ_ISHIFT) + rank;
-+
-+          if (test_bit(IPIPE_LOCK_FLAG,&adp->irqs[irq].control))
-+              {
-+              __clear_bit(rank,&cpudata->irq_pending_lo[level]);
-+              continue;
-+              }
-+
-+          if (--cpudata->irq_hits[irq] == 0)
-+              __clear_bit(rank,&cpudata->irq_pending_lo[level]);
-+
-+          __set_bit(IPIPE_STALL_FLAG,&cpudata->status);
-+
-+#ifdef CONFIG_ADEOS_PROFILING
-+          __adeos_profile_data[cpuid].irqs[irq].n_synced++;
-+          adeos_hw_tsc(__adeos_profile_data[cpuid].irqs[irq].t_synced);
-+#endif /* CONFIG_ADEOS_PROFILING */
-+
-+          if (adp == adp_root)
-+              {
-+              adeos_hw_sti();
-+              ((void (*)(unsigned, struct pt_regs *))adp->irqs[irq].handler)(irq,&__adeos_irq_regs);
-+              adeos_hw_cli();
-+              }
-+          else
-+              {
-+              __clear_bit(IPIPE_SYNC_FLAG,&cpudata->status);
-+              adp->irqs[irq].handler(irq);
-+              __set_bit(IPIPE_SYNC_FLAG,&cpudata->status);
-+              }
-+
-+#ifdef CONFIG_SMP
-+          {
-+          int _cpuid = adeos_processor_id();
-+
-+          if (_cpuid != cpuid) /* Handle CPU migration. */
-+              {
-+              /* We expect any domain to clear the SYNC bit each
-+                 time it switches in a new task, so that preemptions
-+                 and/or CPU migrations (in the SMP case) over the
-+                 ISR do not lock out the log syncer for some
-+                 indefinite amount of time. In the Linux case,
-+                 schedule() handles this (see kernel/sched.c). For
-+                 this reason, we don't bother clearing it here for
-+                 the source CPU in the migration handling case,
-+                 since it must have scheduled another task in by
-+                 now. */
-+              cpuid = _cpuid;
-+              cpudata = &adp->cpudata[cpuid];
-+              __set_bit(IPIPE_SYNC_FLAG,&cpudata->status);
-+              }
-+          }
-+#endif /* CONFIG_SMP */
-+
-+          __clear_bit(IPIPE_STALL_FLAG,&cpudata->status);
-+          }
-+      }
-+
-+    __clear_bit(IPIPE_SYNC_FLAG,&cpudata->status);
-+}
-+
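
[Annotation, not patch content: the interrupt log that __adeos_sync_stage() above drains is a two-level bitmap, irq_pending_hi selecting a word and irq_pending_lo holding the per-IRQ bits. The sketch below models that structure in plain C; all identifiers are invented, the sizes mirror the IPIPE_* constants defined later in include/asm-ppc/adeos.h, and the drain order is a simplification of the ffnz()-driven loop.]

    #include <stdio.h>

    #define NR_WORDS  5      /* IPIPE_IRQ_IWORDS for 160 IRQ slots */
    #define IRQ_SHIFT 5      /* IPIPE_IRQ_ISHIFT: 32 IRQs per word */
    #define IRQ_MASK  31     /* IPIPE_IRQ_IMASK */

    static unsigned long pending_hi;
    static unsigned long pending_lo[NR_WORDS];

    /* Like __adeos_set_irq_bit(): set the per-IRQ bit, then the word bit. */
    static void mark_pending(unsigned irq)
    {
        pending_lo[irq >> IRQ_SHIFT] |= 1UL << (irq & IRQ_MASK);
        pending_hi |= 1UL << (irq >> IRQ_SHIFT);
    }

    /* Pop one pending IRQ and clear its bits, as the sync loop does each
       time it replays an interrupt (simplified: lowest-numbered first). */
    static int pop_next(void)
    {
        for (int level = 0; level < NR_WORDS; level++) {
            if (!(pending_hi & (1UL << level)))
                continue;
            for (int rank = 0; rank < 32; rank++) {
                if (pending_lo[level] & (1UL << rank)) {
                    pending_lo[level] &= ~(1UL << rank);
                    if (pending_lo[level] == 0)
                        pending_hi &= ~(1UL << level);
                    return (level << IRQ_SHIFT) + rank;
                }
            }
        }
        return -1;           /* log empty */
    }

    int main(void)
    {
        mark_pending(16);    /* an external IRQ */
        mark_pending(128);   /* the first virtual IRQ (timer) */
        for (int irq; (irq = pop_next()) >= 0; )
            printf("replay irq %d\n", irq);
        return 0;
    }
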
-+int __adeos_ack_irq (unsigned irq)
-+
-+{
-+    irq_desc_t *desc = irq_desc + irq;
-+    unsigned long adflags;
-+    adeos_declare_cpuid;
-+    
-+    if (desc->handler->ack == NULL)
-+      return 1;
-+
-+    /* No need to mask IRQs at hw level: we are always called from
-+       __adeos_handle_irq(), so interrupts are already off. We stall
-+       the pipeline so that spin_lock_irq*() ops won't unintentionally
-+       flush it, since this could cause infinite recursion. */
-+
-+    adeos_load_cpuid();
-+    adflags = adeos_test_and_stall_pipeline();
-+    preempt_disable();
-+    spin_lock(&desc->lock);
-+    desc->handler->ack(irq); 
-+#ifdef CONFIG_POWER4
-+    /* if it is a k2u3 cascaded irq, acknowledge it, also */
-+    if (desc->action == &k2u3_cascade_action) {
-+      struct pt_regs regs;
-+      int irq2 = openpic2_get_irq(&regs);
-+      if (irq2 != -1) {
-+              irq_desc_t *desc2 = irq_desc + irq2;
-+              if (desc2->handler->ack)
-+                  desc2->handler->ack(irq2);
-+      }
-+    }
-+#endif
-+    spin_unlock(&desc->lock);
-+    preempt_enable_no_resched();
-+    adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
-+
-+    return 1;
-+}
-+
-+static inline void __adeos_walk_pipeline (struct list_head *pos, int cpuid)
-+
-+{
-+    adomain_t *this_domain = adp_cpu_current[cpuid];
-+
-+    while (pos != &__adeos_pipeline)
-+      {
-+      adomain_t *next_domain = list_entry(pos,adomain_t,p_link);
-+
-+      if (test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status))
-+          break; /* Stalled stage -- do not go further. */
-+
-+      if (next_domain->cpudata[cpuid].irq_pending_hi != 0)
-+          {
-+          /* Since the critical IPI might be dispatched by the
-+             following actions, the current domain might not be
-+             linked to the pipeline anymore after its handler
-+             returns on SMP boxes, even if the domain remains valid
-+             (see adeos_unregister_domain()), so don't make any
-+             dangerous assumptions here. */
-+
-+          if (next_domain == this_domain)
-+              __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+          else
-+              {
-+              __adeos_switch_to(this_domain,next_domain,cpuid);
-+
-+              adeos_load_cpuid(); /* Processor might have changed. */
-+
-+              if (this_domain->cpudata[cpuid].irq_pending_hi != 0 &&
-+                  !test_bit(IPIPE_STALL_FLAG,&this_domain->cpudata[cpuid].status))
-+                  __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+              }
-+
-+          break;
-+          }
-+      else if (next_domain == this_domain)
-+          break;
-+
-+      pos = next_domain->p_link.next;
-+      }
-+}
-+
-+/* __adeos_handle_irq() -- ADEOS's generic IRQ handler. An optimistic
-+   interrupt protection log is maintained here for each
-+   domain. Interrupts are off on entry. */
-+
-+void __adeos_handle_irq (int irq, struct pt_regs *regs)
-+
-+{
-+    struct list_head *head, *pos;
-+    adeos_declare_cpuid;
-+    int m_ack, s_ack;
-+
-+    m_ack = irq & ADEOS_IRQ_ACKED;
-+    irq &= ADEOS_IRQ_ACKED_MASK;
-+
-+    if (irq >= IPIPE_NR_IRQS)
-+      {
-+      printk(KERN_ERR "Adeos: spurious interrupt %d\n",irq);
-+      return;
-+      }
-+
-+    adeos_load_cpuid();
-+
-+#ifdef CONFIG_ADEOS_PROFILING
-+    __adeos_profile_data[cpuid].irqs[irq].n_handled++;
-+    adeos_hw_tsc(__adeos_profile_data[cpuid].irqs[irq].t_handled);
-+#endif /* CONFIG_ADEOS_PROFILING */
-+
-+    s_ack = m_ack;
-+
-+    if (test_bit(IPIPE_STICKY_FLAG,&adp_cpu_current[cpuid]->irqs[irq].control))
-+      head = &adp_cpu_current[cpuid]->p_link;
-+    else
-+      head = __adeos_pipeline.next;
-+
-+    /* Ack the interrupt. */
-+
-+    pos = head;
-+
-+    while (pos != &__adeos_pipeline)
-+      {
-+      adomain_t *_adp = list_entry(pos,adomain_t,p_link);
-+
-+      /* For each domain handling the incoming IRQ, mark it as
-+           pending in its log. */
-+
-+      if (test_bit(IPIPE_HANDLE_FLAG,&_adp->irqs[irq].control))
-+          {
-+          /* Domains that handle this IRQ are polled for
-+             acknowledging it by decreasing priority order. The
-+             interrupt must be made pending _first_ in the domain's
-+             status flags before the PIC is unlocked. */
-+
-+          _adp->cpudata[cpuid].irq_hits[irq]++;
-+          __adeos_set_irq_bit(_adp,cpuid,irq);
-+
-+          /* Always get the first master acknowledge available. Once
-+             we've got it, allow slave acknowledge handlers to run
-+             (until one of them stops us). */
-+
-+          if (_adp->irqs[irq].acknowledge != NULL)
-+              {
-+              if (!m_ack)
-+                  m_ack = _adp->irqs[irq].acknowledge(irq);
-+              else if (test_bit(IPIPE_SHARED_FLAG,&_adp->irqs[irq].control) && !s_ack)
-+                  s_ack = _adp->irqs[irq].acknowledge(irq);
-+              }
-+          }
-+
-+      /* If the domain does not want the IRQ to be passed down the
-+         interrupt pipe, exit the loop now. */
-+
-+      if (!test_bit(IPIPE_PASS_FLAG,&_adp->irqs[irq].control))
-+          break;
-+
-+      pos = _adp->p_link.next;
-+      }
-+
-+    /* Now walk the pipeline, yielding control to the highest priority
-+       domain that has pending interrupt(s) or immediately to the
-+       current domain if the interrupt has been marked as
-+       'sticky'. This search does not go beyond the current domain in
-+       the pipeline. To understand this code properly, one must keep
-+       in mind that domains having a higher priority than the current
-+       one are sleeping on the adeos_suspend_domain() service. In
-+       addition, domains having a lower priority have been preempted
-+       by an interrupt dispatched to a higher priority domain. Once
-+       the first and highest priority stage has been selected here,
-+       the subsequent stages will be activated in turn when each
-+       visited domain calls adeos_suspend_domain() to wake up its
-+       neighbour down the pipeline. */
-+
-+    __adeos_walk_pipeline(head,cpuid);
-+}
-+
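
[Annotation, not patch content: a compact model of the decision __adeos_walk_pipeline()/__adeos_handle_irq() above make. Domains sit in priority order; the walk stops at the first stalled stage and hands control to the first stage with pending work, never going past the current domain. The struct and helper names below are invented stand-ins for adomain_t and its flags.]

    #include <stdio.h>
    #include <stdbool.h>

    struct domain {
        const char *name;
        bool stalled;        /* IPIPE_STALL_FLAG analogue */
        bool irq_pending;    /* irq_pending_hi != 0 analogue */
        struct domain *next; /* next (lower-priority) stage */
    };

    static struct domain *pick_stage(struct domain *head, struct domain *current)
    {
        for (struct domain *d = head; d; d = d->next) {
            if (d->stalled)
                return NULL;     /* a stalled stage blocks the walk */
            if (d->irq_pending)
                return d;        /* deliver here: sync or switch to it */
            if (d == current)
                return NULL;     /* never walk beyond the current domain */
        }
        return NULL;
    }

    int main(void)
    {
        struct domain root = { "linux",   false, false, NULL  };
        struct domain rt   = { "realtime", false, true,  &root };
        struct domain *hit = pick_stage(&rt, &root);

        printf("deliver in: %s\n", hit ? hit->name : "(nobody)");
        return 0;
    }
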
-+/* ADEOS's version of the interrupt trap handler. */
-+
-+asmlinkage int __adeos_grab_irq (struct pt_regs *regs)
-+
-+{
-+    extern int ppc_spurious_interrupts;
-+    adeos_declare_cpuid;
-+    int irq, first = 1;
-+
-+    if (!adp_pipelined)
-+      {
-+      do_IRQ(regs);
-+      return 1;
-+      }
-+
-+    if ((irq = ppc_md.get_irq(regs)) >= 0)
-+      {
-+      __adeos_handle_irq(irq,regs);
-+      first = 0;
-+      }
-+    else if (irq != -2 && first)
-+           ppc_spurious_interrupts++;
-+
-+    adeos_load_cpuid();
-+
-+    return (adp_cpu_current[cpuid] == adp_root &&
-+          !test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status));
-+}
-+
-+/* ADEOS's version of irq.c:do_IRQ(). */
-+
-+void __adeos_do_IRQ (int irq, struct pt_regs *regs)
-+
-+{
-+    irq_enter();
-+    __do_IRQ(irq,regs);
-+    irq_exit();
-+}
-+
-+/* ADEOS's version of the decrementer trap handler. */
-+
-+asmlinkage int __adeos_grab_timer (struct pt_regs *regs)
-+
-+{
-+    adeos_declare_cpuid;
-+
-+    if (!adp_pipelined)
-+      {
-+      timer_interrupt(regs);
-+      return 1;
-+      }
-+
-+#ifdef CONFIG_POWER4
-+    /* On 970 CPUs DEC cannot be disabled, and without setting DEC
-+     * here, DEC interrupt would be triggered as soon as interrupts are
-+     * enabled in __adeos_sync_stage 
-+     */
-+    set_dec(0x7fffffff);
-+#endif
-+    
-+    __adeos_irq_regs.msr = regs->msr; /* for do_timer() */
-+
-+    __adeos_handle_irq(ADEOS_TIMER_VIRQ,regs);
-+
-+    adeos_load_cpuid();
-+
-+#ifndef CONFIG_40x
-+    if (__adeos_decr_ticks != tb_ticks_per_jiffy)
-+      {
-+      unsigned long long next_date, now;
-+
-+      next_date = __adeos_decr_next[cpuid];
-+
-+      while ((now = __adeos_read_timebase()) >= next_date)
-+          next_date += __adeos_decr_ticks;
-+
-+      set_dec(next_date - now);
-+
-+      __adeos_decr_next[cpuid] = next_date;
-+      }
-+#endif /* !CONFIG_40x */
-+
-+    return (adp_cpu_current[cpuid] == adp_root &&
-+          !test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status));
-+}
-+
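
[Annotation, not patch content: when a period shorter than a jiffy is in effect, __adeos_grab_timer() above keeps the decrementer periodic by sliding the next deadline forward in __adeos_decr_ticks steps until it lies in the future, then arming the DEC with the remaining delta. The sketch fakes the timebase with plain variables; rearm_decrementer() and the sample values are invented.]

    #include <stdio.h>

    static unsigned long long timebase_now = 1000; /* pretend timebase readout */
    static unsigned long long decr_next    = 990;  /* last programmed deadline */
    static unsigned long      decr_ticks   = 40;   /* current period in ticks */

    static unsigned long rearm_decrementer(void)
    {
        unsigned long long next = decr_next;

        /* Skip any deadlines already missed, keeping the period stable. */
        while (timebase_now >= next)
            next += decr_ticks;

        decr_next = next;
        return (unsigned long)(next - timebase_now); /* value handed to set_dec() */
    }

    int main(void)
    {
        printf("arm DEC with %lu ticks (next deadline %llu)\n",
               rearm_decrementer(), decr_next);
        return 0;
    }
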
-+void __adeos_do_timer (int irq, struct pt_regs *regs)
-+
-+{
-+    timer_interrupt(regs);
-+}
-+
-+asmlinkage int __adeos_check_root (struct pt_regs *regs)
-+
-+{
-+    adeos_declare_cpuid;
-+    /* This routine is called with hw interrupts off, so no migration
-+       can occur while checking the identity of the current domain. */
-+    adeos_load_cpuid();
-+    return (adp_cpu_current[cpuid] == adp_root &&
-+          !test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status));
-+}
-+
-+/* adeos_trigger_irq() -- Push the interrupt to the pipeline entry
-+   just like if it has been actually received from a hw source. This
-+   both works for real and virtual interrupts. This also means that
-+   the current domain might be immediately preempted by a higher
-+   priority domain who happens to handle this interrupt. */
-+
-+int adeos_trigger_irq (unsigned irq)
-+
-+{
-+    struct pt_regs regs;
-+    unsigned long flags;
-+
-+    if (irq >= IPIPE_NR_IRQS ||
-+      (adeos_virtual_irq_p(irq) && !test_bit(irq - IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map)))
-+      return -EINVAL;
-+
-+    adeos_hw_local_irq_save(flags);
-+
-+    regs.msr = flags;
-+
-+    __adeos_handle_irq(irq | ADEOS_IRQ_ACKED, &regs);
-+
-+    adeos_hw_local_irq_restore(flags);
-+
-+    return 1;
-+}
-+
-+int __adeos_enter_syscall (struct pt_regs *regs)
-+
-+{
-+    adeos_declare_cpuid;
-+    unsigned long flags;
-+
-+    /* This routine either returns:
-+       0 -- if the syscall is to be passed to Linux;
-+       1 -- if the syscall should not be passed to Linux, and no
-+       tail work should be performed;
-+       -1 -- if the syscall should not be passed to Linux but the
-+       tail work has to be performed. */
-+
-+    if (__adeos_event_monitors[ADEOS_SYSCALL_PROLOGUE] > 0 &&
-+      __adeos_handle_event(ADEOS_SYSCALL_PROLOGUE,regs) > 0)
-+      {
-+      if (adp_current == adp_root && !in_atomic())
-+          {
-+          /* Sync pending VIRQs before _TIF_NEED_RESCHED is
-+           * tested. */
-+
-+          adeos_lock_cpu(flags);
-+
-+          if ((adp_root->cpudata[cpuid].irq_pending_hi & IPIPE_IRQMASK_VIRT) != 0)
-+              __adeos_sync_stage(IPIPE_IRQMASK_VIRT);
-+
-+          adeos_unlock_cpu(flags);
-+
-+          return -1;
-+          }
-+
-+      return 1;
-+      }
-+
-+    return 0;
-+}
-+
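
[Annotation, not patch content: __adeos_enter_syscall() above returns 0, 1 or -1, and the DoSyscall stub patched into entry.S further down maps that onto "cmpwi r3,0; bgt .adeos_end_syscall; blt ret_from_syscall". The enum and dispatch() below are invented names used only to spell out that contract.]

    #include <stdio.h>

    enum syscall_verdict {
        PASS_TO_LINUX  = 0,   /* fall through to the normal syscall path */
        ABSORB_NO_TAIL = 1,   /* skip Linux and the tail work (bgt) */
        ABSORB_DO_TAIL = -1,  /* skip Linux but run ret_from_syscall (blt) */
    };

    static void dispatch(enum syscall_verdict v)
    {
        if (v > 0)
            printf("-> .adeos_end_syscall (restore MSR, plain exit)\n");
        else if (v < 0)
            printf("-> ret_from_syscall (signals, resched, tail work)\n");
        else
            printf("-> regular Linux syscall handler\n");
    }

    int main(void)
    {
        dispatch(PASS_TO_LINUX);
        dispatch(ABSORB_NO_TAIL);
        dispatch(ABSORB_DO_TAIL);
        return 0;
    }
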
-+int __adeos_exit_syscall (void) 
-+
-+{
-+    if (__adeos_event_monitors[ADEOS_SYSCALL_EPILOGUE] > 0)
-+      return __adeos_handle_event(ADEOS_SYSCALL_EPILOGUE,NULL);
-+
-+    return 0;
-+}
-+
-+EXPORT_SYMBOL(__adeos_init_stage);
-+EXPORT_SYMBOL(__adeos_sync_stage);
-+EXPORT_SYMBOL(__adeos_irq_regs);
-+#ifdef CONFIG_ADEOS_THREADS
-+EXPORT_SYMBOL(__adeos_switch_domain);
-+#endif /* CONFIG_ADEOS_THREADS */
-+EXPORT_SYMBOL(__adeos_do_IRQ);
-+EXPORT_SYMBOL(__adeos_do_timer);
-+EXPORT_SYMBOL(__adeos_decr_ticks);
-+EXPORT_SYMBOL(__adeos_decr_next);
-+EXPORT_SYMBOL(__adeos_current_threadinfo);
-+EXPORT_SYMBOL(adeos_critical_enter);
-+EXPORT_SYMBOL(adeos_critical_exit);
-+EXPORT_SYMBOL(adeos_trigger_irq);
-diff -uNrp linux-2.6.10/arch/ppc/kernel/entry.S linux-2.6.10-ppc-adeos/arch/ppc/kernel/entry.S
---- linux-2.6.10/arch/ppc/kernel/entry.S       2004-12-24 22:35:27.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/arch/ppc/kernel/entry.S     2005-03-27 16:35:19.000000000 +0200
-@@ -143,8 +143,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
-       .globl transfer_to_handler_cont
- transfer_to_handler_cont:
-       lwz     r11,THREAD_INFO-THREAD(r12)
-+#ifdef CONFIG_ADEOS_CORE
-+      /* Allow for kernel-based local stacks: those must not cause
-+      the stack overflow detection to trigger when some activity has
-+      been preempted over them (e.g. Adeos domain stacks). We just
-+      check if the kernel stack is not treading on the memory area
-+      ranging from &current->thread_info to &current->thread, which
-+      is coarser than the vanilla implementation, but likely
-+      sensitive enough to catch overflows soon enough though.*/
-+      addi    r9,r11,THREAD
-+      cmplw   0,r1,r11
-+      cmplw   1,r1,r9
-+      crand   1,1,4
-+      bgt-    stack_ovf               /* if r11 < r1 < r11+THREAD */
-+#else /* CONFIG_ADEOS_CORE */
-       cmplw   r1,r11                  /* if r1 <= current->thread_info */
-       ble-    stack_ovf               /* then the kernel stack overflowed */
-+#endif /* CONFIG_ADEOS_CORE */
- 3:
-       mflr    r9
-       lwz     r11,0(r9)               /* virtual address of handler */
-@@ -195,6 +210,21 @@ _GLOBAL(DoSyscall)
-       lwz     r11,_CCR(r1)    /* Clear SO bit in CR */
-       rlwinm  r11,r11,0,4,2
-       stw     r11,_CCR(r1)
-+#ifdef CONFIG_ADEOS_CORE
-+      addi    r3,r1,GPR0
-+      bl      __adeos_enter_syscall
-+      cmpwi   r3,0
-+      lwz     r3,GPR3(r1)
-+      lwz     r0,GPR0(r1)
-+      lwz     r4,GPR4(r1)
-+      lwz     r5,GPR5(r1)
-+      lwz     r6,GPR6(r1)
-+      lwz     r7,GPR7(r1)
-+      lwz     r8,GPR8(r1)
-+      lwz     r9,GPR9(r1)
-+      bgt     .adeos_end_syscall
-+      blt     ret_from_syscall
-+#endif /* CONFIG_ADEOS_CORE */
- #ifdef SHOW_SYSCALLS
-       bl      do_show_syscall
- #endif /* SHOW_SYSCALLS */
-@@ -215,6 +245,13 @@ syscall_dotrace_cont:
-       mtlr    r10
-       addi    r9,r1,STACK_FRAME_OVERHEAD
-       blrl                    /* Call handler */
-+#ifdef CONFIG_ADEOS_CORE
-+      stw     r3,RESULT(r1)   /* Save result */
-+      bl      __adeos_exit_syscall
-+      cmpwi   r3,0
-+      lwz     r3,RESULT(r1)
-+      bne-    syscall_exit_cont
-+#endif /* CONFIG_ADEOS_CORE */
-       .globl  ret_from_syscall
- ret_from_syscall:
- #ifdef SHOW_SYSCALLS
-@@ -262,6 +299,14 @@ syscall_exit_cont:
-       SYNC
-       RFI
- 
-+#ifdef CONFIG_ADEOS_CORE
-+.adeos_end_syscall:
-+      LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
-+      SYNC
-+      MTMSRD(r10)
-+      b syscall_exit_cont
-+#endif /* CONFIG_ADEOS_CORE */
-+
- 66:   li      r3,-ENOSYS
-       b       ret_from_syscall
- 
-@@ -586,6 +631,12 @@ ret_from_except:
-       SYNC                    /* Some chip revs have problems here... */
-       MTMSRD(r10)             /* disable interrupts */
- 
-+#ifdef CONFIG_ADEOS_CORE
-+        bl __adeos_check_root
-+        cmpwi   r3, 0
-+        beq- restore
-+#endif /* CONFIG_ADEOS_CORE */
-+
-       lwz     r3,_MSR(r1)     /* Returning to user mode? */
-       andi.   r0,r3,MSR_PR
-       beq     resume_kernel
-@@ -1024,3 +1075,119 @@ machine_check_in_rtas:
-       /* XXX load up BATs and panic */
- 
- #endif /* CONFIG_PPC_OF */
-+
-+#ifdef CONFIG_ADEOS_CORE
-+
-+_GLOBAL(__adeos_ret_from_except)
-+        cmpwi   r3, 0
-+        bne+ ret_from_except
-+        b restore
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+
-+/*
-+ * r3 = adp_next, r4 = adp_cpu_current[adeos_processor_id()].
-+ * NOTE: This code is _not_ SMP-compliant. Always called with hw
-+ * interrupts off.
-+ */
-+_GLOBAL(__adeos_switch_domain)
-+
-+      stwu    r1,-108-STACK_FRAME_OVERHEAD(r1)
-+
-+      /* Save general purpose registers. */
-+      stw     r31,STACK_FRAME_OVERHEAD+0*4(r1)
-+      stw     r30,STACK_FRAME_OVERHEAD+1*4(r1)
-+      stw     r29,STACK_FRAME_OVERHEAD+2*4(r1)
-+      stw     r28,STACK_FRAME_OVERHEAD+3*4(r1)
-+      stw     r27,STACK_FRAME_OVERHEAD+4*4(r1)
-+      stw     r26,STACK_FRAME_OVERHEAD+5*4(r1)
-+      stw     r25,STACK_FRAME_OVERHEAD+6*4(r1)
-+      stw     r24,STACK_FRAME_OVERHEAD+7*4(r1)
-+      stw     r23,STACK_FRAME_OVERHEAD+8*4(r1)
-+      stw     r22,STACK_FRAME_OVERHEAD+9*4(r1)
-+      stw     r21,STACK_FRAME_OVERHEAD+10*4(r1)
-+      stw     r20,STACK_FRAME_OVERHEAD+11*4(r1)
-+      stw     r19,STACK_FRAME_OVERHEAD+12*4(r1)
-+      stw     r18,STACK_FRAME_OVERHEAD+13*4(r1)
-+      stw     r17,STACK_FRAME_OVERHEAD+14*4(r1)
-+      stw     r16,STACK_FRAME_OVERHEAD+15*4(r1)
-+      stw     r15,STACK_FRAME_OVERHEAD+16*4(r1)
-+      stw     r14,STACK_FRAME_OVERHEAD+17*4(r1)
-+      stw     r13,STACK_FRAME_OVERHEAD+18*4(r1)
-+      stw      r3,STACK_FRAME_OVERHEAD+19*4(r1)
-+      stw      r2,STACK_FRAME_OVERHEAD+20*4(r1)
-+      stw      r0,STACK_FRAME_OVERHEAD+21*4(r1)
-+
-+      /* Save special registers. */
-+      mfctr    r2
-+      stw      r2,STACK_FRAME_OVERHEAD+22*4(r1)
-+      mfcr     r2
-+      stw      r2,STACK_FRAME_OVERHEAD+23*4(r1)
-+      mfxer    r2
-+      stw      r2,STACK_FRAME_OVERHEAD+24*4(r1)
-+      mflr     r2
-+      stw      r2,STACK_FRAME_OVERHEAD+25*4(r1)
-+      mfmsr    r2
-+      stw      r2,STACK_FRAME_OVERHEAD+26*4(r1)
-+
-+      /* Actual switch block. */
-+      lwz      r2,0(r4)       /* r2 = old_adp = adp_cpu_current[cpuid] */
-+      stw      r1,0(r2)       /* old_adp->esp[0] = sp */
-+      stw      r3,0(r4)       /* adp_cpu_current[cpuid] = new_adp */
-+      /* CONFIG_SMP should sync here; but first, accesses to esp[]
-+      would require cpuid-indexing. */
-+      lwz      r1,0(r3)       /* sp = new_adp->esp[0] */
-+
-+      /* Restore special registers. */
-+      lwz      r2,STACK_FRAME_OVERHEAD+26*4(r1)
-+      mtmsr    r2
-+      lwz      r2,STACK_FRAME_OVERHEAD+25*4(r1)
-+      mtlr     r2
-+      lwz      r2,STACK_FRAME_OVERHEAD+24*4(r1)
-+      mtxer    r2
-+      lwz      r2,STACK_FRAME_OVERHEAD+23*4(r1)
-+      mtcr     r2
-+      lwz      r2,STACK_FRAME_OVERHEAD+22*4(r1)
-+      mtctr    r2
-+
-+      /* Restore general purpose registers. */
-+      lwz      r0,STACK_FRAME_OVERHEAD+21*4(r1)
-+      lwz      r2,STACK_FRAME_OVERHEAD+20*4(r1)
-+      lwz      r3,STACK_FRAME_OVERHEAD+19*4(r1)
-+      lwz     r13,STACK_FRAME_OVERHEAD+18*4(r1)
-+      lwz     r14,STACK_FRAME_OVERHEAD+17*4(r1)
-+      lwz     r15,STACK_FRAME_OVERHEAD+16*4(r1)
-+      lwz     r16,STACK_FRAME_OVERHEAD+15*4(r1)
-+      lwz     r17,STACK_FRAME_OVERHEAD+14*4(r1)
-+      lwz     r18,STACK_FRAME_OVERHEAD+13*4(r1)
-+      lwz     r19,STACK_FRAME_OVERHEAD+12*4(r1)
-+      lwz     r20,STACK_FRAME_OVERHEAD+11*4(r1)
-+      lwz     r21,STACK_FRAME_OVERHEAD+10*4(r1)
-+      lwz     r22,STACK_FRAME_OVERHEAD+9*4(r1)
-+      lwz     r23,STACK_FRAME_OVERHEAD+8*4(r1)
-+      lwz     r24,STACK_FRAME_OVERHEAD+7*4(r1)
-+      lwz     r25,STACK_FRAME_OVERHEAD+6*4(r1)
-+      lwz     r26,STACK_FRAME_OVERHEAD+5*4(r1)
-+      lwz     r27,STACK_FRAME_OVERHEAD+4*4(r1)
-+      lwz     r28,STACK_FRAME_OVERHEAD+3*4(r1)
-+      lwz     r29,STACK_FRAME_OVERHEAD+2*4(r1)
-+      lwz     r30,STACK_FRAME_OVERHEAD+1*4(r1)
-+      lwz     r31,STACK_FRAME_OVERHEAD+0*4(r1)
-+
-+      addi    r1,r1,108+STACK_FRAME_OVERHEAD
-+
-+      blr
-+
-+#endif /* CONFIG_ADEOS_THREADS */
-+
-+/* Returns the current threadinfo pointer in a way which is
-+   insensitive to the underlying stack, by directly reading the
-+   special purpose register #3. */
-+      
-+_GLOBAL(__adeos_current_threadinfo)
-+      mfspr   r3,SPRG3
-+      addi    r3,r3,-THREAD
-+      tovirt(r3,r3)
-+      blr
-+      
-+#endif /* CONFIG_ADEOS_CORE */
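
[Annotation, not patch content: the 108-byte save area that __adeos_switch_domain above pushes below STACK_FRAME_OVERHEAD is the same layout that __adeos_init_domain pre-builds for a fresh domain (ksp[19] = r3, ksp[25] = entry point in lr, ksp[26] = msr with MSR_EE cleared). The struct below is an invented C view of that layout, not a type used by the patch; uint32_t stands in for the 32-bit registers.]

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct adeos_switch_frame {
        uint32_t gpr_high[19]; /* words 0..18: r31 down to r13 */
        uint32_t r3;           /* word 19: argument / wakeup flag */
        uint32_t r2;           /* word 20 */
        uint32_t r0;           /* word 21 */
        uint32_t ctr;          /* word 22 */
        uint32_t cr;           /* word 23 */
        uint32_t xer;          /* word 24 */
        uint32_t lr;           /* word 25: resume address */
        uint32_t msr;          /* word 26: MSR_EE cleared for new domains */
    };

    int main(void)
    {
        printf("frame size: %zu bytes (108 expected)\n",
               sizeof(struct adeos_switch_frame));
        printf("lr offset:  %zu bytes (100 expected)\n",
               offsetof(struct adeos_switch_frame, lr));
        return 0;
    }
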
-diff -uNrp linux-2.6.10/arch/ppc/kernel/head.S linux-2.6.10-ppc-adeos/arch/ppc/kernel/head.S
---- linux-2.6.10/arch/ppc/kernel/head.S        2004-12-24 22:34:58.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/arch/ppc/kernel/head.S      2005-03-27 16:35:19.000000000 +0200
-@@ -339,6 +339,12 @@ i##n:                                                             \
-       EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
-                         ret_from_except)
- 
-+#ifdef CONFIG_ADEOS_CORE
-+#define EXC_XFER_ADEOS_LITE(n, hdlr)          \
-+      EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
-+                        __adeos_ret_from_except)
-+#endif /* CONFIG_ADEOS_CORE */
-+
- #define EXC_XFER_EE(n, hdlr)          \
-       EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
-                         ret_from_except_full)
-@@ -445,7 +451,11 @@ InstructionAccess:
- #endif /* CONFIG_PPC64BRIDGE */
- 
- /* External interrupt */
-+#ifdef CONFIG_ADEOS_CORE
-+      EXCEPTION(0x500, HardwareInterrupt, __adeos_grab_irq, 
EXC_XFER_ADEOS_LITE)
-+#else /* !CONFIG_ADEOS_CORE */
-       EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
-+#endif /* CONFIG_ADEOS_CORE */
- 
- /* Alignment exception */
-       . = 0x600
-@@ -470,7 +480,11 @@ FPUnavailable:
-       EXC_XFER_EE_LITE(0x800, KernelFP)
- 
- /* Decrementer */
-+#ifdef CONFIG_ADEOS_CORE
-+      EXCEPTION(0x900, Decrementer, __adeos_grab_timer, EXC_XFER_ADEOS_LITE)
-+#else /* !CONFIG_ADEOS_CORE */
-       EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
-+#endif /* CONFIG_ADEOS_CORE */
- 
-       EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE)
-diff -uNrp linux-2.6.10/arch/ppc/kernel/head_44x.S linux-2.6.10-ppc-adeos/arch/ppc/kernel/head_44x.S
---- linux-2.6.10/arch/ppc/kernel/head_44x.S    2004-12-24 22:34:29.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/arch/ppc/kernel/head_44x.S  2005-03-27 16:35:19.000000000 +0200
-@@ -421,7 +421,11 @@ interrupt_base:
-       EXC_XFER_EE_LITE(0x0400, handle_page_fault)
- 
-       /* External Input Interrupt */
-+#ifdef CONFIG_ADEOS_CORE
-+      EXCEPTION(0x0500, ExternalInput, __adeos_grab_irq, EXC_XFER_ADEOS_LITE)
-+#else /* !CONFIG_ADEOS_CORE */
-       EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
-+#endif /* CONFIG_ADEOS_CORE */
- 
-       /* Alignment Interrupt */
-       START_EXCEPTION(Alignment)
-@@ -456,7 +460,11 @@ interrupt_base:
-       lis     r0,TSR_DIS@h            /* Setup the DEC interrupt mask */
-       mtspr   SPRN_TSR,r0             /* Clear the DEC interrupt */
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-+#ifdef CONFIG_ADEOS_CORE
-+      EXC_XFER_ADEOS_LITE(0x1000, __adeos_grab_timer)
-+#else /* !CONFIG_ADEOS_CORE */
-       EXC_XFER_LITE(0x1000, timer_interrupt)
-+#endif /* CONFIG_ADEOS_CORE */
- 
-       /* Fixed Internal Timer Interrupt */
-       /* TODO: Add FIT support */
-diff -uNrp linux-2.6.10/arch/ppc/kernel/head_4xx.S linux-2.6.10-ppc-adeos/arch/ppc/kernel/head_4xx.S
---- linux-2.6.10/arch/ppc/kernel/head_4xx.S    2004-12-24 22:35:39.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/arch/ppc/kernel/head_4xx.S  2005-03-27 16:35:19.000000000 +0200
-@@ -272,7 +272,13 @@ label:
-       EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
-                         ret_from_except)
- 
--#define EXC_XFER_EE(n, hdlr)          \
-+#ifdef CONFIG_ADEOS_CORE
-+#define EXC_XFER_ADEOS_LITE(n, hdlr)          \
-+      EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
-+                        __adeos_ret_from_except)
-+#endif /* CONFIG_ADEOS_CORE */
-+
-+      #define EXC_XFER_EE(n, hdlr)            \
-       EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
-                         ret_from_except_full)
- 
-@@ -280,7 +286,6 @@ label:
-       EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \
-                         ret_from_except)
- 
--
- /*
-  * 0x0100 - Critical Interrupt Exception
-  */
-@@ -435,7 +440,11 @@ label:
-       EXC_XFER_EE_LITE(0x400, handle_page_fault)
- 
- /* 0x0500 - External Interrupt Exception */
-+#ifdef CONFIG_ADEOS_CORE
-+      EXCEPTION(0x0500, HardwareInterrupt, __adeos_grab_irq, EXC_XFER_ADEOS_LITE)
-+#else /* !CONFIG_ADEOS_CORE */
-       EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
-+#endif /* CONFIG_ADEOS_CORE */
- 
- /* 0x0600 - Alignment Exception */
-       START_EXCEPTION(0x0600, Alignment)
-@@ -473,7 +482,11 @@ label:
-       lis     r0,TSR_PIS@h
-       mtspr   SPRN_TSR,r0             /* Clear the PIT exception */
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-+#ifdef CONFIG_ADEOS_CORE
-+      EXC_XFER_ADEOS_LITE(0x1000, __adeos_grab_timer)
-+#else /* !CONFIG_ADEOS_CORE */
-       EXC_XFER_LITE(0x1000, timer_interrupt)
-+#endif /* CONFIG_ADEOS_CORE */
- 
- #if 0
- /* NOTE:
-diff -uNrp linux-2.6.10/arch/ppc/kernel/head_8xx.S linux-2.6.10-ppc-adeos/arch/ppc/kernel/head_8xx.S
---- linux-2.6.10/arch/ppc/kernel/head_8xx.S    2004-12-24 22:34:44.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/arch/ppc/kernel/head_8xx.S  2005-03-27 16:35:19.000000000 +0200
-@@ -194,7 +194,13 @@ i##n:                                                             \
-       EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
-                         ret_from_except)
- 
--#define EXC_XFER_EE(n, hdlr)          \
-+#ifdef CONFIG_ADEOS_CORE
-+#define EXC_XFER_ADEOS_LITE(n, hdlr)          \
-+      EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
-+                        __adeos_ret_from_except)
-+#endif /* CONFIG_ADEOS_CORE */
-+
-+      #define EXC_XFER_EE(n, hdlr)            \
-       EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
-                         ret_from_except_full)
- 
-@@ -241,7 +247,11 @@ InstructionAccess:
-       EXC_XFER_EE_LITE(0x400, handle_page_fault)
- 
- /* External interrupt */
-+#ifdef CONFIG_ADEOS_CORE
-+      EXCEPTION(0x500, HardwareInterrupt, __adeos_grab_irq, EXC_XFER_ADEOS_LITE)
-+#else /* !CONFIG_ADEOS_CORE */
-       EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
-+#endif /* CONFIG_ADEOS_CORE */
- 
- /* Alignment exception */
-       . = 0x600
-@@ -262,7 +272,11 @@ Alignment:
-       EXCEPTION(0x800, FPUnavailable, UnknownException, EXC_XFER_STD)
- 
- /* Decrementer */
-+#ifdef CONFIG_ADEOS_CORE
-+      EXCEPTION(0x900, Decrementer, __adeos_grab_timer, EXC_XFER_ADEOS_LITE)
-+#else /* !CONFIG_ADEOS_CORE */
-       EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
-+#endif /* CONFIG_ADEOS_CORE */
- 
-       EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE)
-       EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE)
-diff -uNrp linux-2.6.10/arch/ppc/kernel/head_booke.h linux-2.6.10-ppc-adeos/arch/ppc/kernel/head_booke.h
---- linux-2.6.10/arch/ppc/kernel/head_booke.h  2004-12-24 22:33:51.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/arch/ppc/kernel/head_booke.h        2005-03-27 16:35:19.000000000 +0200
-@@ -228,6 +228,12 @@ label:
-       EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
-                         ret_from_except)
- 
-+#ifdef CONFIG_ADEOS_CORE
-+#define EXC_XFER_ADEOS_LITE(n, hdlr)          \
-+      EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
-+                        __adeos_ret_from_except)
-+#endif /* CONFIG_ADEOS_CORE */
-+
- #define EXC_XFER_EE(n, hdlr)          \
-       EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
-                         ret_from_except_full)
-diff -uNrp linux-2.6.10/arch/ppc/kernel/head_e500.S linux-2.6.10-ppc-adeos/arch/ppc/kernel/head_e500.S
---- linux-2.6.10/arch/ppc/kernel/head_e500.S   2004-12-24 22:35:23.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/arch/ppc/kernel/head_e500.S 2005-03-27 16:35:19.000000000 +0200
-@@ -473,7 +473,11 @@ interrupt_base:
-       EXC_XFER_EE_LITE(0x0400, handle_page_fault)
- 
-       /* External Input Interrupt */
-+#ifdef CONFIG_ADEOS_CORE
-+      EXCEPTION(0x0500, ExternalInput, __adeos_grab_irq, EXC_XFER_ADEOS_LITE)
-+#else /* !CONFIG_ADEOS_CORE */
-       EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
-+#endif /* CONFIG_ADEOS_CORE */
- 
-       /* Alignment Interrupt */
-       START_EXCEPTION(Alignment)
-@@ -508,7 +512,11 @@ interrupt_base:
-       lis     r0,TSR_DIS@h            /* Setup the DEC interrupt mask */
-       mtspr   SPRN_TSR,r0             /* Clear the DEC interrupt */
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-+#ifdef CONFIG_ADEOS_CORE
-+      EXC_XFER_ADEOS_LITE(0x0900, __adeos_grab_timer)
-+#else /* !CONFIG_ADEOS_CORE */
-       EXC_XFER_LITE(0x0900, timer_interrupt)
-+#endif /* CONFIG_ADEOS_CORE */
- 
-       /* Fixed Internal Timer Interrupt */
-       /* TODO: Add FIT support */
-diff -uNrp linux-2.6.10/arch/ppc/kernel/idle.c linux-2.6.10-ppc-adeos/arch/ppc/kernel/idle.c
---- linux-2.6.10/arch/ppc/kernel/idle.c        2004-12-24 22:33:49.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/arch/ppc/kernel/idle.c      2005-07-01 19:19:23.000000000 +0200
-@@ -59,11 +59,16 @@ void default_idle(void)
-  */
- int cpu_idle(void)
- {
--      for (;;)
-+      for (;;) {
-+#ifdef CONFIG_ADEOS_CORE
-+              adeos_suspend_domain();
-+#endif /* CONFIG_ADEOS_CORE */
-+
-               if (ppc_md.idle != NULL)
-                       ppc_md.idle();
-               else
-                       default_idle();
-+      }
-       return 0;
- }
- 
-diff -uNrp linux-2.6.10/arch/ppc/kernel/ppc_ksyms.c linux-2.6.10-ppc-adeos/arch/ppc/kernel/ppc_ksyms.c
---- linux-2.6.10/arch/ppc/kernel/ppc_ksyms.c   2004-12-24 22:35:28.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/arch/ppc/kernel/ppc_ksyms.c 2005-03-27 16:35:19.000000000 +0200
-@@ -346,3 +346,28 @@ EXPORT_SYMBOL(agp_special_page);
- EXPORT_SYMBOL(__mtdcr);
- EXPORT_SYMBOL(__mfdcr);
- #endif
-+
-+#ifdef CONFIG_ADEOS_CORE
-+/* The following are per-platform convenience exports which are needed
-+   by some Adeos domains loaded as kernel modules. */
-+EXPORT_SYMBOL(__switch_to);
-+void show_stack(struct task_struct *task,
-+              unsigned long *esp);
-+EXPORT_SYMBOL(show_stack);
-+void atomic_set_mask(unsigned long mask,
-+                   unsigned long *ptr);
-+EXPORT_SYMBOL(atomic_set_mask);
-+void atomic_clear_mask(unsigned long mask,
-+                     unsigned long *ptr);
-+EXPORT_SYMBOL(atomic_clear_mask);
-+extern unsigned long context_map[];
-+EXPORT_SYMBOL(context_map);
-+EXPORT_SYMBOL(_switch);
-+#ifdef FEW_CONTEXTS
-+EXPORT_SYMBOL(nr_free_contexts);
-+EXPORT_SYMBOL(context_mm);
-+EXPORT_SYMBOL(steal_context);
-+#endif
-+extern struct task_struct *last_task_used_math;
-+EXPORT_SYMBOL(last_task_used_math);
-+#endif /* CONFIG_ADEOS_CORE */
-diff -uNrp linux-2.6.10/arch/ppc/kernel/traps.c linux-2.6.10-ppc-adeos/arch/ppc/kernel/traps.c
---- linux-2.6.10/arch/ppc/kernel/traps.c       2004-12-24 22:34:26.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/arch/ppc/kernel/traps.c     2005-05-15 11:10:33.000000000 +0200
-@@ -78,6 +78,12 @@ void die(const char * str, struct pt_reg
- {
-       static int die_counter;
-       int nl = 0;
-+
-+#ifdef CONFIG_ADEOS_CORE
-+      /* lets us see Oopses from other domains, too */
-+      if (adp_current != adp_root)
-+              adeos_set_printk_sync(adp_current);
-+#endif /* CONFIG_ADEOS_CORE */
-       console_verbose();
-       spin_lock_irq(&die_lock);
- #ifdef CONFIG_PMAC_BACKLIGHT
-@@ -199,10 +205,22 @@ static inline int check_io_access(struct
- #define clear_single_step(regs)       ((regs)->msr &= ~MSR_SE)
- #endif
- 
-+#ifdef CONFIG_ADEOS_CORE
-+static inline int __adeos_pipeline_trap(int trap, struct pt_regs *regs)
-+{
-+    return __adeos_event_monitors[trap] > 0 ? __adeos_handle_event(trap,regs) : 0;
-+}
-+#endif /* CONFIG_ADEOS_CORE */
-+
- void MachineCheckException(struct pt_regs *regs)
- {
-       unsigned long reason = get_mc_reason(regs);
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_pipeline_trap(ADEOS_MCE_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-+
-       if (user_mode(regs)) {
-               regs->msr |= MSR_RI;
-               _exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
-@@ -338,6 +356,10 @@ void SMIException(struct pt_regs *regs)
- 
- void UnknownException(struct pt_regs *regs)
- {
-+#ifdef CONFIG_ADEOS_CORE
-+       if (__adeos_pipeline_trap(ADEOS_UNKNOWN_TRAP,regs))
-+         return;
-+#endif /* CONFIG_ADEOS_CORE */
-       printk("Bad trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
-              regs->nip, regs->msr, regs->trap, print_tainted());
-       _exception(SIGTRAP, regs, 0, 0);
-@@ -345,6 +367,10 @@ void UnknownException(struct pt_regs *re
- 
- void InstructionBreakpoint(struct pt_regs *regs)
- {
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_pipeline_trap(ADEOS_IABR_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-       if (debugger_iabr_match(regs))
-               return;
-       _exception(SIGTRAP, regs, TRAP_BRKPT, 0);
-@@ -352,6 +378,10 @@ void InstructionBreakpoint(struct pt_reg
- 
- void RunModeException(struct pt_regs *regs)
- {
-+#ifdef CONFIG_ADEOS_CORE
-+        if (__adeos_pipeline_trap(ADEOS_RM_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-       _exception(SIGTRAP, regs, 0, 0);
- }
- 
-@@ -421,6 +451,10 @@ static void emulate_single_step(struct p
- {
-       if (single_stepping(regs)) {
-               clear_single_step(regs);
-+#ifdef CONFIG_ADEOS_CORE
-+              if (__adeos_pipeline_trap(ADEOS_SSTEP_TRAP,regs))
-+                  return;
-+#endif /* CONFIG_ADEOS_CORE */
-               _exception(SIGTRAP, regs, TRAP_TRACE, 0);
-       }
- }
-@@ -490,6 +524,11 @@ void ProgramCheckException(struct pt_reg
-       unsigned int reason = get_reason(regs);
-       extern int do_mathemu(struct pt_regs *regs);
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_pipeline_trap(ADEOS_PCE_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-+
- #ifdef CONFIG_MATH_EMULATION
-       /* (reason & REASON_ILLEGAL) would be the obvious thing here,
-        * but there seems to be a hardware bug on the 405GP (RevD)
-@@ -567,6 +606,10 @@ void ProgramCheckException(struct pt_reg
- void SingleStepException(struct pt_regs *regs)
- {
-       regs->msr &= ~MSR_SE;  /* Turn off 'trace' bit */
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_pipeline_trap(ADEOS_SSTEP_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-       if (debugger_sstep(regs))
-               return;
-       _exception(SIGTRAP, regs, TRAP_TRACE, 0);
-@@ -581,6 +624,12 @@ void AlignmentException(struct pt_regs *
-               regs->nip += 4; /* skip over emulated instruction */
-               return;
-       }
-+#ifdef CONFIG_ADEOS_CORE
-+      /* Assume that fixing alignment can always be done regardless
-+         of the current domain. */
-+      if (__adeos_pipeline_trap(ADEOS_ALIGNMENT_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-       if (fixed == -EFAULT) {
-               /* fixed == -EFAULT means the operand address was bad */
-               if (user_mode(regs))
-@@ -603,6 +652,10 @@ void StackOverflow(struct pt_regs *regs)
- 
- void nonrecoverable_exception(struct pt_regs *regs)
- {
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_pipeline_trap(ADEOS_NREC_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-       printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
-              regs->nip, regs->msr);
-       debugger(regs);
-@@ -623,6 +676,11 @@ void SoftwareEmulation(struct pt_regs *r
-       extern int Soft_emulate_8xx(struct pt_regs *);
-       int errcode;
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_pipeline_trap(ADEOS_SOFTEMU_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-+
-       CHECK_FULL_REGS(regs);
- 
-       if (!user_mode(regs)) {
-@@ -651,6 +709,10 @@ void SoftwareEmulation(struct pt_regs *r
- 
- void DebugException(struct pt_regs *regs, unsigned long debug_status)
- {
-+#ifdef CONFIG_ADEOS_CORE
-+        if (__adeos_pipeline_trap(ADEOS_DEBUG_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-       if (debug_status & DBSR_IC) {   /* instruction completion */
-               regs->msr &= ~MSR_DE;
-               if (user_mode(regs)) {
-@@ -680,6 +742,11 @@ void AltivecUnavailException(struct pt_r
- {
-       static int kernel_altivec_count;
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_pipeline_trap(ADEOS_ALTUNAVAIL_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-+
- #ifndef CONFIG_ALTIVEC
-       if (user_mode(regs)) {
-               /* A user program has executed an altivec instruction,
-@@ -701,6 +768,11 @@ void AltivecAssistException(struct pt_re
- {
-       int err;
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_pipeline_trap(ADEOS_ALTASSIST_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-+
-       preempt_disable();
-       if (regs->msr & MSR_VEC)
-               giveup_altivec(current);
-@@ -747,6 +819,11 @@ void SPEFloatingPointException(struct pt
-       int fpexc_mode;
-       int code = 0;
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_pipeline_trap(ADEOS_SPE_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-+
-       spefscr = current->thread.spefscr;
-       fpexc_mode = current->thread.fpexc_mode;
- 
-diff -uNrp linux-2.6.10/arch/ppc/mm/fault.c linux-2.6.10-ppc-adeos/arch/ppc/mm/fault.c
---- linux-2.6.10/arch/ppc/mm/fault.c   2004-12-24 22:34:29.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/arch/ppc/mm/fault.c 2005-03-27 16:35:19.000000000 +0200
-@@ -116,6 +116,12 @@ int do_page_fault(struct pt_regs *regs, 
-               is_write = error_code & 0x02000000;
- #endif /* CONFIG_4xx || CONFIG_BOOKE */
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_event_monitors[ADEOS_ACCESS_TRAP] > 0 &&
-+          __adeos_handle_event(ADEOS_ACCESS_TRAP,regs) != 0)
-+          return 0;
-+#endif /* CONFIG_ADEOS_CORE */
-+
- #if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
-       if (debugger_fault_handler && TRAP(regs) == 0x300) {
-               debugger_fault_handler(regs);
-diff -uNrp linux-2.6.10/arch/ppc/platforms/pmac_pic.c linux-2.6.10-ppc-adeos/arch/ppc/platforms/pmac_pic.c
---- linux-2.6.10/arch/ppc/platforms/pmac_pic.c 2004-12-24 22:35:28.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/arch/ppc/platforms/pmac_pic.c       2005-05-25 23:42:45.000000000 +0200
-@@ -387,7 +387,12 @@ static irqreturn_t k2u3_action(int cpl, 
-       return IRQ_HANDLED;
- }
- 
-+#ifdef CONFIG_ADEOS_CORE
-+/* this is used in kernel/adeos.c adeos_acknowledge_irq */
-+struct irqaction k2u3_cascade_action = {
-+#else /* !CONFIG_ADEOS_CORE */
- static struct irqaction k2u3_cascade_action = {
-+#endif /* CONFIG_ADEOS_CORE */
-       .handler        = k2u3_action,
-       .flags          = 0,
-       .mask           = CPU_MASK_NONE,
-diff -uNrp linux-2.6.10/include/asm-ppc/adeos.h linux-2.6.10-ppc-adeos/include/asm-ppc/adeos.h
---- linux-2.6.10/include/asm-ppc/adeos.h       1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/include/asm-ppc/adeos.h     2005-09-11 22:10:23.000000000 +0200
-@@ -0,0 +1,451 @@
-+/*
-+ *   include/asm-ppc/adeos.h
-+ *
-+ *   Copyright (C) 2004 Philippe Gerum.
-+ *
-+ *   Adeos/PPC port over 2.6 based on the previous 2.4 implementation by:
-+ *
-+ *   Copyright (C) 2004 Wolfgang Grandegger.
-+ *
-+ *   It follows closely the ARM and x86 ports of ADEOS.
-+ *
-+ *   Copyright (C) 2002 Philippe Gerum.
-+ *
-+ *   This program is free software; you can redistribute it and/or modify
-+ *   it under the terms of the GNU General Public License as published by
-+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
-+ *   USA; either version 2 of the License, or (at your option) any later
-+ *   version.
-+ *
-+ *   This program is distributed in the hope that it will be useful,
-+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ *   GNU General Public License for more details.
-+ *
-+ *   You should have received a copy of the GNU General Public License
-+ *   along with this program; if not, write to the Free Software
-+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ */
-+
-+#ifndef __PPC_ADEOS_H
-+#define __PPC_ADEOS_H
-+
-+#include <asm/ptrace.h>
-+#include <asm/irq.h>
-+#include <asm/bitops.h>
-+#include <linux/list.h>
-+#include <linux/cpumask.h>
-+#include <linux/threads.h>
-+
-+#define ADEOS_ARCH_STRING     "r8c4/ppc"
-+#define ADEOS_MAJOR_NUMBER    8
-+#define ADEOS_MINOR_NUMBER    4
-+
-+#define ADEOS_IRQ_ACKED               0x1000
-+#define ADEOS_IRQ_ACKED_MASK  (ADEOS_IRQ_ACKED - 1)
-+
-+#ifdef CONFIG_SMP
-+
-+#error "Adeos/ppc: SMP not yet implemented"
-+
-+#define ADEOS_NR_CPUS          NR_CPUS
-+#define ADEOS_CRITICAL_IPI     0
-+
-+#define adeos_processor_id()   (__adeos_current_threadinfo()->cpu)
-+
-+#define adeos_declare_cpuid    int cpuid
-+#define adeos_load_cpuid()     do { \
-+                                  (cpuid) = adeos_processor_id();     \
-+                               } while(0)
-+#define adeos_lock_cpu(flags)  do { \
-+                                  adeos_hw_local_irq_save(flags); \
-+                                  (cpuid) = adeos_processor_id(); \
-+                               } while(0)
-+#define adeos_unlock_cpu(flags) adeos_hw_local_irq_restore(flags)
-+#define adeos_get_cpu(flags)    adeos_lock_cpu(flags)
-+#define adeos_put_cpu(flags)    adeos_unlock_cpu(flags)
-+#define adp_current             (adp_cpu_current[adeos_processor_id()])
-+
-+#else  /* !CONFIG_SMP */
-+
-+#define ADEOS_NR_CPUS          1
-+#define adeos_processor_id()   0
-+/* Array references using this index should be optimized out. */
-+#define adeos_declare_cpuid    const int cpuid = 0
-+#define adeos_load_cpuid()      /* nop */
-+#define adeos_lock_cpu(flags)   adeos_hw_local_irq_save(flags)
-+#define adeos_unlock_cpu(flags) adeos_hw_local_irq_restore(flags)
-+#define adeos_get_cpu(flags)    do { flags = flags; } while(0)
-+#define adeos_put_cpu(flags)    /* nop */
-+#define adp_current             (adp_cpu_current[0])
-+
-+#endif /* CONFIG_SMP */
-+
-+ /* PPC traps */
-+#define ADEOS_ACCESS_TRAP     0       /* Data or instruction access exception */
-+#define ADEOS_ALIGNMENT_TRAP  1       /* Alignment exception */
-+#define ADEOS_ALTUNAVAIL_TRAP 2       /* Altivec unavailable */
-+#define ADEOS_PCE_TRAP        3       /* Program check exception */
-+#define ADEOS_MCE_TRAP        4       /* Machine check exception */
-+#define ADEOS_UNKNOWN_TRAP    5       /* Unknown exception */
-+#define ADEOS_IABR_TRAP       6       /* Instruction breakpoint */
-+#define ADEOS_RM_TRAP         7       /* Run mode exception */
-+#define ADEOS_SSTEP_TRAP      8       /* Single-step exception  */
-+#define ADEOS_NREC_TRAP       9       /* Non-recoverable exception  */
-+#define ADEOS_SOFTEMU_TRAP    10 /* Software emulation  */
-+#define ADEOS_DEBUG_TRAP      11 /* Debug exception  */
-+#define ADEOS_SPE_TRAP        12 /* SPE exception  */
-+#define ADEOS_ALTASSIST_TRAP  13 /* Altivec assist exception */
-+#define ADEOS_NR_FAULTS       14
-+/* Pseudo-vectors used for kernel events */
-+#define ADEOS_FIRST_KEVENT      ADEOS_NR_FAULTS
-+#define ADEOS_SYSCALL_PROLOGUE  (ADEOS_FIRST_KEVENT)
-+#define ADEOS_SYSCALL_EPILOGUE  (ADEOS_FIRST_KEVENT + 1)
-+#define ADEOS_SCHEDULE_HEAD     (ADEOS_FIRST_KEVENT + 2)
-+#define ADEOS_SCHEDULE_TAIL     (ADEOS_FIRST_KEVENT + 3)
-+#define ADEOS_ENTER_PROCESS     (ADEOS_FIRST_KEVENT + 4)
-+#define ADEOS_EXIT_PROCESS      (ADEOS_FIRST_KEVENT + 5)
-+#define ADEOS_SIGNAL_PROCESS    (ADEOS_FIRST_KEVENT + 6)
-+#define ADEOS_KICK_PROCESS      (ADEOS_FIRST_KEVENT + 7)
-+#define ADEOS_RENICE_PROCESS    (ADEOS_FIRST_KEVENT + 8)
-+#define ADEOS_USER_EVENT        (ADEOS_FIRST_KEVENT + 9)
-+#define ADEOS_LAST_KEVENT       (ADEOS_USER_EVENT)
-+
-+#define ADEOS_NR_EVENTS         (ADEOS_LAST_KEVENT + 1)
-+
-+typedef struct adevinfo {
-+
-+    unsigned domid;
-+    unsigned event;
-+    void *evdata;
-+
-+    volatile int propagate;   /* Private */
-+
-+} adevinfo_t;
-+
-+typedef struct adsysinfo {
-+
-+    int ncpus;                        /* Number of CPUs on board */
-+
-+    u64 cpufreq;              /* CPU frequency (in Hz) */
-+
-+    /* Arch-dependent block */
-+
-+    struct {
-+      unsigned tmirq;         /* Decrementer virtual IRQ */
-+      u64 tmfreq;             /* Timebase frequency */
-+    } archdep;
-+
-+} adsysinfo_t;
-+
-+#define IPIPE_NR_XIRQS   NR_IRQS
-+/* Number of virtual IRQs */
-+#define IPIPE_NR_VIRQS   BITS_PER_LONG
-+/* First virtual IRQ # */
-+#define IPIPE_VIRQ_BASE  (((IPIPE_NR_XIRQS + BITS_PER_LONG - 1) / BITS_PER_LONG) * BITS_PER_LONG)
-+/* Total number of IRQ slots */
-+#define IPIPE_NR_IRQS     (IPIPE_VIRQ_BASE + IPIPE_NR_VIRQS)
-+/* Number of indirect words needed to map the whole IRQ space. */
-+#define IPIPE_IRQ_IWORDS  ((IPIPE_NR_IRQS + BITS_PER_LONG - 1) / BITS_PER_LONG)
-+#define IPIPE_IRQ_IMASK   (BITS_PER_LONG - 1)
-+#define IPIPE_IRQ_ISHIFT  5   /* 2^5 for 32bits arch. */
-+
-+#define IPIPE_IRQMASK_ANY   (~0L)
-+#define IPIPE_IRQMASK_VIRT  (IPIPE_IRQMASK_ANY << (IPIPE_VIRQ_BASE / BITS_PER_LONG))
-+
-+/* The first virtual interrupt is reserved for the timer (see
-+   __adeos_init_platform). */
-+#define ADEOS_TIMER_VIRQ    IPIPE_VIRQ_BASE
-+
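
[Annotation, not patch content: a worked example of the IRQ-space layout the IPIPE_* macros above produce, assuming a 32-bit ppc with NR_IRQS = 128 (the real NR_IRQS is platform-dependent). The macro names below are local to the sketch.]

    #include <stdio.h>

    #define BITS 32
    #define NR_XIRQS 128                                      /* assumed NR_IRQS */
    #define VIRQ_BASE (((NR_XIRQS + BITS - 1) / BITS) * BITS) /* rounds up to 128 */
    #define NR_IRQS_TOTAL (VIRQ_BASE + BITS)                  /* 128 + 32 = 160 */
    #define IRQ_IWORDS ((NR_IRQS_TOTAL + BITS - 1) / BITS)    /* 5 bitmap words */

    int main(void)
    {
        printf("virtual IRQs: %d..%d\n", VIRQ_BASE, NR_IRQS_TOTAL - 1);
        printf("timer virq (first virtual IRQ): %d\n", VIRQ_BASE);
        printf("pending-bitmap words: %d\n", IRQ_IWORDS);
        return 0;
    }
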
-+typedef struct adomain {
-+
-+    /* -- Section: offset-based references are made on these fields
-+       from inline assembly code. Please don't move or reorder. */
-+#ifdef CONFIG_ADEOS_THREADS
-+    unsigned long esp[ADEOS_NR_CPUS]; /* Domain stack pointers */
-+#endif /* CONFIG_ADEOS_THREADS */
-+    void (*dswitch)(void);    /* Domain switch hook */
-+    /* -- End of section. */
-+
-+    struct list_head p_link;  /* Link in pipeline */
-+
-+    struct adcpudata {
-+      unsigned long status;
-+      unsigned long irq_pending_hi;
-+      unsigned long irq_pending_lo[IPIPE_IRQ_IWORDS];
-+      unsigned irq_hits[IPIPE_NR_IRQS];
-+#ifdef CONFIG_ADEOS_THREADS
-+      adevinfo_t event_info;
-+#endif /* CONFIG_ADEOS_THREADS */
-+    } cpudata[ADEOS_NR_CPUS];
-+
-+    struct {
-+      int (*acknowledge)(unsigned irq);
-+      void (*handler)(unsigned irq);
-+      unsigned long control;
-+    } irqs[IPIPE_NR_IRQS];
-+
-+    struct {
-+      void (*handler)(adevinfo_t *evinfo);
-+    } events[ADEOS_NR_EVENTS];
-+
-+    struct adomain *m_link;   /* Link in mutex sleep queue */
-+
-+    unsigned long flags;
-+
-+    unsigned domid;
-+
-+    const char *name;
-+
-+    int priority;
-+
-+    int ptd_keymax;
-+    int ptd_keycount;
-+    unsigned long ptd_keymap;
-+    void (*ptd_setfun)(int, void *);
-+    void *(*ptd_getfun)(int);
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+    unsigned long estackbase[ADEOS_NR_CPUS];
-+#endif /* CONFIG_ADEOS_THREADS */
-+
-+} adomain_t;
-+
-+/* The following macros must be used with hw interrupts off. */
-+
-+#define __adeos_set_irq_bit(adp,cpuid,irq) \
-+do { \
-+    if (!test_bit(IPIPE_LOCK_FLAG,&(adp)->irqs[irq].control)) { \
-+        __set_bit(irq & IPIPE_IRQ_IMASK,&(adp)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
-+        __set_bit(irq >> IPIPE_IRQ_ISHIFT,&(adp)->cpudata[cpuid].irq_pending_hi); \
-+       } \
-+} while(0)
-+
-+#define __adeos_clear_pend(adp,cpuid,irq) \
-+do { \
-+    __clear_bit(irq & IPIPE_IRQ_IMASK,&(adp)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
-+    if ((adp)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT] == 0) \
-+        __clear_bit(irq >> IPIPE_IRQ_ISHIFT,&(adp)->cpudata[cpuid].irq_pending_hi); \
-+} while(0)
-+
-+#define __adeos_lock_irq(adp,cpuid,irq) \
-+do { \
-+    if (!test_and_set_bit(IPIPE_LOCK_FLAG,&(adp)->irqs[irq].control)) \
-+      __adeos_clear_pend(adp,cpuid,irq); \
-+} while(0)
-+
-+#define __adeos_unlock_irq(adp,irq) \
-+do { \
-+    if (test_and_clear_bit(IPIPE_LOCK_FLAG,&(adp)->irqs[irq].control)) { \
-+        int __cpuid, __nr_cpus = num_online_cpus();         \
-+      for (__cpuid = 0; __cpuid < __nr_cpus; __cpuid++)      \
-+         if ((adp)->cpudata[__cpuid].irq_hits[irq] > 0) { /* We need atomic ops next. */ \
-+           set_bit(irq & IPIPE_IRQ_IMASK,&(adp)->cpudata[__cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
-+           set_bit(irq >> IPIPE_IRQ_ISHIFT,&(adp)->cpudata[__cpuid].irq_pending_hi); \
-+         } \
-+    } \
-+} while(0)
-+
-+#define __adeos_clear_irq(adp,irq) \
-+do { \
-+    int __cpuid, __nr_cpus = num_online_cpus(); \
-+    clear_bit(IPIPE_LOCK_FLAG,&(adp)->irqs[irq].control); \
-+    for (__cpuid = 0; __cpuid < __nr_cpus; __cpuid++) {       \
-+       (adp)->cpudata[__cpuid].irq_hits[irq] = 0; \
-+       __adeos_clear_pend(adp,__cpuid,irq); \
-+    } \
-+} while(0)
-+
-+#define adeos_virtual_irq_p(irq) ((irq) >= IPIPE_VIRQ_BASE && \
-+                                (irq) < IPIPE_NR_IRQS)
-+
-+static inline void adeos_hw_local_irq_save_ptr(unsigned long *flags)
-+{
-+    unsigned long msr;
-+    msr = mfmsr();
-+    *flags = msr;
-+    mtmsr(msr & ~MSR_EE);
-+    __asm__ __volatile__("": : :"memory");
-+}
-+
-+#define adeos_hw_local_irq_save_flags(flags) adeos_hw_local_irq_save_ptr(&(flags))
-+#define adeos_hw_local_irq_restore(flags)    mtmsr(flags)
-+
-+static inline void adeos_hw_local_irq_disable(void)
-+{
-+    unsigned long msr;
-+    msr = mfmsr();
-+    mtmsr(msr & ~MSR_EE);
-+    __asm__ __volatile__("": : :"memory");
-+}
-+
-+static inline void adeos_hw_local_irq_enable(void)
-+{
-+    unsigned long msr;
-+    __asm__ __volatile__("": : :"memory");
-+    msr = mfmsr();
-+    mtmsr(msr | MSR_EE);
-+}
-+
-+#define adeos_hw_local_irq_save(flags) ({adeos_hw_local_irq_save_flags(flags);adeos_hw_local_irq_disable();})
-+#define adeos_hw_save_flags_and_sti(flags) ({adeos_hw_local_irq_save_flags(flags);adeos_hw_local_irq_enable();})
-+
-+#define adeos_hw_cli() adeos_hw_local_irq_disable()
-+#define adeos_hw_sti() adeos_hw_local_irq_enable()
-+
-+#define adeos_hw_local_irq_flags(flags)       asm volatile("mfmsr %0" : "=r" (flags))
-+#define adeos_hw_test_iflag(x)                ((x) & MSR_EE)
-+#define adeos_hw_irqs_disabled()      \
-+({                                    \
-+      unsigned long flags;            \
-+      adeos_hw_local_irq_flags(flags);\
-+      !adeos_hw_test_iflag(flags);    \
-+})
-+
-+/* Get the machine TSC by copying the time base registers' contents to
-+   the destination variable. Big-endianness is assumed here. */
-+
-+#define adeos_hw_tsc(t) \
-+({ unsigned long __tbu; \
-+   __asm__ __volatile__ ("1: mftbu %0\n" \
-+                       "mftb %1\n" \
-+                       "mftbu %2\n" \
-+                       "cmpw %2,%0\n" \
-+                       "bne- 1b\n" \
-+                       :"=r" (((unsigned long *)&t)[0]), \
-+                       "=r" (((unsigned long *)&t)[1]), \
-+                       "=r" (__tbu)); \
-+   t; })
-+
-+extern unsigned tb_ticks_per_jiffy;
-+
-+#define adeos_cpu_freq() (HZ * tb_ticks_per_jiffy)
-+
-+#define adeos_spin_lock(x)     _spin_lock(x)
-+#define adeos_spin_unlock(x)   _spin_unlock(x)
-+#define adeos_spin_trylock(x)  _spin_trylock(x)
-+#define adeos_write_lock(x)    _write_lock(x)
-+#define adeos_write_unlock(x)  _write_unlock(x)
-+#define adeos_write_trylock(x) _write_trylock(x)
-+#define adeos_read_lock(x)     _read_lock(x)
-+#define adeos_read_unlock(x)   _read_unlock(x)
-+#define raw_spinlock_t         spinlock_t
-+#define RAW_SPIN_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED
-+#define raw_rwlock_t           rwlock_t
-+#define RAW_RW_LOCK_UNLOCKED   RW_LOCK_UNLOCKED
-+
-+#define spin_lock_irqsave_hw(lock,flags)      adeos_spin_lock_irqsave(lock, flags)
-+#define spin_unlock_irqrestore_hw(lock,flags) adeos_spin_unlock_irqrestore(lock, flags)
-+
-+#define adeos_spin_lock_irqsave(x,flags)  \
-+do { \
-+   adeos_hw_local_irq_save(flags); \
-+   adeos_spin_lock(x); \
-+} while (0)
-+
-+#define adeos_spin_unlock_irqrestore(x,flags)  \
-+do { \
-+   adeos_spin_unlock(x); \
-+   adeos_hw_local_irq_restore(flags); \
-+} while (0)
-+
-+#define adeos_spin_lock_disable(x)  \
-+do { \
-+   adeos_hw_cli(); \
-+   adeos_spin_lock(x); \
-+} while (0)
-+
-+#define adeos_spin_unlock_enable(x)  \
-+do { \
-+   adeos_spin_unlock(x); \
-+   adeos_hw_sti(); \
-+} while (0)
-+
-+#define adeos_read_lock_irqsave(lock, flags) \
-+do { \
-+   adeos_hw_local_irq_save(flags); \
-+   adeos_read_lock(lock); \
-+} while (0)
-+
-+#define adeos_read_unlock_irqrestore(lock, flags) \
-+do { \
-+   adeos_read_unlock(lock); \
-+   adeos_hw_local_irq_restore(flags); \
-+} while (0)
-+
-+#define adeos_write_lock_irqsave(lock, flags) \
-+do { \
-+   adeos_hw_local_irq_save(flags); \
-+   adeos_write_lock(lock); \
-+} while (0)
-+
-+#define adeos_write_unlock_irqrestore(lock, flags) \
-+do { \
-+   adeos_write_unlock(lock); \
-+   adeos_hw_local_irq_restore(flags); \
-+} while (0)
-+
-+/* Private interface -- Internal use only */
-+
-+struct adattr;
-+
-+void __adeos_init(void);
-+
-+void __adeos_init_domain(adomain_t *adp,
-+                       struct adattr *attr);
-+
-+void __adeos_cleanup_domain(adomain_t *adp);
-+
-+#define __adeos_check_platform() do { } while(0)
-+
-+#define __adeos_read_timebase() ({ unsigned long long t; adeos_hw_tsc(t); t; })
-+
-+void __adeos_init_platform(void);
-+
-+void __adeos_enable_pipeline(void);
-+
-+void __adeos_disable_pipeline(void);
-+
-+void __adeos_init_stage(adomain_t *adp);
-+
-+void __adeos_sync_stage(unsigned long syncmask);
-+
-+int __adeos_ack_irq(unsigned irq);
-+
-+void __adeos_do_IRQ(int irq,
-+                  struct pt_regs *regs);
-+
-+void __adeos_do_timer(int irq,
-+                    struct pt_regs *regs);
-+
-+struct thread_info *__adeos_current_threadinfo(void);
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+
-+int __adeos_switch_domain(adomain_t *adp,
-+                        adomain_t **currentp);
-+
-+/* Called with hw interrupts off. */
-+static inline void __adeos_switch_to (adomain_t *out,
-+                                    adomain_t *in,
-+                                    int cpuid)
-+{
-+    extern adomain_t *adp_cpu_current[];
-+
-+    __adeos_switch_domain(in,&adp_cpu_current[cpuid]);
-+
-+    if (out->dswitch != NULL)
-+      out->dswitch();
-+}
-+
-+#endif /* CONFIG_ADEOS_THREADS */
-+
-+extern struct pt_regs __adeos_irq_regs;
-+
-+extern unsigned long __adeos_virtual_irq_map;
-+
-+extern unsigned long __adeos_decr_ticks;
-+
-+extern unsigned long long __adeos_decr_next[];
-+
-+#endif /* !__PPC_ADEOS_H */
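
A quick illustration of the two-level pending-IRQ log declared above: irq_pending_hi is a summary bitmap whose set bits select words of irq_pending_lo[], and an IRQ number is split with IPIPE_IRQ_ISHIFT/IPIPE_IRQ_IMASK exactly as __adeos_set_irq_bit() does. The stand-alone sketch below is not part of the patch; all *_demo names are hypothetical.

/* Stand-alone illustration of the two-level pending-IRQ encoding.
 * The *_demo names are invented; the constants mirror
 * IPIPE_IRQ_ISHIFT/IPIPE_IRQ_IMASK for 32-bit "lo" words.
 */
#include <stdio.h>

#define DEMO_ISHIFT  5                          /* 2^5 IRQ bits per "lo" word */
#define DEMO_IMASK   ((1UL << DEMO_ISHIFT) - 1)
#define DEMO_IWORDS  8                          /* enough words for this demo */

static unsigned long pending_hi_demo;               /* summary bitmap */
static unsigned long pending_lo_demo[DEMO_IWORDS];  /* per-word IRQ bits */

static void mark_pending_demo(unsigned irq)
{
    /* Same split as __adeos_set_irq_bit(): low bits select the bit inside
       a "lo" word, high bits select both the word and the "hi" summary bit. */
    pending_lo_demo[irq >> DEMO_ISHIFT] |= 1UL << (irq & DEMO_IMASK);
    pending_hi_demo |= 1UL << (irq >> DEMO_ISHIFT);
}

static int next_pending_demo(void)
{
    unsigned w, b;

    for (w = 0; w < DEMO_IWORDS; w++) {
        if (!(pending_hi_demo & (1UL << w)))
            continue;                 /* whole word empty, skip it */
        for (b = 0; b <= DEMO_IMASK; b++)
            if (pending_lo_demo[w] & (1UL << b))
                return (int)((w << DEMO_ISHIFT) | b);
    }
    return -1;                        /* nothing pending */
}

int main(void)
{
    mark_pending_demo(37);
    printf("next pending IRQ: %d\n", next_pending_demo());  /* prints 37 */
    return 0;
}

The summary word lets a domain decide with a single test (irq_pending_hi != 0) whether any interrupt is pending, which is how the pipeline code below checks for work.
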
-diff -uNrp linux-2.6.10/include/asm-ppc/hw_irq.h linux-2.6.10-ppc-adeos/include/asm-ppc/hw_irq.h
---- linux-2.6.10/include/asm-ppc/hw_irq.h      2004-12-24 22:35:15.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/include/asm-ppc/hw_irq.h    2005-03-27 16:35:19.000000000 +0200
-@@ -12,6 +12,37 @@ extern void timer_interrupt(struct pt_re
- 
- #define INLINE_IRQS
- 
-+#ifdef CONFIG_ADEOS_CORE
-+
-+void __adeos_stall_root(void);
-+void __adeos_unstall_root(void);
-+unsigned long __adeos_test_root(void);
-+unsigned long __adeos_test_and_stall_root(void);
-+void __adeos_restore_root(unsigned long flags);
-+
-+#define irqs_disabled()  __adeos_test_root()
-+
-+static inline void local_irq_disable(void) {
-+    __adeos_stall_root();
-+}
-+
-+static inline void local_irq_enable(void) {
-+    __adeos_unstall_root();
-+}
-+
-+static inline void local_irq_save_ptr(unsigned long *flags) {
-+    *flags = __adeos_test_and_stall_root();
-+}
-+
-+static inline void local_irq_restore(unsigned long flags) {
-+    __adeos_restore_root(flags);
-+}
-+
-+#define local_save_flags(flags)               ((flags) = __adeos_test_root())
-+#define local_irq_save(flags)         local_irq_save_ptr(&flags)
-+
-+#else /* !CONFIG_ADEOS_CORE */
-+
- #define irqs_disabled()       ((mfmsr() & MSR_EE) == 0)
- 
- #ifdef INLINE_IRQS
-@@ -57,6 +88,8 @@ extern void local_save_flags_ptr(unsigne
- 
- #endif
- 
-+#endif /* CONFIG_ADEOS_CORE */
-+
- extern void do_lost_interrupts(unsigned long);
- 
- #define mask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->disable) irq_desc[irq].handler->disable(irq);})
-diff -uNrp linux-2.6.10/include/asm-ppc/mmu_context.h linux-2.6.10-ppc-adeos/include/asm-ppc/mmu_context.h
---- linux-2.6.10/include/asm-ppc/mmu_context.h 2004-12-24 22:34:58.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/include/asm-ppc/mmu_context.h       2005-09-01 10:10:08.000000000 +0200
-@@ -150,11 +150,18 @@ static inline void get_mmu_context(struc
- static inline void destroy_context(struct mm_struct *mm)
- {
-       if (mm->context != NO_CONTEXT) {
-+#ifdef CONFIG_ADEOS_CORE
-+              unsigned long flags;
-+              adeos_hw_local_irq_save(flags);
-+#endif /* CONFIG_ADEOS_CORE */
-               clear_bit(mm->context, context_map);
-               mm->context = NO_CONTEXT;
- #ifdef FEW_CONTEXTS
-               atomic_inc(&nr_free_contexts);
- #endif
-+#ifdef CONFIG_ADEOS_CORE
-+              adeos_hw_local_irq_restore(flags);
-+#endif /* CONFIG_ADEOS_CORE */
-       }
- }
- 
-@@ -189,7 +196,17 @@ static inline void switch_mm(struct mm_s
-  * After we have set current->mm to a new value, this activates
-  * the context for the new mm so we see the new mappings.
-  */
-+#ifdef CONFIG_ADEOS_CORE
-+#define activate_mm(active_mm, mm)   \
-+do { \
-+    unsigned long flags; \
-+    adeos_hw_local_irq_save(flags); \
-+    switch_mm(active_mm, mm, current); \
-+    adeos_hw_local_irq_restore(flags); \
-+} while(0)
-+#else /* !CONFIG_ADEOS_CORE */
- #define activate_mm(active_mm, mm)   switch_mm(active_mm, mm, current)
-+#endif /* CONFIG_ADEOS_CORE */
- 
- extern void mmu_context_init(void);
- 
-diff -uNrp linux-2.6.10/include/asm-ppc/smp.h linux-2.6.10-ppc-adeos/include/asm-ppc/smp.h
---- linux-2.6.10/include/asm-ppc/smp.h 2004-12-24 22:34:23.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/include/asm-ppc/smp.h       2005-03-27 16:35:19.000000000 +0200
-@@ -45,7 +45,12 @@ extern void smp_local_timer_interrupt(st
- #define NO_PROC_ID            0xFF            /* No processor magic marker */
- #define PROC_CHANGE_PENALTY   20
- 
--#define smp_processor_id() (current_thread_info()->cpu)
-+#ifdef CONFIG_ADEOS_CORE
-+#include <asm/adeos.h>
-+#define smp_processor_id()      adeos_processor_id()
-+#else /* !CONFIG_ADEOS_CORE */
-+#define smp_processor_id()    (current_thread_info()->cpu)
-+#endif /* CONFIG_ADEOS_CORE */
- 
- extern int __cpu_up(unsigned int cpu);
- 
-diff -uNrp linux-2.6.10/include/linux/adeos.h linux-2.6.10-ppc-adeos/include/linux/adeos.h
---- linux-2.6.10/include/linux/adeos.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/include/linux/adeos.h       2005-07-24 19:33:25.000000000 +0200
-@@ -0,0 +1,553 @@
-+/*
-+ *   include/linux/adeos.h
-+ *
-+ *   Copyright (C) 2002,2003,2004 Philippe Gerum.
-+ *
-+ *   This program is free software; you can redistribute it and/or modify
-+ *   it under the terms of the GNU General Public License as published by
-+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
-+ *   USA; either version 2 of the License, or (at your option) any later
-+ *   version.
-+ *
-+ *   This program is distributed in the hope that it will be useful,
-+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ *   GNU General Public License for more details.
-+ *
-+ *   You should have received a copy of the GNU General Public License
-+ *   along with this program; if not, write to the Free Software
-+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ */
-+
-+#ifndef __LINUX_ADEOS_H
-+#define __LINUX_ADEOS_H
-+
-+#include <linux/config.h>
-+
-+#ifdef CONFIG_ADEOS_CORE
-+
-+#include <linux/spinlock.h>
-+#include <asm/adeos.h>
-+
-+#define ADEOS_VERSION_PREFIX  "2.6"
-+#define ADEOS_VERSION_STRING  (ADEOS_VERSION_PREFIX ADEOS_ARCH_STRING)
-+#define ADEOS_RELEASE_NUMBER  (0x02060000|((ADEOS_MAJOR_NUMBER&0xff)<<8)|(ADEOS_MINOR_NUMBER&0xff))
-+
-+#define ADEOS_ROOT_PRI       100
-+#define ADEOS_ROOT_ID        0
-+#define ADEOS_ROOT_NPTDKEYS  4        /* Must be <= 32 */
-+
-+#define ADEOS_RESET_TIMER  0x1
-+#define ADEOS_SAME_HANDLER ((void (*)(unsigned))(-1))
-+
-+/* Global domain flags */
-+#define ADEOS_SPRINTK_FLAG 0  /* Synchronous printk() allowed */
-+#define ADEOS_PPRINTK_FLAG 1  /* Asynchronous printk() request pending */
-+
-+/* Per-cpu pipeline flags.
-+   WARNING: some implementation might refer to those flags
-+   non-symbolically in assembly portions (e.g. x86). */
-+#define IPIPE_STALL_FLAG   0  /* Stalls a pipeline stage */
-+#define IPIPE_XPEND_FLAG   1  /* Exception notification is pending */
-+#define IPIPE_SLEEP_FLAG   2  /* Domain has self-suspended */
-+#define IPIPE_SYNC_FLAG    3  /* The interrupt syncer is running for the domain */
-+
-+#define IPIPE_HANDLE_FLAG    0
-+#define IPIPE_PASS_FLAG      1
-+#define IPIPE_ENABLE_FLAG    2
-+#define IPIPE_DYNAMIC_FLAG   IPIPE_HANDLE_FLAG
-+#define IPIPE_EXCLUSIVE_FLAG 3
-+#define IPIPE_STICKY_FLAG    4
-+#define IPIPE_SYSTEM_FLAG    5
-+#define IPIPE_LOCK_FLAG      6
-+#define IPIPE_SHARED_FLAG    7
-+#define IPIPE_CALLASM_FLAG   8        /* Arch-dependent -- might be unused. */
-+
-+#define IPIPE_HANDLE_MASK    (1 << IPIPE_HANDLE_FLAG)
-+#define IPIPE_PASS_MASK      (1 << IPIPE_PASS_FLAG)
-+#define IPIPE_ENABLE_MASK    (1 << IPIPE_ENABLE_FLAG)
-+#define IPIPE_DYNAMIC_MASK   IPIPE_HANDLE_MASK
-+#define IPIPE_EXCLUSIVE_MASK (1 << IPIPE_EXCLUSIVE_FLAG)
-+#define IPIPE_STICKY_MASK    (1 << IPIPE_STICKY_FLAG)
-+#define IPIPE_SYSTEM_MASK    (1 << IPIPE_SYSTEM_FLAG)
-+#define IPIPE_LOCK_MASK      (1 << IPIPE_LOCK_FLAG)
-+#define IPIPE_SHARED_MASK    (1 << IPIPE_SHARED_FLAG)
-+#define IPIPE_SYNC_MASK      (1 << IPIPE_SYNC_FLAG)
-+#define IPIPE_CALLASM_MASK   (1 << IPIPE_CALLASM_FLAG)
-+
-+#define IPIPE_DEFAULT_MASK  (IPIPE_HANDLE_MASK|IPIPE_PASS_MASK)
-+
-+typedef struct adattr {
-+
-+    unsigned domid;           /* Domain identifier -- Magic value set by caller */
-+    const char *name;         /* Domain name -- Warning: won't be dup'ed! */
-+    int priority;             /* Priority in interrupt pipeline */
-+    void (*entry)(int);               /* Domain entry point */
-+    int estacksz;             /* Stack size for entry context -- 0 means unspec */
-+    void (*dswitch)(void);    /* Handler called each time the domain is switched in */
-+    int nptdkeys;             /* Max. number of per-thread data keys */
-+    void (*ptdset)(int,void *);       /* Routine to set pt values */
-+    void *(*ptdget)(int);     /* Routine to get pt values */
-+
-+} adattr_t;
-+
-+typedef struct admutex {
-+
-+    raw_spinlock_t lock;
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+    adomain_t *sleepq, /* Pending domain queue */
-+            *owner;   /* Domain owning the mutex */
-+#ifdef CONFIG_SMP
-+    volatile int owncpu;
-+#define ADEOS_MUTEX_UNLOCKED { RAW_SPIN_LOCK_UNLOCKED, NULL, NULL, -1 }
-+#else  /* !CONFIG_SMP */
-+#define ADEOS_MUTEX_UNLOCKED { RAW_SPIN_LOCK_UNLOCKED, NULL, NULL }
-+#endif /* CONFIG_SMP */
-+#else /* !CONFIG_ADEOS_THREADS */
-+#define ADEOS_MUTEX_UNLOCKED { RAW_SPIN_LOCK_UNLOCKED }
-+#endif /* CONFIG_ADEOS_THREADS */
-+
-+} admutex_t;
-+
-+typedef void (*adevhand_t)(adevinfo_t *);
-+
-+extern int adp_pipelined;
-+
-+extern adomain_t *adp_cpu_current[],
-+                 *adp_root;
-+
-+extern int __adeos_event_monitors[];
-+
-+extern unsigned __adeos_printk_virq;
-+
-+extern unsigned long __adeos_virtual_irq_map;
-+
-+extern struct list_head __adeos_pipeline;
-+
-+extern raw_spinlock_t __adeos_pipelock;
-+
-+#ifdef CONFIG_ADEOS_PROFILING
-+
-+typedef struct adprofdata {
-+
-+    struct {
-+      unsigned long long t_handled;
-+      unsigned long long t_synced;
-+      unsigned long n_handled;
-+      unsigned long n_synced;
-+    } irqs[IPIPE_NR_IRQS];
-+
-+} adprofdata_t;
-+
-+extern adprofdata_t __adeos_profile_data[ADEOS_NR_CPUS];
-+
-+#endif /* CONFIG_ADEOS_PROFILING */
-+
-+/* Private interface */
-+
-+#ifdef CONFIG_PROC_FS
-+void __adeos_init_proc(void);
-+#endif /* CONFIG_PROC_FS */
-+
-+void __adeos_takeover(void);
-+
-+asmlinkage int __adeos_handle_event(unsigned event,
-+                                  void *evdata);
-+
-+void __adeos_flush_printk(unsigned irq);
-+
-+void __adeos_dump_state(void);
-+
-+static inline void __adeos_schedule_head(void *evdata) {
-+
-+    if (__adeos_event_monitors[ADEOS_SCHEDULE_HEAD] > 0)
-+      __adeos_handle_event(ADEOS_SCHEDULE_HEAD,evdata);
-+}
-+
-+static inline int __adeos_schedule_tail(void *evdata) {
-+
-+    if (__adeos_event_monitors[ADEOS_SCHEDULE_TAIL] > 0)
-+      return __adeos_handle_event(ADEOS_SCHEDULE_TAIL,evdata);
-+
-+    return 0;
-+}
-+
-+static inline void __adeos_enter_process(void) {
-+
-+    if (__adeos_event_monitors[ADEOS_ENTER_PROCESS] > 0)
-+      __adeos_handle_event(ADEOS_ENTER_PROCESS,NULL);
-+}
-+
-+static inline void __adeos_exit_process(void *evdata) {
-+
-+    if (__adeos_event_monitors[ADEOS_EXIT_PROCESS] > 0)
-+      __adeos_handle_event(ADEOS_EXIT_PROCESS,evdata);
-+}
-+
-+static inline int __adeos_signal_process(void *evdata) {
-+
-+    if (__adeos_event_monitors[ADEOS_SIGNAL_PROCESS] > 0)
-+      return __adeos_handle_event(ADEOS_SIGNAL_PROCESS,evdata);
-+
-+    return 0;
-+}
-+
-+static inline void __adeos_kick_process(void *evdata) {
-+
-+    if (__adeos_event_monitors[ADEOS_KICK_PROCESS] > 0)
-+      __adeos_handle_event(ADEOS_KICK_PROCESS,evdata);
-+}
-+
-+static inline int __adeos_renice_process(void *evdata) {
-+
-+    if (__adeos_event_monitors[ADEOS_RENICE_PROCESS] > 0)
-+      return __adeos_handle_event(ADEOS_RENICE_PROCESS,evdata);
-+
-+    return 0;
-+}
-+
-+void __adeos_stall_root(void);
-+
-+void __adeos_unstall_root(void);
-+
-+unsigned long __adeos_test_root(void);
-+
-+unsigned long __adeos_test_and_stall_root(void);
-+
-+void fastcall __adeos_restore_root(unsigned long flags);
-+
-+void __adeos_schedule_back_root(struct task_struct *prev);
-+
-+int __adeos_setscheduler_root(struct task_struct *p,
-+                            int policy,
-+                            int prio);
-+
-+void __adeos_reenter_root(struct task_struct *prev,
-+                        int policy,
-+                        int prio);
-+
-+int fastcall __adeos_schedule_irq(unsigned irq,
-+                                struct list_head *head);
-+
-+#define __adeos_pipeline_head_p(adp) (&(adp)->p_link == __adeos_pipeline.next)
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+
-+static inline int __adeos_domain_work_p (adomain_t *adp, int cpuid)
-+
-+{
-+    return (!test_bit(IPIPE_SLEEP_FLAG,&adp->cpudata[cpuid].status) ||
-+          (!test_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status) &&
-+           adp->cpudata[cpuid].irq_pending_hi != 0) ||
-+          test_bit(IPIPE_XPEND_FLAG,&adp->cpudata[cpuid].status));
-+}
-+
-+#else /* !CONFIG_ADEOS_THREADS */
-+
-+static inline int __adeos_domain_work_p (adomain_t *adp, int cpuid)
-+
-+{
-+    return (!test_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status) &&
-+          adp->cpudata[cpuid].irq_pending_hi != 0);
-+}
-+
-+static inline void __adeos_switch_to (adomain_t *out, adomain_t *in, int cpuid)
-+
-+{
-+    void adeos_suspend_domain(void);
-+
-+    /* "in" is guaranteed to be closer than "out" from the head of the
-+       pipeline (and obviously different). */
-+
-+    adp_cpu_current[cpuid] = in;
-+
-+    if (in->dswitch)
-+      in->dswitch();
-+
-+    adeos_suspend_domain(); /* Sync stage and propagate interrupts. */
-+    adeos_load_cpuid(); /* Processor might have changed. */
-+
-+    if (adp_cpu_current[cpuid] == in)
-+      /* Otherwise, something has changed the current domain under
-+         our feet recycling the register set; do not override. */
-+      adp_cpu_current[cpuid] = out;
-+}
-+
-+#endif /* CONFIG_ADEOS_THREADS */
-+
-+/* Public interface */
-+
-+int adeos_register_domain(adomain_t *adp,
-+                        adattr_t *attr);
-+
-+int adeos_unregister_domain(adomain_t *adp);
-+
-+void adeos_suspend_domain(void);
-+
-+int adeos_virtualize_irq_from(adomain_t *adp,
-+                            unsigned irq,
-+                            void (*handler)(unsigned irq),
-+                            int (*acknowledge)(unsigned irq),
-+                            unsigned modemask);
-+
-+static inline int adeos_virtualize_irq(unsigned irq,
-+                                     void (*handler)(unsigned irq),
-+                                     int (*acknowledge)(unsigned irq),
-+                                     unsigned modemask) {
-+
-+    return adeos_virtualize_irq_from(adp_current,
-+                                   irq,
-+                                   handler,
-+                                   acknowledge,
-+                                   modemask);
-+}
-+
-+int adeos_control_irq(unsigned irq,
-+                    unsigned clrmask,
-+                    unsigned setmask);
-+
-+cpumask_t adeos_set_irq_affinity(unsigned irq,
-+                               cpumask_t cpumask);
-+
-+static inline int adeos_share_irq (unsigned irq, int (*acknowledge)(unsigned irq)) {
-+
-+    return adeos_virtualize_irq(irq,
-+                              ADEOS_SAME_HANDLER,
-+                              acknowledge,
-+                              IPIPE_SHARED_MASK|IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
-+}
-+
-+unsigned adeos_alloc_irq(void);
-+
-+int adeos_free_irq(unsigned irq);
-+
-+int fastcall adeos_trigger_irq(unsigned irq);
-+
-+static inline int adeos_propagate_irq(unsigned irq) {
-+
-+    return __adeos_schedule_irq(irq,adp_current->p_link.next);
-+}
-+
-+static inline int adeos_schedule_irq(unsigned irq) {
-+
-+    return __adeos_schedule_irq(irq,&adp_current->p_link);
-+}
-+
-+int fastcall adeos_send_ipi(unsigned ipi,
-+                          cpumask_t cpumask);
-+
-+static inline void adeos_stall_pipeline_from (adomain_t *adp)
-+
-+{
-+    adeos_declare_cpuid;
-+#ifdef CONFIG_SMP
-+    unsigned long flags;
-+
-+    adeos_lock_cpu(flags);
-+
-+    __set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+
-+    if (!__adeos_pipeline_head_p(adp))
-+      adeos_unlock_cpu(flags);
-+#else /* CONFIG_SMP */
-+    set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+
-+    if (__adeos_pipeline_head_p(adp))
-+      adeos_hw_cli();
-+#endif /* CONFIG_SMP */
-+}
-+
-+static inline unsigned long adeos_test_pipeline_from (adomain_t *adp)
-+
-+{
-+    unsigned long flags, s;
-+    adeos_declare_cpuid;
-+    
-+    adeos_get_cpu(flags);
-+    s = test_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+    adeos_put_cpu(flags);
-+
-+    return s;
-+}
-+
-+static inline unsigned long adeos_test_and_stall_pipeline_from (adomain_t *adp)
-+
-+{
-+    adeos_declare_cpuid;
-+    unsigned long s;
-+#ifdef CONFIG_SMP
-+    unsigned long flags;
-+
-+    adeos_lock_cpu(flags);
-+
-+    s = __test_and_set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+
-+    if (!__adeos_pipeline_head_p(adp))
-+      adeos_unlock_cpu(flags);
-+#else /* CONFIG_SMP */
-+    s = test_and_set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+
-+    if (__adeos_pipeline_head_p(adp))
-+      adeos_hw_cli();
-+#endif /* CONFIG_SMP */
-+    
-+    return s;
-+}
-+
-+void fastcall adeos_unstall_pipeline_from(adomain_t *adp);
-+
-+static inline unsigned long adeos_test_and_unstall_pipeline_from(adomain_t *adp)
-+
-+{
-+    unsigned long flags, s;
-+    adeos_declare_cpuid;
-+    
-+    adeos_get_cpu(flags);
-+    s = test_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+    adeos_unstall_pipeline_from(adp);
-+    adeos_put_cpu(flags);
-+
-+    return s;
-+}
-+
-+static inline void adeos_unstall_pipeline(void)
-+
-+{
-+    adeos_unstall_pipeline_from(adp_current);
-+}
-+
-+static inline unsigned long adeos_test_and_unstall_pipeline(void)
-+
-+{
-+    return adeos_test_and_unstall_pipeline_from(adp_current);
-+}
-+
-+static inline unsigned long adeos_test_pipeline (void)
-+
-+{
-+    return adeos_test_pipeline_from(adp_current);
-+}
-+
-+static inline unsigned long adeos_test_and_stall_pipeline (void)
-+
-+{
-+    return adeos_test_and_stall_pipeline_from(adp_current);
-+}
-+
-+static inline void adeos_restore_pipeline_from (adomain_t *adp, unsigned long flags)
-+
-+{
-+    if (flags)
-+      adeos_stall_pipeline_from(adp);
-+    else
-+      adeos_unstall_pipeline_from(adp);
-+}
-+
-+static inline void adeos_stall_pipeline (void)
-+
-+{
-+    adeos_stall_pipeline_from(adp_current);
-+}
-+
-+static inline void adeos_restore_pipeline (unsigned long flags)
-+
-+{
-+    adeos_restore_pipeline_from(adp_current,flags);
-+}
-+
-+static inline void adeos_restore_pipeline_nosync (adomain_t *adp, unsigned long flags, int cpuid)
-+
-+{
-+    /* If cpuid is current, then it must be held on entry
-+       (adeos_get_cpu/adeos_hw_local_irq_save/adeos_hw_cli). */
-+
-+    if (flags)
-+      __set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+    else
-+      __clear_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+}
-+
-+adevhand_t adeos_catch_event_from(adomain_t *adp,
-+                                unsigned event,
-+                                adevhand_t handler);
-+
-+static inline adevhand_t adeos_catch_event (unsigned event, adevhand_t handler)
-+
-+{
-+    return adeos_catch_event_from(adp_current,event,handler);
-+}
-+
-+static inline void adeos_propagate_event(adevinfo_t *evinfo)
-+
-+{
-+    evinfo->propagate = 1;
-+}
-+
-+void adeos_init_attr(adattr_t *attr);
-+
-+int adeos_get_sysinfo(adsysinfo_t *sysinfo);
-+
-+int adeos_tune_timer(unsigned long ns,
-+                   int flags);
-+
-+int adeos_alloc_ptdkey(void);
-+
-+int adeos_free_ptdkey(int key);
-+
-+int adeos_set_ptd(int key,
-+                void *value);
-+
-+void *adeos_get_ptd(int key);
-+
-+unsigned long adeos_critical_enter(void (*syncfn)(void));
-+
-+void adeos_critical_exit(unsigned long flags);
-+
-+int adeos_init_mutex(admutex_t *mutex);
-+
-+int adeos_destroy_mutex(admutex_t *mutex);
-+
-+unsigned long fastcall adeos_lock_mutex(admutex_t *mutex);
-+
-+void fastcall adeos_unlock_mutex(admutex_t *mutex,
-+                               unsigned long flags);
-+
-+static inline void adeos_set_printk_sync (adomain_t *adp) {
-+    set_bit(ADEOS_SPRINTK_FLAG,&adp->flags);
-+}
-+
-+static inline void adeos_set_printk_async (adomain_t *adp) {
-+    clear_bit(ADEOS_SPRINTK_FLAG,&adp->flags);
-+}
-+
-+#define spin_lock_irqsave_hw_cond(lock,flags)      spin_lock_irqsave_hw(lock,flags)
-+#define spin_unlock_irqrestore_hw_cond(lock,flags) spin_unlock_irqrestore_hw(lock,flags)
-+
-+#define pic_irq_lock(irq)     \
-+      do {            \
-+              adeos_declare_cpuid; \
-+              adeos_load_cpuid();             \
-+              __adeos_lock_irq(adp_cpu_current[cpuid], cpuid, irq); \
-+      } while(0)
-+
-+#define pic_irq_unlock(irq)   \
-+      do {            \
-+              adeos_declare_cpuid; \
-+              adeos_load_cpuid();          \
-+              __adeos_unlock_irq(adp_cpu_current[cpuid], irq); \
-+      } while(0)
-+
-+#else /* !CONFIG_ADEOS_CORE */
-+
-+#define spin_lock_irqsave_hw(lock,flags)      spin_lock_irqsave(lock, flags)
-+#define spin_unlock_irqrestore_hw(lock,flags) spin_unlock_irqrestore(lock, flags)
-+#define spin_lock_irqsave_hw_cond(lock,flags)      do { flags = 0; spin_lock(lock); } while(0)
-+#define spin_unlock_irqrestore_hw_cond(lock,flags) spin_unlock(lock)
-+
-+#define pic_irq_lock(irq)     do { } while(0)
-+#define pic_irq_unlock(irq)   do { } while(0)
-+
-+#endif        /* CONFIG_ADEOS_CORE */
-+
-+#endif /* !__LINUX_ADEOS_H */
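
To make the public interface declared above concrete, here is a minimal sketch of how a client domain would typically be registered and attach an interrupt handler. It is not part of the patch; the demo_* names and DEMO_IRQ are invented for illustration, error handling is trimmed, and details such as the entry-point calling convention should be checked against the Adeos documentation.

/* Sketch only, not part of the patch.  The demo_* identifiers and DEMO_IRQ
 * are hypothetical; error handling is omitted for brevity.
 */
#include <linux/module.h>
#include <linux/adeos.h>

#define DEMO_IRQ 7                      /* hypothetical hardware IRQ number */

static adomain_t demo_domain;

static void demo_irq_handler(unsigned irq)
{
    /* Time-critical work would go here; then hand the IRQ down the
       pipeline so the root (Linux) domain still sees it. */
    adeos_propagate_irq(irq);
}

static void demo_entry(int iflag)
{
    (void)iflag;                        /* see the adattr_t entry hook above */

    /* Trap DEMO_IRQ in this domain and keep passing it downstream. */
    adeos_virtualize_irq(DEMO_IRQ, &demo_irq_handler, NULL,
                         IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
}

static int __init demo_init(void)
{
    adattr_t attr;

    adeos_init_attr(&attr);             /* start from the default attributes */
    attr.name     = "demo";
    attr.domid    = 0x44454d4f;
    attr.priority = ADEOS_ROOT_PRI + 1; /* ahead of Linux in the pipeline */
    attr.entry    = &demo_entry;

    return adeos_register_domain(&demo_domain, &attr);
}

static void __exit demo_exit(void)
{
    adeos_unregister_domain(&demo_domain);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Setting attr.priority above ADEOS_ROOT_PRI is meant to place the domain ahead of Linux in the pipeline, so its handler sees DEMO_IRQ first and adeos_propagate_irq() then schedules the interrupt for the next stage down.
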
-diff -uNrp linux-2.6.10/include/linux/preempt.h linux-2.6.10-ppc-adeos/include/linux/preempt.h
---- linux-2.6.10/include/linux/preempt.h       2004-12-24 22:34:26.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/include/linux/preempt.h     2005-03-27 16:35:19.000000000 +0200
-@@ -25,6 +25,47 @@ do { \
- 
- asmlinkage void preempt_schedule(void);
- 
-+#ifdef CONFIG_ADEOS_CORE
-+
-+#include <asm/adeos.h>
-+
-+extern adomain_t *adp_cpu_current[],
-+                 *adp_root;
-+
-+#define preempt_disable() \
-+do { \
-+      if (adp_current == adp_root) { \
-+          inc_preempt_count();       \
-+          barrier(); \
-+        } \
-+} while (0)
-+
-+#define preempt_enable_no_resched() \
-+do { \
-+        if (adp_current == adp_root) { \
-+          barrier(); \
-+          dec_preempt_count(); \
-+        } \
-+} while (0)
-+
-+#define preempt_check_resched() \
-+do { \
-+        if (adp_current == adp_root) { \
-+          if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
-+              preempt_schedule(); \
-+        } \
-+} while (0)
-+
-+#define preempt_enable() \
-+do { \
-+      if (adp_current == adp_root) { \
-+          preempt_enable_no_resched(); \
-+          preempt_check_resched(); \
-+        } \
-+} while (0)
-+
-+#else /* !CONFIG_ADEOS_CORE */
-+
- #define preempt_disable() \
- do { \
-       inc_preempt_count(); \
-@@ -49,6 +90,8 @@ do { \
-       preempt_check_resched(); \
- } while (0)
- 
-+#endif /* CONFIG_ADEOS_CORE */
-+
- #else
- 
- #define preempt_disable()             do { } while (0)
-diff -uNrp linux-2.6.10/include/linux/sched.h linux-2.6.10-ppc-adeos/include/linux/sched.h
---- linux-2.6.10/include/linux/sched.h 2004-12-24 22:33:59.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/include/linux/sched.h       2005-03-27 16:35:19.000000000 +0200
-@@ -4,6 +4,9 @@
- #include <asm/param.h>        /* for HZ */
- 
- #include <linux/config.h>
-+#ifdef CONFIG_ADEOS_CORE
-+#include <linux/adeos.h>
-+#endif /* CONFIG_ADEOS_CORE */
- #include <linux/capability.h>
- #include <linux/threads.h>
- #include <linux/kernel.h>
-@@ -664,6 +667,10 @@ struct task_struct {
-       struct mempolicy *mempolicy;
-       short il_next;          /* could be shared with used_math */
- #endif
-+
-+#ifdef CONFIG_ADEOS_CORE
-+        void *ptd[ADEOS_ROOT_NPTDKEYS];
-+#endif /* CONFIG_ADEOS_CORE */
- };
- 
- static inline pid_t process_group(struct task_struct *tsk)
-diff -uNrp linux-2.6.10/init/main.c linux-2.6.10-ppc-adeos/init/main.c
---- linux-2.6.10/init/main.c   2004-12-24 22:34:01.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/init/main.c 2005-03-27 16:35:19.000000000 +0200
-@@ -526,6 +526,11 @@ asmlinkage void __init start_kernel(void
-       init_timers();
-       softirq_init();
-       time_init();
-+#ifdef CONFIG_ADEOS_CORE
-+      /* On PPC, we need calibrated values for the decrementer to
-+         initialize, so run time_init() first. */
-+      __adeos_init();
-+#endif /* CONFIG_ADEOS_CORE */
- 
-       /*
-        * HACK ALERT! This is early. We're enabling the console before
-@@ -652,6 +657,11 @@ static void __init do_basic_setup(void)
-       sock_init();
- 
-       do_initcalls();
-+
-+#ifdef CONFIG_ADEOS
-+      /* i.e. Permanent pipelining from boot onwards. */
-+      __adeos_takeover();
-+#endif /* CONFIG_ADEOS */
- }
- 
- static void do_pre_smp_initcalls(void)
-diff -uNrp linux-2.6.10/kernel/Makefile linux-2.6.10-ppc-adeos/kernel/Makefile
---- linux-2.6.10/kernel/Makefile       2004-12-24 22:34:26.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/kernel/Makefile     2005-03-27 16:35:19.000000000 +0200
-@@ -9,6 +9,7 @@ obj-y     = sched.o fork.o exec_domain.o
-           rcupdate.o intermodule.o extable.o params.o posix-timers.o \
-           kthread.o wait.o kfifo.o sys_ni.o
- 
-+obj-$(CONFIG_ADEOS_CORE) += adeos.o
- obj-$(CONFIG_FUTEX) += futex.o
- obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
- obj-$(CONFIG_SMP) += cpu.o spinlock.o
-diff -uNrp linux-2.6.10/kernel/adeos.c linux-2.6.10-ppc-adeos/kernel/adeos.c
---- linux-2.6.10/kernel/adeos.c        1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/kernel/adeos.c      2005-09-10 19:20:07.000000000 +0200
-@@ -0,0 +1,828 @@
-+/*
-+ *   linux/kernel/adeos.c
-+ *
-+ *   Copyright (C) 2002,2003,2004 Philippe Gerum.
-+ *
-+ *   This program is free software; you can redistribute it and/or modify
-+ *   it under the terms of the GNU General Public License as published by
-+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
-+ *   USA; either version 2 of the License, or (at your option) any later
-+ *   version.
-+ *
-+ *   This program is distributed in the hope that it will be useful,
-+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ *   GNU General Public License for more details.
-+ *
-+ *   You should have received a copy of the GNU General Public License
-+ *   along with this program; if not, write to the Free Software
-+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ *   Architecture-independent ADEOS core support.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/module.h>
-+#ifdef CONFIG_PROC_FS
-+#include <linux/proc_fs.h>
-+#endif /* CONFIG_PROC_FS */
-+
-+/* The pre-defined domain slot for the root domain. */
-+static adomain_t adeos_root_domain;
-+
-+/* A constant pointer to the root domain. */
-+adomain_t *adp_root = &adeos_root_domain;
-+
-+/* A pointer to the current domain. */
-+adomain_t *adp_cpu_current[ADEOS_NR_CPUS] = { [ 0 ... ADEOS_NR_CPUS - 1] = &adeos_root_domain };
-+
-+/* The spinlock protecting from races while modifying the pipeline. */
-+raw_spinlock_t __adeos_pipelock = RAW_SPIN_LOCK_UNLOCKED;
-+
-+/* The pipeline data structure. Enqueues adomain_t objects by priority. */
-+struct list_head __adeos_pipeline;
-+
-+/* A global flag telling whether Adeos pipelining is engaged. */
-+int adp_pipelined;
-+
-+/* An array of global counters tracking domains monitoring events. */
-+int __adeos_event_monitors[ADEOS_NR_EVENTS] = { [ 0 ... ADEOS_NR_EVENTS - 1] = 0 };
-+
-+/* The allocated VIRQ map. */
-+unsigned long __adeos_virtual_irq_map = 0;
-+
-+/* A VIRQ to kick printk() output out when the root domain is in control. */
-+unsigned __adeos_printk_virq;
-+
-+#ifdef CONFIG_ADEOS_PROFILING
-+adprofdata_t __adeos_profile_data[ADEOS_NR_CPUS];
-+#endif /* CONFIG_ADEOS_PROFILING */
-+
-+static void __adeos_set_root_ptd (int key, void *value) {
-+
-+    current->ptd[key] = value;
-+}
-+
-+static void *__adeos_get_root_ptd (int key) {
-+
-+    return current->ptd[key];
-+}
-+
-+/* adeos_init() -- Initialization routine of the ADEOS layer. Called
-+   by the host kernel early during the boot procedure. */
-+
-+void __adeos_init (void)
-+
-+{
-+    adomain_t *adp = &adeos_root_domain;
-+
-+    __adeos_check_platform(); /* Do platform dependent checks first. */
-+
-+    /*
-+      A lightweight registration code for the root domain. Current
-+      assumptions are:
-+      - We are running on the boot CPU, and secondary CPUs are still
-+      lost in space.
-+      - adeos_root_domain has been zero'ed.
-+    */
-+
-+    INIT_LIST_HEAD(&__adeos_pipeline);
-+
-+    adp->name = "Linux";
-+    adp->domid = ADEOS_ROOT_ID;
-+    adp->priority = ADEOS_ROOT_PRI;
-+    adp->ptd_setfun = &__adeos_set_root_ptd;
-+    adp->ptd_getfun = &__adeos_get_root_ptd;
-+    adp->ptd_keymax = ADEOS_ROOT_NPTDKEYS;
-+
-+    __adeos_init_stage(adp);
-+
-+    INIT_LIST_HEAD(&adp->p_link);
-+    list_add_tail(&adp->p_link,&__adeos_pipeline);
-+
-+    __adeos_init_platform();
-+
-+    __adeos_printk_virq = adeos_alloc_irq(); /* Cannot fail here. */
-+    adp->irqs[__adeos_printk_virq].handler = &__adeos_flush_printk; 
-+    adp->irqs[__adeos_printk_virq].acknowledge = NULL; 
-+    adp->irqs[__adeos_printk_virq].control = IPIPE_HANDLE_MASK; 
-+
-+    printk(KERN_INFO "Adeos %s: Root domain %s registered.\n",
-+         ADEOS_VERSION_STRING,
-+         adp->name);
-+}
-+
-+/* adeos_handle_event() -- Adeos' generic event handler. This routine
-+   calls the per-domain handlers registered for a given
-+   exception/event. Each domain before the one which raised the event
-+   in the pipeline will get a chance to process the event. The latter
-+   will eventually be allowed to process its own event too if a valid
-+   handler exists for it.  Handler executions are always scheduled by
-+   the domain which raised the event for the higher priority domains
-+   wanting to be notified of such event.  Note: evdata might be
-+   NULL. */
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+
-+asmlinkage int __adeos_handle_event (unsigned event, void *evdata)
-+/* asmlinkage is there just in case CONFIG_REGPARM is enabled... */
-+{
-+    struct list_head *pos, *npos;
-+    adomain_t *this_domain;
-+    unsigned long flags;
-+    adeos_declare_cpuid;
-+    adevinfo_t evinfo;
-+    int propagate = 1;
-+
-+    adeos_lock_cpu(flags);
-+
-+    this_domain = adp_cpu_current[cpuid];
-+
-+    list_for_each_safe(pos,npos,&__adeos_pipeline) {
-+
-+      adomain_t *next_domain = list_entry(pos,adomain_t,p_link);
-+
-+      if (next_domain->events[event].handler != NULL)
-+          {
-+          if (next_domain == this_domain)
-+              {
-+              adeos_unlock_cpu(flags);
-+              evinfo.domid = this_domain->domid;
-+              evinfo.event = event;
-+              evinfo.evdata = evdata;
-+              evinfo.propagate = 0;
-+              this_domain->events[event].handler(&evinfo);
-+              propagate = evinfo.propagate;
-+              goto done;
-+              }
-+
-+          next_domain->cpudata[cpuid].event_info.domid = this_domain->domid;
-+          next_domain->cpudata[cpuid].event_info.event = event;
-+          next_domain->cpudata[cpuid].event_info.evdata = evdata;
-+          next_domain->cpudata[cpuid].event_info.propagate = 0;
-+          __set_bit(IPIPE_XPEND_FLAG,&next_domain->cpudata[cpuid].status);
-+
-+          /* Let the higher priority domain process the event. */
-+          __adeos_switch_to(this_domain,next_domain,cpuid);
-+          
-+          adeos_load_cpuid(); /* Processor might have changed. */
-+
-+          if (!next_domain->cpudata[cpuid].event_info.propagate)
-+              {
-+              propagate = 0;
-+              break;
-+              }
-+          }
-+
-+      if (next_domain != adp_root && /* NEVER sync the root stage here. */
-+          next_domain->cpudata[cpuid].irq_pending_hi != 0 &&
-+          !test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status))
-+          {
-+          if (next_domain != this_domain)
-+              __adeos_switch_to(this_domain,next_domain,cpuid);
-+          else
-+              __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+
-+          adeos_load_cpuid(); /* Processor might have changed. */
-+          }
-+
-+      if (next_domain == this_domain)
-+          break;
-+    }
-+
-+    adeos_unlock_cpu(flags);
-+
-+ done:
-+
-+    return !propagate;
-+}
-+
-+#else /* !CONFIG_ADEOS_THREADS */
-+
-+asmlinkage int __adeos_handle_event (unsigned event, void *evdata)
-+/* asmlinkage is there just in case CONFIG_REGPARM is enabled... */
-+{
-+    adomain_t *start_domain, *this_domain, *next_domain;
-+    struct list_head *pos, *npos;
-+    unsigned long flags;
-+    adeos_declare_cpuid;
-+    adevinfo_t evinfo;
-+    int propagate = 1;
-+
-+    adeos_lock_cpu(flags);
-+
-+    start_domain = this_domain = adp_cpu_current[cpuid];
-+
-+    list_for_each_safe(pos,npos,&__adeos_pipeline) {
-+
-+      next_domain = list_entry(pos,adomain_t,p_link);
-+
-+      /*  Note: Domain migration may occur while running event or
-+          interrupt handlers, in which case the current register set
-+          is going to be recycled for a different domain than the
-+          initiating one. We do care for that, always tracking the
-+          current domain descriptor upon return from those
-+          handlers. */
-+
-+      if (next_domain->events[event].handler != NULL)
-+          {
-+          adp_cpu_current[cpuid] = next_domain;
-+          evinfo.domid = start_domain->domid;
-+          adeos_unlock_cpu(flags);
-+          evinfo.event = event;
-+          evinfo.evdata = evdata;
-+          evinfo.propagate = 0;
-+          next_domain->events[event].handler(&evinfo);
-+          adeos_lock_cpu(flags);
-+
-+          if (adp_cpu_current[cpuid] != next_domain)
-+              this_domain = adp_cpu_current[cpuid];
-+
-+          propagate = evinfo.propagate;
-+          }
-+
-+      if (next_domain != adp_root && /* NEVER sync the root stage here. */
-+          next_domain->cpudata[cpuid].irq_pending_hi != 0 &&
-+          !test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status))
-+          {
-+          adp_cpu_current[cpuid] = next_domain;
-+          __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+          adeos_load_cpuid();
-+
-+          if (adp_cpu_current[cpuid] != next_domain)
-+              this_domain = adp_cpu_current[cpuid];
-+          }
-+
-+      adp_cpu_current[cpuid] = this_domain;
-+
-+      if (next_domain == this_domain || !propagate)
-+          break;
-+    }
-+
-+    adeos_unlock_cpu(flags);
-+
-+    return !propagate;
-+}
-+
-+#endif /* CONFIG_ADEOS_THREADS */
-+
-+void __adeos_stall_root (void)
-+
-+{
-+    if (adp_pipelined)
-+      {
-+      adeos_declare_cpuid;
-+
-+#ifdef CONFIG_SMP
-+      unsigned long flags;
-+      adeos_lock_cpu(flags);
-+      __set_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
-+      adeos_unlock_cpu(flags);
-+#else /* !CONFIG_SMP */
-+      set_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
-+#endif /* CONFIG_SMP */
-+      }
-+    else
-+      adeos_hw_cli();
-+}
-+
-+void __adeos_unstall_root (void)
-+
-+{
-+    if (adp_pipelined)
-+      {
-+      adeos_declare_cpuid;
-+
-+      adeos_hw_cli();
-+
-+      adeos_load_cpuid();
-+
-+      __clear_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
-+
-+      if (adp_root->cpudata[cpuid].irq_pending_hi != 0)
-+          __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+      }
-+
-+    adeos_hw_sti();   /* Needed in both cases. */
-+}
-+
-+unsigned long __adeos_test_root (void)
-+
-+{
-+    if (adp_pipelined)
-+      {
-+      adeos_declare_cpuid;
-+      unsigned long s;
-+
-+#ifdef CONFIG_SMP
-+      unsigned long flags;
-+      adeos_lock_cpu(flags);
-+      s = test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
-+      adeos_unlock_cpu(flags);
-+#else /* !CONFIG_SMP */
-+      s = test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
-+#endif /* CONFIG_SMP */
-+
-+      return s;
-+      }
-+
-+    return adeos_hw_irqs_disabled();
-+}
-+
-+unsigned long __adeos_test_and_stall_root (void)
-+
-+{
-+    unsigned long flags;
-+
-+    if (adp_pipelined)
-+      {
-+      adeos_declare_cpuid;
-+      unsigned long s;
-+
-+#ifdef CONFIG_SMP
-+      adeos_lock_cpu(flags);
-+      s = __test_and_set_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
-+      adeos_unlock_cpu(flags);
-+#else /* !CONFIG_SMP */
-+      s = test_and_set_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
-+#endif /* CONFIG_SMP */
-+
-+      return s;
-+      }
-+
-+    adeos_hw_local_irq_save(flags);
-+
-+    return !adeos_hw_test_iflag(flags);
-+}
-+
-+void fastcall __adeos_restore_root (unsigned long flags)
-+
-+{
-+    if (flags)
-+      __adeos_stall_root();
-+    else
-+      __adeos_unstall_root();
-+}
-+
-+/* adeos_unstall_pipeline_from() -- Unstall the interrupt pipeline and
-+   synchronize pending events from a given domain. */
-+
-+void fastcall adeos_unstall_pipeline_from (adomain_t *adp)
-+
-+{
-+    adomain_t *this_domain;
-+    struct list_head *pos;
-+    unsigned long flags;
-+    adeos_declare_cpuid;
-+
-+    adeos_lock_cpu(flags);
-+
-+    __clear_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+
-+    this_domain = adp_cpu_current[cpuid];
-+
-+    if (adp == this_domain)
-+      {
-+      if (adp->cpudata[cpuid].irq_pending_hi != 0)
-+          __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+
-+      goto release_cpu_and_exit;
-+      }
-+
-+    /* Attempt to flush all events that might be pending at the
-+       unstalled domain level. This code is roughly lifted from
-+       __adeos_walk_pipeline(). */
-+
-+    list_for_each(pos,&__adeos_pipeline) {
-+
-+      adomain_t *next_domain = list_entry(pos,adomain_t,p_link);
-+
-+      if (test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status))
-+          break; /* Stalled stage -- do not go further. */
-+
-+      if (next_domain->cpudata[cpuid].irq_pending_hi != 0)
-+          {
-+          /* Since the critical IPI might be triggered by the
-+             following actions, the current domain might not be
-+             linked to the pipeline anymore after its handler
-+             returns on SMP boxen, even if the domain remains valid
-+             (see adeos_unregister_domain()), so don't make any
-+             hazardous assumptions here. */
-+
-+          if (next_domain == this_domain)
-+              __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+          else
-+              {
-+              __adeos_switch_to(this_domain,next_domain,cpuid);
-+
-+              adeos_load_cpuid(); /* Processor might have changed. */
-+
-+              if (this_domain->cpudata[cpuid].irq_pending_hi != 0 &&
-+                  !test_bit(IPIPE_STALL_FLAG,&this_domain->cpudata[cpuid].status))
-+                  __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+              }
-+          
-+          break;
-+          }
-+      else if (next_domain == this_domain)
-+          break;
-+    }
-+
-+release_cpu_and_exit:
-+
-+    if (__adeos_pipeline_head_p(adp))
-+      adeos_hw_sti();
-+    else
-+      adeos_unlock_cpu(flags);
-+}
-+
-+/* adeos_suspend_domain() -- tell the ADEOS layer that the current
-+   domain is now dormant. The calling domain is switched out, while
-+   the next domain with work in progress or pending in the pipeline is
-+   switched in. */
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+
-+#define __flush_pipeline_stage() \
-+do { \
-+    if (!test_bit(IPIPE_STALL_FLAG,&cpudata->status) && \
-+      cpudata->irq_pending_hi != 0) \
-+      { \
-+      __adeos_sync_stage(IPIPE_IRQMASK_ANY); \
-+      adeos_load_cpuid(); \
-+      cpudata = &this_domain->cpudata[cpuid]; \
-+      } \
-+} while(0)
-+
-+void adeos_suspend_domain (void)
-+
-+{
-+    adomain_t *this_domain, *next_domain;
-+    struct adcpudata *cpudata;
-+    struct list_head *ln;
-+    unsigned long flags;
-+    adeos_declare_cpuid;
-+
-+    adeos_lock_cpu(flags);
-+
-+    this_domain = next_domain = adp_cpu_current[cpuid];
-+    cpudata = &this_domain->cpudata[cpuid];
-+
-+    /* A suspending domain implicitly unstalls the pipeline. */
-+    __clear_bit(IPIPE_STALL_FLAG,&cpudata->status);
-+
-+    /* Make sure that no event remains stuck in the pipeline. This
-+       could happen with emerging SMP instances, or domains which
-+       forget to unstall their stage before calling us. */
-+    __flush_pipeline_stage();
-+
-+    for (;;)
-+      {
-+      ln = next_domain->p_link.next;
-+
-+      if (ln == &__adeos_pipeline)    /* End of pipeline reached? */
-+          /* Caller should loop on its idle task on return. */
-+          goto release_cpu_and_exit;
-+
-+      next_domain = list_entry(ln,adomain_t,p_link);
-+
-+      /* Make sure the domain was preempted (i.e. not sleeping) or
-+         has some event to process before switching to it. */
-+
-+      if (__adeos_domain_work_p(next_domain,cpuid))
-+          break;
-+      }
-+
-+    /* Mark the outgoing domain as asleep (i.e. not preempted). */
-+    __set_bit(IPIPE_SLEEP_FLAG,&cpudata->status);
-+
-+    /* Suspend the calling domain, switching to the next one. */
-+    __adeos_switch_to(this_domain,next_domain,cpuid);
-+
-+#ifdef CONFIG_SMP
-+    adeos_load_cpuid();       /* Processor might have changed. */
-+    cpudata = &this_domain->cpudata[cpuid];
-+#endif /* CONFIG_SMP */
-+
-+    /* Clear the sleep bit for the incoming domain. */
-+    __clear_bit(IPIPE_SLEEP_FLAG,&cpudata->status);
-+
-+    /* Now, we are back into the calling domain. Flush the interrupt
-+       log and fire the event interposition handler if needed.  CPU
-+       migration is allowed in SMP-mode on behalf of an event handler
-+       provided that the current domain raised it. Otherwise, it's
-+       not. */
-+
-+    __flush_pipeline_stage();
-+
-+    if (__test_and_clear_bit(IPIPE_XPEND_FLAG,&cpudata->status))
-+      {
-+      adeos_unlock_cpu(flags);
-+      this_domain->events[cpudata->event_info.event].handler(&cpudata->event_info);
-+      return;
-+      }
-+
-+release_cpu_and_exit:
-+
-+    adeos_unlock_cpu(flags);
-+
-+    /* Return to the point of suspension in the calling domain. */
-+}
-+
-+#else /* !CONFIG_ADEOS_THREADS */
-+
-+void adeos_suspend_domain (void)
-+
-+{
-+    adomain_t *this_domain, *next_domain;
-+    struct list_head *ln;
-+    unsigned long flags;
-+    adeos_declare_cpuid;
-+
-+    adeos_lock_cpu(flags);
-+
-+    this_domain = next_domain = adp_cpu_current[cpuid];
-+
-+    __clear_bit(IPIPE_STALL_FLAG,&this_domain->cpudata[cpuid].status);
-+
-+    if (this_domain->cpudata[cpuid].irq_pending_hi != 0)
-+      goto sync_stage;
-+
-+    for (;;)
-+      {
-+      ln = next_domain->p_link.next;
-+
-+      if (ln == &__adeos_pipeline)
-+          break;
-+
-+      next_domain = list_entry(ln,adomain_t,p_link);
-+
-+      if (test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status))
-+          break;
-+
-+      if (next_domain->cpudata[cpuid].irq_pending_hi == 0)
-+          continue;
-+
-+      adp_cpu_current[cpuid] = next_domain;
-+
-+      if (next_domain->dswitch)
-+          next_domain->dswitch();
-+
-+ sync_stage:
-+
-+      __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+
-+      adeos_load_cpuid();     /* Processor might have changed. */
-+
-+      if (adp_cpu_current[cpuid] != next_domain)
-+          /* Something has changed the current domain under our feet
-+             recycling the register set; take note. */
-+          this_domain = adp_cpu_current[cpuid];
-+      }
-+
-+    adp_cpu_current[cpuid] = this_domain;
-+
-+    adeos_unlock_cpu(flags);
-+}
-+
-+#endif /* CONFIG_ADEOS_THREADS */
-+
-+/* adeos_alloc_irq() -- Allocate a virtual/soft pipelined interrupt.
-+   Virtual interrupts are handled in exactly the same way as their
-+   hw-generated counterparts. This is a very basic, one-way only,
-+   inter-domain communication system (see adeos_trigger_irq()).  Note:
-+   it is not necessary for a domain to allocate a virtual interrupt to
-+   trap it using adeos_virtualize_irq(). The newly allocated VIRQ
-+   number which can be passed to other IRQ-related services is
-+   returned on success, zero otherwise (i.e. no more virtual interrupt
-+   channel is available). We need this service as part of the Adeos
-+   bootstrap code, hence it must reside in a built-in area. */
-+
-+unsigned adeos_alloc_irq (void)
-+
-+{
-+    unsigned long flags, irq = 0;
-+    int ipos;
-+
-+    spin_lock_irqsave_hw(&__adeos_pipelock,flags);
-+
-+    if (__adeos_virtual_irq_map != ~0)
-+      {
-+      ipos = ffz(__adeos_virtual_irq_map);
-+      set_bit(ipos,&__adeos_virtual_irq_map);
-+      irq = ipos + IPIPE_VIRQ_BASE;
-+      }
-+
-+    spin_unlock_irqrestore_hw(&__adeos_pipelock,flags);
-+
-+    return irq;
-+}
-+
-+#ifdef CONFIG_PROC_FS
-+
-+#include <linux/proc_fs.h>
-+
-+static struct proc_dir_entry *adeos_proc_entry;
-+
-+static int __adeos_read_proc (char *page,
-+                            char **start,
-+                            off_t off,
-+                            int count,
-+                            int *eof,
-+                            void *data)
-+{
-+    unsigned long ctlbits;
-+    struct list_head *pos;
-+    unsigned irq, _irq;
-+    char *p = page;
-+    int len;
-+
-+#ifdef CONFIG_ADEOS_MODULE
-+    p += sprintf(p,"Adeos %s -- Pipelining: %s",ADEOS_VERSION_STRING,adp_pipelined ? "active" : "stopped");
-+#else /* !CONFIG_ADEOS_MODULE */
-+    p += sprintf(p,"Adeos %s -- Pipelining: permanent",ADEOS_VERSION_STRING);
-+#endif /* CONFIG_ADEOS_MODULE */
-+#ifdef CONFIG_ADEOS_THREADS
-+    p += sprintf(p, " (threaded)\n\n");
-+#else                         /* CONFIG_ADEOS_THREADS */
-+    p += sprintf(p, "\n\n");
-+#endif                                /* CONFIG_ADEOS_THREADS */
-+
-+    spin_lock(&__adeos_pipelock);
-+
-+    list_for_each(pos,&__adeos_pipeline) {
-+
-+      adomain_t *adp = list_entry(pos,adomain_t,p_link);
-+
-+      p += sprintf(p,"%8s: priority=%d, id=0x%.8x, ptdkeys=%d/%d\n",
-+                   adp->name,
-+                   adp->priority,
-+                   adp->domid,
-+                   adp->ptd_keycount,
-+                   adp->ptd_keymax);
-+      irq = 0;
-+
-+      while (irq < IPIPE_NR_IRQS)
-+          {
-+          ctlbits = (adp->irqs[irq].control & (IPIPE_HANDLE_MASK|IPIPE_PASS_MASK|IPIPE_STICKY_MASK));
-+
-+          if (irq >= IPIPE_NR_XIRQS && !adeos_virtual_irq_p(irq))
-+              {
-+              /* There might be a hole between the last external IRQ
-+                 and the first virtual one; skip it. */
-+              irq++;
-+              continue;
-+              }
-+
-+          if (adeos_virtual_irq_p(irq) && !test_bit(irq - IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map))
-+              {
-+              /* Non-allocated virtual IRQ; skip it. */
-+              irq++;
-+              continue;
-+              }
-+
-+          /* Attempt to group consecutive IRQ numbers having the
-+             same virtualization settings in a single line. */
-+
-+          _irq = irq;
-+
-+          while (++_irq < IPIPE_NR_IRQS)
-+              {
-+              if (adeos_virtual_irq_p(_irq) != adeos_virtual_irq_p(irq) ||
-+                  (adeos_virtual_irq_p(_irq) &&
-+                   !test_bit(_irq - IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map)) ||
-+                  ctlbits != (adp->irqs[_irq].control & (IPIPE_HANDLE_MASK|IPIPE_PASS_MASK|IPIPE_STICKY_MASK)))
-+                  break;
-+              }
-+
-+          if (_irq == irq + 1)
-+              p += sprintf(p,"\tirq%u: ",irq);
-+          else
-+              p += sprintf(p,"\tirq%u-%u: ",irq,_irq - 1);
-+
-+          /* Statuses are as follows:
-+             o "accepted" means handled _and_ passed down the
-+             pipeline.
-+             o "grabbed" means handled, but the interrupt might be
-+             terminated _or_ passed down the pipeline depending on
-+             what the domain handler asks Adeos for.
-+             o "passed" means unhandled by the domain but passed
-+             down the pipeline.
-+             o "discarded" means unhandled and _not_ passed down the
-+             pipeline. The interrupt merely disappears from the
-+             current domain down to the end of the pipeline. */
-+
-+          if (ctlbits & IPIPE_HANDLE_MASK)
-+              {
-+              if (ctlbits & IPIPE_PASS_MASK)
-+                  p += sprintf(p,"accepted");
-+              else
-+                  p += sprintf(p,"grabbed");
-+              }
-+          else if (ctlbits & IPIPE_PASS_MASK)
-+              p += sprintf(p,"passed");
-+          else
-+              p += sprintf(p,"discarded");
-+
-+          if (ctlbits & IPIPE_STICKY_MASK)
-+              p += sprintf(p,", sticky");
-+
-+          if (adeos_virtual_irq_p(irq))
-+              p += sprintf(p,", virtual");
-+
-+          p += sprintf(p,"\n");
-+
-+          irq = _irq;
-+          }
-+    }
-+
-+    spin_unlock(&__adeos_pipelock);
-+
-+    len = p - page;
-+
-+    if (len <= off + count)
-+      *eof = 1;
-+
-+    *start = page + off;
-+
-+    len -= off;
-+
-+    if (len > count)
-+      len = count;
-+
-+    if (len < 0)
-+      len = 0;
-+
-+    return len;
-+}
-+
-+void __adeos_init_proc (void) {
-+
-+    adeos_proc_entry = create_proc_read_entry("adeos",
-+                                            0444,
-+                                            NULL,
-+                                            &__adeos_read_proc,
-+                                            NULL);
-+}
-+
-+#endif /* CONFIG_PROC_FS */
-+
-+void __adeos_dump_state (void)
-+
-+{
-+    int _cpuid, nr_cpus = num_online_cpus();
-+    struct list_head *pos;
-+    unsigned long flags;
-+    adeos_declare_cpuid;
-+
-+    adeos_lock_cpu(flags);
-+
-+    printk(KERN_WARNING "Adeos: Current domain=%s on CPU #%d [stackbase=%p]\n",
-+         adp_current->name,
-+         cpuid,
-+#ifdef CONFIG_ADEOS_THREADS
-+         (void *)adp_current->estackbase[cpuid]
-+#else /* !CONFIG_ADEOS_THREADS */
-+         current
-+#endif /* CONFIG_ADEOS_THREADS */
-+         );
-+
-+    list_for_each(pos,&__adeos_pipeline) {
-+
-+        adomain_t *adp = list_entry(pos,adomain_t,p_link);
-+
-+        for (_cpuid = 0; _cpuid < nr_cpus; _cpuid++)
-+            printk(KERN_WARNING "%8s[cpuid=%d]: priority=%d, status=0x%lx, pending_hi=0x%lx\n",
-+                   adp->name,
-+                   _cpuid,
-+                   adp->priority,
-+                   adp->cpudata[_cpuid].status,
-+                   adp->cpudata[_cpuid].irq_pending_hi);
-+    }
-+
-+    adeos_unlock_cpu(flags);
-+}
-+
-+EXPORT_SYMBOL(adeos_suspend_domain);
-+EXPORT_SYMBOL(adeos_alloc_irq);
-+EXPORT_SYMBOL(adp_cpu_current);
-+EXPORT_SYMBOL(adp_root);
-+EXPORT_SYMBOL(adp_pipelined);
-+EXPORT_SYMBOL(__adeos_handle_event);
-+EXPORT_SYMBOL(__adeos_unstall_root);
-+EXPORT_SYMBOL(__adeos_stall_root);
-+EXPORT_SYMBOL(__adeos_restore_root);
-+EXPORT_SYMBOL(__adeos_test_and_stall_root);
-+EXPORT_SYMBOL(__adeos_test_root);
-+EXPORT_SYMBOL(__adeos_dump_state);
-+EXPORT_SYMBOL(__adeos_pipeline);
-+EXPORT_SYMBOL(__adeos_pipelock);
-+EXPORT_SYMBOL(__adeos_virtual_irq_map);
-+EXPORT_SYMBOL(__adeos_event_monitors);
-+EXPORT_SYMBOL(adeos_unstall_pipeline_from);
-+#ifdef CONFIG_ADEOS_PROFILING
-+EXPORT_SYMBOL(__adeos_profile_data);
-+#endif /* CONFIG_ADEOS_PROFILING */
-+/* The following are convenience exports which are needed by some
-+   Adeos domains loaded as kernel modules. */
-+EXPORT_SYMBOL(do_exit);
-diff -uNrp linux-2.6.10/kernel/exit.c linux-2.6.10-ppc-adeos/kernel/exit.c
---- linux-2.6.10/kernel/exit.c 2004-12-24 22:35:27.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/kernel/exit.c       2005-03-27 16:35:19.000000000 +0200
-@@ -809,6 +809,9 @@ fastcall NORET_TYPE void do_exit(long co
-       group_dead = atomic_dec_and_test(&tsk->signal->live);
-       if (group_dead)
-               acct_process(code);
-+#ifdef CONFIG_ADEOS_CORE
-+      __adeos_exit_process(tsk);
-+#endif /* CONFIG_ADEOS_CORE */
-       __exit_mm(tsk);
- 
-       exit_sem(tsk);
-diff -uNrp linux-2.6.10/kernel/fork.c linux-2.6.10-ppc-adeos/kernel/fork.c
---- linux-2.6.10/kernel/fork.c 2004-12-24 22:33:59.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/kernel/fork.c       2005-08-31 13:50:04.000000000 +0200
-@@ -1021,6 +1021,14 @@ static task_t *copy_process(unsigned lon
- 
-       nr_threads++;
-       write_unlock_irq(&tasklist_lock);
-+#ifdef CONFIG_ADEOS_CORE
-+      {
-+      int k;
-+
-+      for (k = 0; k < ADEOS_ROOT_NPTDKEYS; k++)
-+          p->ptd[k] = NULL;
-+      }
-+#endif /* CONFIG_ADEOS_CORE */
-       retval = 0;
- 
- fork_out:
-diff -uNrp linux-2.6.10/kernel/irq/handle.c linux-2.6.10-ppc-adeos/kernel/irq/handle.c
---- linux-2.6.10/kernel/irq/handle.c   2004-12-24 22:35:50.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/kernel/irq/handle.c 2005-03-27 16:35:19.000000000 +0200
-@@ -127,7 +127,12 @@ fastcall unsigned int __do_IRQ(unsigned 
-               /*
-                * No locking required for CPU-local interrupts:
-                */
-+#ifdef CONFIG_ADEOS_CORE
-+              if (!adp_pipelined) 
-+                  desc->handler->ack(irq);
-+#else
-               desc->handler->ack(irq);
-+#endif /* CONFIG_ADEOS_CORE */
-               action_ret = handle_IRQ_event(irq, regs, desc->action);
-               if (!noirqdebug)
-                       note_interrupt(irq, desc, action_ret);
-@@ -136,7 +141,12 @@ fastcall unsigned int __do_IRQ(unsigned 
-       }
- 
-       spin_lock(&desc->lock);
-+#ifdef CONFIG_ADEOS_CORE
-+      if (!adp_pipelined) 
-+          desc->handler->ack(irq);
-+#else
-       desc->handler->ack(irq);
-+#endif /* CONFIG_ADEOS_CORE */
-       /*
-        * REPLAY is when Linux resends an IRQ that was dropped earlier
-        * WAITING is used by probe to mark irqs that are being tested
-diff -uNrp linux-2.6.10/kernel/panic.c linux-2.6.10-ppc-adeos/kernel/panic.c
---- linux-2.6.10/kernel/panic.c        2004-12-24 22:35:29.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/kernel/panic.c      2005-03-27 16:35:19.000000000 +0200
-@@ -70,6 +70,9 @@ NORET_TYPE void panic(const char * fmt, 
-       va_end(args);
-       printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
-       bust_spinlocks(0);
-+#ifdef CONFIG_ADEOS_CORE
-+      __adeos_dump_state();
-+#endif /* CONFIG_ADEOS_CORE */
- 
- #ifdef CONFIG_SMP
-       smp_send_stop();
-diff -uNrp linux-2.6.10/kernel/printk.c linux-2.6.10-ppc-adeos/kernel/printk.c
---- linux-2.6.10/kernel/printk.c       2004-12-24 22:35:40.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/kernel/printk.c     2005-07-11 15:55:38.000000000 +0200
-@@ -509,6 +509,66 @@ static void zap_locks(void)
-  * then changes console_loglevel may break. This is because console_loglevel
-  * is inspected when the actual printing occurs.
-  */
-+#ifdef CONFIG_ADEOS_CORE
-+
-+static raw_spinlock_t __adeos_printk_lock = RAW_SPIN_LOCK_UNLOCKED;
-+
-+static int __adeos_printk_fill;
-+
-+static char __adeos_printk_buf[__LOG_BUF_LEN];
-+
-+void __adeos_flush_printk (unsigned virq)
-+{
-+      char *p = __adeos_printk_buf;
-+      int out = 0, len;
-+
-+      clear_bit(ADEOS_PPRINTK_FLAG,&adp_root->flags);
-+
-+      while (out < __adeos_printk_fill) {
-+              len = strlen(p) + 1;
-+              printk("%s",p);
-+              p += len;
-+              out += len;
-+      }
-+      __adeos_printk_fill = 0;
-+}
-+
-+asmlinkage int printk(const char *fmt, ...)
-+{
-+      unsigned long flags;
-+      int r, fbytes;
-+      va_list args;
-+
-+      va_start(args, fmt);
-+
-+      if (adp_current == adp_root ||
-+          test_bit(ADEOS_SPRINTK_FLAG,&adp_current->flags) ||
-+          oops_in_progress) {
-+              r = vprintk(fmt, args);
-+              goto out;
-+      }
-+
-+      adeos_spin_lock_irqsave(&__adeos_printk_lock,flags);
-+
-+      fbytes = __LOG_BUF_LEN - __adeos_printk_fill;
-+
-+      if (fbytes > 1) {
-+              r = vscnprintf(__adeos_printk_buf + __adeos_printk_fill,
-+                             fbytes, fmt, args) + 1; /* account for the null 
byte */
-+              __adeos_printk_fill += r;
-+      } else
-+              r = 0;
-+      
-+      adeos_spin_unlock_irqrestore(&__adeos_printk_lock,flags);
-+
-+      if (!test_and_set_bit(ADEOS_PPRINTK_FLAG,&adp_root->flags))
-+              adeos_trigger_irq(__adeos_printk_virq);
-+out: 
-+      va_end(args);
-+
-+      return r;
-+}
-+#else /* !CONFIG_ADEOS_CORE */
- asmlinkage int printk(const char *fmt, ...)
- {
-       va_list args;
-@@ -520,6 +580,7 @@ asmlinkage int printk(const char *fmt, .
- 
-       return r;
- }
-+#endif /* CONFIG_ADEOS_CORE */
- 
- asmlinkage int vprintk(const char *fmt, va_list args)
- {
-diff -uNrp linux-2.6.10/kernel/sched.c linux-2.6.10-ppc-adeos/kernel/sched.c
---- linux-2.6.10/kernel/sched.c        2004-12-24 22:35:24.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/kernel/sched.c      2005-06-08 19:50:47.000000000 +0200
-@@ -302,7 +302,16 @@ static DEFINE_PER_CPU(struct runqueue, r
-  * Default context-switch locking:
-  */
- #ifndef prepare_arch_switch
-+#ifdef CONFIG_ADEOS_CORE
-+#define prepare_arch_switch(rq,prev,next) \
-+do { \
-+    struct { struct task_struct *prev, *next; } arg = { (prev), (next) }; \
-+    __adeos_schedule_head(&arg); \
-+    adeos_hw_cli(); \
-+} while(0)
-+#else /* !CONFIG_ADEOS_CORE */
- # define prepare_arch_switch(rq, next)        do { } while (0)
-+#endif /* CONFIG_ADEOS_CORE */
- # define finish_arch_switch(rq, next) spin_unlock_irq(&(rq)->lock)
- # define task_running(rq, p)          ((rq)->curr == (p))
- #endif
-@@ -1367,6 +1376,9 @@ asmlinkage void schedule_tail(task_t *pr
- 
-       if (current->set_child_tid)
-               put_user(current->pid, current->set_child_tid);
-+#ifdef CONFIG_ADEOS_CORE
-+      __adeos_enter_process();
-+#endif /* CONFIG_ADEOS_CORE */
- }
- 
- /*
-@@ -2535,6 +2547,11 @@ asmlinkage void __sched schedule(void)
-       unsigned long run_time;
-       int cpu, idx;
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      if (adp_current != adp_root) /* Let's be helpful and conservative. */
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-+
-       /*
-        * Test if we are atomic.  Since do_exit() needs to call into
-        * schedule() atomically, we ignore that path for now.
-@@ -2684,9 +2701,28 @@ switch_tasks:
-               rq->curr = next;
-               ++*switch_count;
- 
--              prepare_arch_switch(rq, next);
-+#ifdef CONFIG_ADEOS_CORE
-+              prepare_arch_switch(rq, prev, next);
-+#else /* !CONFIG_ADEOS_CORE */
-+              prepare_arch_switch(rq, next);
-+#endif /* CONFIG_ADEOS_CORE */
-               prev = context_switch(rq, prev, next);
-               barrier();
-+#ifdef CONFIG_ADEOS_CORE
-+              if (adp_pipelined)
-+                  {
-+                  __clear_bit(IPIPE_SYNC_FLAG,&adp_root->cpudata[task_cpu(current)].status);
-+                  adeos_hw_sti();
-+                  }
-+
-+              if (__adeos_schedule_tail(prev) > 0 || adp_current != adp_root)
-+                  /* Someone has just recycled the register set of
-+                     prev for running over a non-root domain, or
-+                     some event handler in the pipeline asked for a
-+                     truncated scheduling tail. Don't perform the
-+                     Linux housekeeping chores, at least not now. */
-+                  return;
-+#endif /* CONFIG_ADEOS_CORE */
- 
-               finish_task_switch(prev);
-       } else
-@@ -3148,6 +3184,16 @@ recheck:
-       retval = security_task_setscheduler(p, policy, &lp);
-       if (retval)
-               goto out_unlock;
-+#ifdef CONFIG_ADEOS_CORE
-+      {
-+      struct { struct task_struct *task; int policy; struct sched_param *param; } evdata = { p, policy, &lp };
-+      if (__adeos_renice_process(&evdata))
-+          {
-+          retval = 0;
-+          goto out_unlock;
-+          }
-+      }
-+#endif /* CONFIG_ADEOS_CORE */
-       /*
-        * To be able to change p->policy safely, the apropriate
-        * runqueue lock must be held.
-@@ -4676,3 +4722,62 @@ void normalize_rt_tasks(void)
- }
- 
- #endif /* CONFIG_MAGIC_SYSRQ */
-+
-+#ifdef CONFIG_ADEOS_CORE
-+
-+int __adeos_setscheduler_root (struct task_struct *p, int policy, int prio)
-+{
-+      prio_array_t *array;
-+      unsigned long flags;
-+      runqueue_t *rq;
-+      int oldprio;
-+
-+      if (prio < 1 || prio > MAX_RT_PRIO-1)
-+          return -EINVAL;
-+
-+      read_lock_irq(&tasklist_lock);
-+      rq = task_rq_lock(p, &flags);
-+      array = p->array;
-+      if (array)
-+              deactivate_task(p, rq);
-+      oldprio = p->prio;
-+      __setscheduler(p, policy, prio);
-+      if (array) {
-+              __activate_task(p, rq);
-+              if (task_running(rq, p)) {
-+                      if (p->prio > oldprio)
-+                              resched_task(rq->curr);
-+              } else if (TASK_PREEMPTS_CURR(p, rq))
-+                      resched_task(rq->curr);
-+      }
-+      task_rq_unlock(rq, &flags);
-+      read_unlock_irq(&tasklist_lock);
-+
-+      return 0;
-+}
-+
-+EXPORT_SYMBOL(__adeos_setscheduler_root);
-+
-+void __adeos_reenter_root (struct task_struct *prev,
-+                         int policy,
-+                         int prio)
-+{
-+      finish_task_switch(prev);
-+      if (reacquire_kernel_lock(current) < 0)
-+          ;
-+      preempt_enable_no_resched();
-+
-+      if (current->policy != policy || current->rt_priority != prio)
-+          __adeos_setscheduler_root(current,policy,prio);
-+}
-+
-+EXPORT_SYMBOL(__adeos_reenter_root);
-+
-+void __adeos_schedule_back_root (struct task_struct *prev)
-+{
-+    __adeos_reenter_root(prev,current->policy,current->rt_priority);
-+}
-+
-+EXPORT_SYMBOL(__adeos_schedule_back_root);
-+
-+#endif /* CONFIG_ADEOS_CORE */
-diff -uNrp linux-2.6.10/kernel/signal.c linux-2.6.10-ppc-adeos/kernel/signal.c
---- linux-2.6.10/kernel/signal.c       2004-12-24 22:34:32.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/kernel/signal.c     2005-03-27 16:35:19.000000000 +0200
-@@ -576,6 +576,13 @@ void signal_wake_up(struct task_struct *
- 
-       set_tsk_thread_flag(t, TIF_SIGPENDING);
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      {
-+      struct { struct task_struct *t; } evdata = { t };
-+      __adeos_kick_process(&evdata);
-+      }
-+#endif /* CONFIG_ADEOS_CORE */
-+
-       /*
-        * If resume is set, we want to wake it up in the TASK_STOPPED case.
-        * We don't check for TASK_STOPPED because there is a race with it
-@@ -823,6 +830,17 @@ specific_send_sig_info(int sig, struct s
-               BUG();
- #endif
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      /* If some domain handler in the pipeline doesn't ask for
-+         propagation, return success pretending that 'sig' was
-+         delivered. */
-+      {
-+      struct { struct task_struct *task; int sig; } evdata = { t, sig };
-+      if (__adeos_signal_process(&evdata))
-+          goto out;
-+      }
-+#endif /* CONFIG_ADEOS_CORE */
-+
-       if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
-               /*
-                * Set up a return to indicate that we dropped the signal.
-diff -uNrp linux-2.6.10/kernel/sysctl.c linux-2.6.10-ppc-adeos/kernel/sysctl.c
---- linux-2.6.10/kernel/sysctl.c       2004-12-24 22:33:59.000000000 +0100
-+++ linux-2.6.10-ppc-adeos/kernel/sysctl.c     2005-03-27 16:35:19.000000000 +0200
-@@ -946,6 +946,9 @@ void __init sysctl_init(void)
- #ifdef CONFIG_PROC_FS
-       register_proc_table(root_table, proc_sys_root);
-       init_irq_proc();
-+#ifdef CONFIG_ADEOS_CORE
-+      __adeos_init_proc();
-+#endif /* CONFIG_ADEOS_CORE */
- #endif
- }
- 
diff -Nru --exclude=.svn xenomai-orig/arch/ppc/patches/README xenomai-devel/arch/ppc/patches/README
--- xenomai-orig/arch/ppc/patches/README        2005-10-23 11:00:14.000000000 +0300
+++ xenomai-devel/arch/ppc/patches/README       1970-01-01 02:00:00.000000000 +0200
@@ -1,19 +0,0 @@
--- arch/ppc/patches
-
-Xenomai needs special kernel support to deliver fast and deterministic
-response time to external interrupts, and also to provide real-time
-services highly integrated with the standard Linux kernel.
-
-This support is provided by the Adeos real-time enabler [1], in the
-form of a kernel patch you have to apply to a vanilla kernel tree,
-before you attempt to compile the Xenomai codebase against the latter
-kernel.
-
-On the ppc architecture, Xenomai can run over the former and the new
-generation of Adeos patches, namely adeos-linux-* and adeos-ipipe-*,
-that one can find in this directory. Just apply one of those patches
-to the corresponding kernel release. You may want to have a look at
-the README.*INSTALL guides at the top of the Xenomai tree for more
-information.
-
-[1] http://www.gna.org/projects/adeos/
diff -Nru --exclude=.svn xenomai-orig/arch/ppc64/defconfig xenomai-devel/arch/ppc64/defconfig
--- xenomai-orig/arch/ppc64/defconfig   2005-10-11 10:32:31.000000000 +0300
+++ xenomai-devel/arch/ppc64/defconfig  1970-01-01 02:00:00.000000000 +0200
@@ -1,67 +0,0 @@
-#
-# Automatically generated make config: don't edit
-#
-CONFIG_MODULES=y
-CONFIG_XENO_VERSION="2.0"
-
-#
-# General
-#
-CONFIG_XENO_INSTALLDIR="/usr/realtime"
-CONFIG_XENO_LINUXDIR="/lib/modules/`uname -r`/build"
-
-#
-# Documentation
-#
-# CONFIG_XENO_DOC_DOX is not set
-# CONFIG_XENO_DOC_LATEX_NONSTOP is not set
-# CONFIG_XENO_DOC_DBX is not set
-# CONFIG_XENO_OPT_EXPERT is not set
-
-#
-# Nucleus
-#
-CONFIG_XENO_OPT_PERVASIVE=y
-CONFIG_XENO_OPT_PIPE=y
-CONFIG_XENO_OPT_PIPE_NRDEV="32"
-CONFIG_XENO_OPT_SYS_HEAPSZ="128"
-
-#
-# Machine (powerpc64)
-#
-CONFIG_XENO_HW_FPU=y
-CONFIG_XENO_HW_PERIODIC_TIMER=y
-
-#
-# APIs
-#
-CONFIG_XENO_SKIN_NATIVE=y
-CONFIG_XENO_OPT_NATIVE_REGISTRY=y
-CONFIG_XENO_OPT_NATIVE_REGISTRY_NRSLOTS="512"
-CONFIG_XENO_OPT_NATIVE_PIPE=y
-CONFIG_XENO_OPT_NATIVE_PIPE_BUFSZ="4096"
-CONFIG_XENO_OPT_NATIVE_SEM=y
-CONFIG_XENO_OPT_NATIVE_EVENT=y
-CONFIG_XENO_OPT_NATIVE_MUTEX=y
-CONFIG_XENO_OPT_NATIVE_COND=y
-CONFIG_XENO_OPT_NATIVE_QUEUE=y
-CONFIG_XENO_OPT_NATIVE_HEAP=y
-CONFIG_XENO_OPT_NATIVE_ALARM=y
-CONFIG_XENO_OPT_NATIVE_MPS=y
-CONFIG_XENO_OPT_NATIVE_INTR=y
-CONFIG_XENO_SKIN_POSIX=y
-# CONFIG_XENO_SKIN_PSOS is not set
-# CONFIG_XENO_SKIN_UITRON is not set
-# CONFIG_XENO_SKIN_VRTX is not set
-# CONFIG_XENO_SKIN_VXWORKS is not set
-CONFIG_XENO_OPT_UVM=y
-
-#
-# Drivers
-#
-# CONFIG_XENO_DRIVERS_16550A is not set
-
-#
-# Simulator
-#
-# CONFIG_XENO_MVM is not set
diff -Nru --exclude=.svn xenomai-orig/arch/ppc64/GNUmakefile.am xenomai-devel/arch/ppc64/GNUmakefile.am
--- xenomai-orig/arch/ppc64/GNUmakefile.am      2005-10-11 10:32:31.000000000 +0300
+++ xenomai-devel/arch/ppc64/GNUmakefile.am     1970-01-01 02:00:00.000000000 +0200
@@ -1,3 +0,0 @@
-SUBDIRS = hal
-
-EXTRA_DIST = Kconfig defconfig patches
diff -Nru --exclude=.svn xenomai-orig/arch/ppc64/GNUmakefile.in xenomai-devel/arch/ppc64/GNUmakefile.in
--- xenomai-orig/arch/ppc64/GNUmakefile.in      2005-10-23 11:00:14.000000000 +0300
+++ xenomai-devel/arch/ppc64/GNUmakefile.in     1970-01-01 02:00:00.000000000 +0200
@@ -1,614 +0,0 @@
-# GNUmakefile.in generated by automake 1.9.5 from GNUmakefile.am.
-# @configure_input@
-
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005  Free Software Foundation, Inc.
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-srcdir = @srcdir@
-top_srcdir = @top_srcdir@
-VPATH = @srcdir@
-pkgdatadir = $(datadir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-top_builddir = ../..
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-INSTALL = @INSTALL@
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-target_triplet = @target@
-subdir = arch/ppc64
-DIST_COMMON = $(srcdir)/GNUmakefile.am $(srcdir)/GNUmakefile.in
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps =  \
-       $(top_srcdir)/config/autoconf/ac_prog_cc_for_build.m4 \
-       $(top_srcdir)/config/autoconf/docbook.m4 \
-       $(top_srcdir)/config/version $(top_srcdir)/configure.in
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
-       $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/include/xeno_config.h
-CONFIG_CLEAN_FILES =
-SOURCES =
-DIST_SOURCES =
-RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \
-       html-recursive info-recursive install-data-recursive \
-       install-exec-recursive install-info-recursive \
-       install-recursive installcheck-recursive installdirs-recursive \
-       pdf-recursive ps-recursive uninstall-info-recursive \
-       uninstall-recursive
-ETAGS = etags
-CTAGS = ctags
-DIST_SUBDIRS = $(SUBDIRS)
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMDEP_FALSE = @AMDEP_FALSE@
-AMDEP_TRUE = @AMDEP_TRUE@
-AMTAR = @AMTAR@
-AR = @AR@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-BUILD_EXEEXT = @BUILD_EXEEXT@
-BUILD_OBJEXT = @BUILD_OBJEXT@
-CC = @CC@
-CCAS = @CCAS@
-CCASFLAGS = @CCASFLAGS@
-CCDEPMODE = @CCDEPMODE@
-CC_FOR_BUILD = @CC_FOR_BUILD@
-CFLAGS = @CFLAGS@
-CFLAGS_FOR_BUILD = @CFLAGS_FOR_BUILD@
-CONFIG_IA64_FALSE = @CONFIG_IA64_FALSE@
-CONFIG_IA64_TRUE = @CONFIG_IA64_TRUE@
-CONFIG_LTT_FALSE = @CONFIG_LTT_FALSE@
-CONFIG_LTT_TRUE = @CONFIG_LTT_TRUE@
-CONFIG_PPC64_FALSE = @CONFIG_PPC64_FALSE@
-CONFIG_PPC64_TRUE = @CONFIG_PPC64_TRUE@
-CONFIG_PPC_FALSE = @CONFIG_PPC_FALSE@
-CONFIG_PPC_TRUE = @CONFIG_PPC_TRUE@
-CONFIG_SMP_FALSE = @CONFIG_SMP_FALSE@
-CONFIG_SMP_TRUE = @CONFIG_SMP_TRUE@
-CONFIG_X86_FALSE = @CONFIG_X86_FALSE@
-CONFIG_X86_LOCAL_APIC_FALSE = @CONFIG_X86_LOCAL_APIC_FALSE@
-CONFIG_X86_LOCAL_APIC_TRUE = @CONFIG_X86_LOCAL_APIC_TRUE@
-CONFIG_X86_TRUE = @CONFIG_X86_TRUE@
-CONFIG_XENO_DOC_DOX_FALSE = @CONFIG_XENO_DOC_DOX_FALSE@
-CONFIG_XENO_DOC_DOX_TRUE = @CONFIG_XENO_DOC_DOX_TRUE@
-CONFIG_XENO_DRIVERS_16550A_FALSE = @CONFIG_XENO_DRIVERS_16550A_FALSE@
-CONFIG_XENO_DRIVERS_16550A_TRUE = @CONFIG_XENO_DRIVERS_16550A_TRUE@
-CONFIG_XENO_HW_FPU_FALSE = @CONFIG_XENO_HW_FPU_FALSE@
-CONFIG_XENO_HW_FPU_TRUE = @CONFIG_XENO_HW_FPU_TRUE@
-CONFIG_XENO_HW_NMI_DEBUG_LATENCY_FALSE = @CONFIG_XENO_HW_NMI_DEBUG_LATENCY_FALSE@
-CONFIG_XENO_HW_NMI_DEBUG_LATENCY_TRUE = @CONFIG_XENO_HW_NMI_DEBUG_LATENCY_TRUE@
-CONFIG_XENO_HW_SMI_DETECT_FALSE = @CONFIG_XENO_HW_SMI_DETECT_FALSE@
-CONFIG_XENO_HW_SMI_DETECT_TRUE = @CONFIG_XENO_HW_SMI_DETECT_TRUE@
-CONFIG_XENO_MAINT_FALSE = @CONFIG_XENO_MAINT_FALSE@
-CONFIG_XENO_MAINT_GCH_FALSE = @CONFIG_XENO_MAINT_GCH_FALSE@
-CONFIG_XENO_MAINT_GCH_TRUE = @CONFIG_XENO_MAINT_GCH_TRUE@
-CONFIG_XENO_MAINT_PGM_FALSE = @CONFIG_XENO_MAINT_PGM_FALSE@
-CONFIG_XENO_MAINT_PGM_TRUE = @CONFIG_XENO_MAINT_PGM_TRUE@
-CONFIG_XENO_MAINT_TRUE = @CONFIG_XENO_MAINT_TRUE@
-CONFIG_XENO_OLD_FASHIONED_BUILD_FALSE = @CONFIG_XENO_OLD_FASHIONED_BUILD_FALSE@
-CONFIG_XENO_OLD_FASHIONED_BUILD_TRUE = @CONFIG_XENO_OLD_FASHIONED_BUILD_TRUE@
-CONFIG_XENO_OPT_CONFIG_GZ_FALSE = @CONFIG_XENO_OPT_CONFIG_GZ_FALSE@
-CONFIG_XENO_OPT_CONFIG_GZ_TRUE = @CONFIG_XENO_OPT_CONFIG_GZ_TRUE@
-CONFIG_XENO_OPT_NATIVE_ALARM_FALSE = @CONFIG_XENO_OPT_NATIVE_ALARM_FALSE@
-CONFIG_XENO_OPT_NATIVE_ALARM_TRUE = @CONFIG_XENO_OPT_NATIVE_ALARM_TRUE@
-CONFIG_XENO_OPT_NATIVE_COND_FALSE = @CONFIG_XENO_OPT_NATIVE_COND_FALSE@
-CONFIG_XENO_OPT_NATIVE_COND_TRUE = @CONFIG_XENO_OPT_NATIVE_COND_TRUE@
-CONFIG_XENO_OPT_NATIVE_EVENT_FALSE = @CONFIG_XENO_OPT_NATIVE_EVENT_FALSE@
-CONFIG_XENO_OPT_NATIVE_EVENT_TRUE = @CONFIG_XENO_OPT_NATIVE_EVENT_TRUE@
-CONFIG_XENO_OPT_NATIVE_HEAP_FALSE = @CONFIG_XENO_OPT_NATIVE_HEAP_FALSE@
-CONFIG_XENO_OPT_NATIVE_HEAP_TRUE = @CONFIG_XENO_OPT_NATIVE_HEAP_TRUE@
-CONFIG_XENO_OPT_NATIVE_INTR_FALSE = @CONFIG_XENO_OPT_NATIVE_INTR_FALSE@
-CONFIG_XENO_OPT_NATIVE_INTR_TRUE = @CONFIG_XENO_OPT_NATIVE_INTR_TRUE@
-CONFIG_XENO_OPT_NATIVE_MUTEX_FALSE = @CONFIG_XENO_OPT_NATIVE_MUTEX_FALSE@
-CONFIG_XENO_OPT_NATIVE_MUTEX_TRUE = @CONFIG_XENO_OPT_NATIVE_MUTEX_TRUE@
-CONFIG_XENO_OPT_NATIVE_PIPE_FALSE = @CONFIG_XENO_OPT_NATIVE_PIPE_FALSE@
-CONFIG_XENO_OPT_NATIVE_PIPE_TRUE = @CONFIG_XENO_OPT_NATIVE_PIPE_TRUE@
-CONFIG_XENO_OPT_NATIVE_QUEUE_FALSE = @CONFIG_XENO_OPT_NATIVE_QUEUE_FALSE@
-CONFIG_XENO_OPT_NATIVE_QUEUE_TRUE = @CONFIG_XENO_OPT_NATIVE_QUEUE_TRUE@
-CONFIG_XENO_OPT_NATIVE_REGISTRY_FALSE = @CONFIG_XENO_OPT_NATIVE_REGISTRY_FALSE@
-CONFIG_XENO_OPT_NATIVE_REGISTRY_TRUE = @CONFIG_XENO_OPT_NATIVE_REGISTRY_TRUE@
-CONFIG_XENO_OPT_NATIVE_SEM_FALSE = @CONFIG_XENO_OPT_NATIVE_SEM_FALSE@
-CONFIG_XENO_OPT_NATIVE_SEM_TRUE = @CONFIG_XENO_OPT_NATIVE_SEM_TRUE@
-CONFIG_XENO_OPT_PERVASIVE_FALSE = @CONFIG_XENO_OPT_PERVASIVE_FALSE@
-CONFIG_XENO_OPT_PERVASIVE_TRUE = @CONFIG_XENO_OPT_PERVASIVE_TRUE@
-CONFIG_XENO_OPT_PIPE_FALSE = @CONFIG_XENO_OPT_PIPE_FALSE@
-CONFIG_XENO_OPT_PIPE_TRUE = @CONFIG_XENO_OPT_PIPE_TRUE@
-CONFIG_XENO_OPT_RTAI_FIFO_FALSE = @CONFIG_XENO_OPT_RTAI_FIFO_FALSE@
-CONFIG_XENO_OPT_RTAI_FIFO_TRUE = @CONFIG_XENO_OPT_RTAI_FIFO_TRUE@
-CONFIG_XENO_OPT_RTAI_SEM_FALSE = @CONFIG_XENO_OPT_RTAI_SEM_FALSE@
-CONFIG_XENO_OPT_RTAI_SEM_TRUE = @CONFIG_XENO_OPT_RTAI_SEM_TRUE@
-CONFIG_XENO_OPT_RTAI_SHM_FALSE = @CONFIG_XENO_OPT_RTAI_SHM_FALSE@
-CONFIG_XENO_OPT_RTAI_SHM_TRUE = @CONFIG_XENO_OPT_RTAI_SHM_TRUE@
-CONFIG_XENO_OPT_UDEV_FALSE = @CONFIG_XENO_OPT_UDEV_FALSE@
-CONFIG_XENO_OPT_UDEV_TRUE = @CONFIG_XENO_OPT_UDEV_TRUE@
-CONFIG_XENO_OPT_UVM_FALSE = @CONFIG_XENO_OPT_UVM_FALSE@
-CONFIG_XENO_OPT_UVM_TRUE = @CONFIG_XENO_OPT_UVM_TRUE@
-CONFIG_XENO_SKIN_NATIVE_FALSE = @CONFIG_XENO_SKIN_NATIVE_FALSE@
-CONFIG_XENO_SKIN_NATIVE_TRUE = @CONFIG_XENO_SKIN_NATIVE_TRUE@
-CONFIG_XENO_SKIN_POSIX_FALSE = @CONFIG_XENO_SKIN_POSIX_FALSE@
-CONFIG_XENO_SKIN_POSIX_TRUE = @CONFIG_XENO_SKIN_POSIX_TRUE@
-CONFIG_XENO_SKIN_PSOS_FALSE = @CONFIG_XENO_SKIN_PSOS_FALSE@
-CONFIG_XENO_SKIN_PSOS_TRUE = @CONFIG_XENO_SKIN_PSOS_TRUE@
-CONFIG_XENO_SKIN_RTAI_FALSE = @CONFIG_XENO_SKIN_RTAI_FALSE@
-CONFIG_XENO_SKIN_RTAI_TRUE = @CONFIG_XENO_SKIN_RTAI_TRUE@
-CONFIG_XENO_SKIN_RTDM_FALSE = @CONFIG_XENO_SKIN_RTDM_FALSE@
-CONFIG_XENO_SKIN_RTDM_TRUE = @CONFIG_XENO_SKIN_RTDM_TRUE@
-CONFIG_XENO_SKIN_UITRON_FALSE = @CONFIG_XENO_SKIN_UITRON_FALSE@
-CONFIG_XENO_SKIN_UITRON_TRUE = @CONFIG_XENO_SKIN_UITRON_TRUE@
-CONFIG_XENO_SKIN_VRTX_FALSE = @CONFIG_XENO_SKIN_VRTX_FALSE@
-CONFIG_XENO_SKIN_VRTX_TRUE = @CONFIG_XENO_SKIN_VRTX_TRUE@
-CONFIG_XENO_SKIN_VXWORKS_FALSE = @CONFIG_XENO_SKIN_VXWORKS_FALSE@
-CONFIG_XENO_SKIN_VXWORKS_TRUE = @CONFIG_XENO_SKIN_VXWORKS_TRUE@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CPPFLAGS_FOR_BUILD = @CPPFLAGS_FOR_BUILD@
-CPP_FOR_BUILD = @CPP_FOR_BUILD@
-CROSS_COMPILE = @CROSS_COMPILE@
-CXX = @CXX@
-CXXCPP = @CXXCPP@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DBX_ABS_SRCDIR_FALSE = @DBX_ABS_SRCDIR_FALSE@
-DBX_ABS_SRCDIR_TRUE = @DBX_ABS_SRCDIR_TRUE@
-DBX_DOC_FALSE = @DBX_DOC_FALSE@
-DBX_DOC_ROOT = @DBX_DOC_ROOT@
-DBX_DOC_TRUE = @DBX_DOC_TRUE@
-DBX_FOP = @DBX_FOP@
-DBX_GEN_DOC_ROOT = @DBX_GEN_DOC_ROOT@
-DBX_LINT = @DBX_LINT@
-DBX_MAYBE_NONET = @DBX_MAYBE_NONET@
-DBX_ROOT = @DBX_ROOT@
-DBX_XSLTPROC = @DBX_XSLTPROC@
-DBX_XSL_ROOT = @DBX_XSL_ROOT@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-DOXYGEN = @DOXYGEN@
-DOXYGEN_HAVE_DOT = @DOXYGEN_HAVE_DOT@
-DOXYGEN_SHOW_INCLUDE_FILES = @DOXYGEN_SHOW_INCLUDE_FILES@
-ECHO = @ECHO@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-F77 = @F77@
-FFLAGS = @FFLAGS@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-LATEX_BATCHMODE = @LATEX_BATCHMODE@
-LATEX_MODE = @LATEX_MODE@
-LDFLAGS = @LDFLAGS@
-LEX = @LEX@
-LEXLIB = @LEXLIB@
-LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBTOOL = @LIBTOOL@
-LN_S = @LN_S@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@
-MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@
-MAKEINFO = @MAKEINFO@
-OBJEXT = @OBJEXT@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-RANLIB = @RANLIB@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-STRIP = @STRIP@
-VERSION = @VERSION@
-XENO_BUILD_STRING = @XENO_BUILD_STRING@
-XENO_FP_CFLAGS = @XENO_FP_CFLAGS@
-XENO_HOST_STRING = @XENO_HOST_STRING@
-XENO_KBUILD_CLEAN = @XENO_KBUILD_CLEAN@
-XENO_KBUILD_CMD = @XENO_KBUILD_CMD@
-XENO_KBUILD_DISTCLEAN = @XENO_KBUILD_DISTCLEAN@
-XENO_KBUILD_ENV = @XENO_KBUILD_ENV@
-XENO_KMOD_APP_CFLAGS = @XENO_KMOD_APP_CFLAGS@
-XENO_KMOD_CFLAGS = @XENO_KMOD_CFLAGS@
-XENO_LINUX_DIR = @XENO_LINUX_DIR@
-XENO_LINUX_VERSION = @XENO_LINUX_VERSION@
-XENO_MAYBE_DOCDIR = @XENO_MAYBE_DOCDIR@
-XENO_MAYBE_SIMDIR = @XENO_MAYBE_SIMDIR@
-XENO_MODULE_DIR = @XENO_MODULE_DIR@
-XENO_MODULE_EXT = @XENO_MODULE_EXT@
-XENO_PIPE_NRDEV = @XENO_PIPE_NRDEV@
-XENO_SYMBOL_DIR = @XENO_SYMBOL_DIR@
-XENO_TARGET_ARCH = @XENO_TARGET_ARCH@
-XENO_TARGET_SUBARCH = @XENO_TARGET_SUBARCH@
-XENO_USER_APP_CFLAGS = @XENO_USER_APP_CFLAGS@
-XENO_USER_CFLAGS = @XENO_USER_CFLAGS@
-ac_ct_AR = @ac_ct_AR@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CC_FOR_BUILD = @ac_ct_CC_FOR_BUILD@
-ac_ct_CXX = @ac_ct_CXX@
-ac_ct_F77 = @ac_ct_F77@
-ac_ct_RANLIB = @ac_ct_RANLIB@
-ac_ct_STRIP = @ac_ct_STRIP@
-am__fastdepCC_FALSE = @am__fastdepCC_FALSE@
-am__fastdepCC_TRUE = @am__fastdepCC_TRUE@
-am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@
-am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_vendor = @build_vendor@
-datadir = @datadir@
-exec_prefix = @exec_prefix@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localstatedir = @localstatedir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-oldincludedir = @oldincludedir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-subdirs = @subdirs@
-sysconfdir = @sysconfdir@
-target = @target@
-target_alias = @target_alias@
-target_cpu = @target_cpu@
-target_os = @target_os@
-target_vendor = @target_vendor@
-SUBDIRS = hal
-EXTRA_DIST = Kconfig defconfig patches
-all: all-recursive
-
-.SUFFIXES:
-$(srcdir)/GNUmakefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/GNUmakefile.am  $(am__configure_deps)
-       @for dep in $?; do \
-         case '$(am__configure_deps)' in \
-           *$$dep*) \
-             cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
-               && exit 0; \
-             exit 1;; \
-         esac; \
-       done; \
-       echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign  arch/ppc64/GNUmakefile'; \
-       cd $(top_srcdir) && \
-         $(AUTOMAKE) --foreign  arch/ppc64/GNUmakefile
-.PRECIOUS: GNUmakefile
-GNUmakefile: $(srcdir)/GNUmakefile.in $(top_builddir)/config.status
-       @case '$?' in \
-         *config.status*) \
-           cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
-         *) \
-           echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
-           cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
-       esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
-       cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
-       cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
-       cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-mostlyclean-libtool:
-       -rm -f *.lo
-
-clean-libtool:
-       -rm -rf .libs _libs
-
-distclean-libtool:
-       -rm -f libtool
-uninstall-info-am:
-
-# This directory's subdirectories are mostly independent; you can cd
-# into them and run `make' without going through this Makefile.
-# To change the values of `make' variables: instead of editing Makefiles,
-# (1) if the variable is set in `config.status', edit `config.status'
-#     (which will cause the Makefiles to be regenerated when you run `make');
-# (2) otherwise, pass the desired values on the `make' command line.
-$(RECURSIVE_TARGETS):
-       @failcom='exit 1'; \
-       for f in x $$MAKEFLAGS; do \
-         case $$f in \
-           *=* | --[!k]*);; \
-           *k*) failcom='fail=yes';; \
-         esac; \
-       done; \
-       dot_seen=no; \
-       target=`echo $@ | sed s/-recursive//`; \
-       list='$(SUBDIRS)'; for subdir in $$list; do \
-         echo "Making $$target in $$subdir"; \
-         if test "$$subdir" = "."; then \
-           dot_seen=yes; \
-           local_target="$$target-am"; \
-         else \
-           local_target="$$target"; \
-         fi; \
-         (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
-         || eval $$failcom; \
-       done; \
-       if test "$$dot_seen" = "no"; then \
-         $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
-       fi; test -z "$$fail"
-
-mostlyclean-recursive clean-recursive distclean-recursive \
-maintainer-clean-recursive:
-       @failcom='exit 1'; \
-       for f in x $$MAKEFLAGS; do \
-         case $$f in \
-           *=* | --[!k]*);; \
-           *k*) failcom='fail=yes';; \
-         esac; \
-       done; \
-       dot_seen=no; \
-       case "$@" in \
-         distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
-         *) list='$(SUBDIRS)' ;; \
-       esac; \
-       rev=''; for subdir in $$list; do \
-         if test "$$subdir" = "."; then :; else \
-           rev="$$subdir $$rev"; \
-         fi; \
-       done; \
-       rev="$$rev ."; \
-       target=`echo $@ | sed s/-recursive//`; \
-       for subdir in $$rev; do \
-         echo "Making $$target in $$subdir"; \
-         if test "$$subdir" = "."; then \
-           local_target="$$target-am"; \
-         else \
-           local_target="$$target"; \
-         fi; \
-         (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
-         || eval $$failcom; \
-       done && test -z "$$fail"
-tags-recursive:
-       list='$(SUBDIRS)'; for subdir in $$list; do \
-         test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \
-       done
-ctags-recursive:
-       list='$(SUBDIRS)'; for subdir in $$list; do \
-         test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \
-       done
-
-ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
-       list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
-       unique=`for i in $$list; do \
-           if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
-         done | \
-         $(AWK) '    { files[$$0] = 1; } \
-              END { for (i in files) print i; }'`; \
-       mkid -fID $$unique
-tags: TAGS
-
-TAGS: tags-recursive $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
-               $(TAGS_FILES) $(LISP)
-       tags=; \
-       here=`pwd`; \
-       if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
-         include_option=--etags-include; \
-         empty_fix=.; \
-       else \
-         include_option=--include; \
-         empty_fix=; \
-       fi; \
-       list='$(SUBDIRS)'; for subdir in $$list; do \
-         if test "$$subdir" = .; then :; else \
-           test ! -f $$subdir/TAGS || \
-             tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \
-         fi; \
-       done; \
-       list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
-       unique=`for i in $$list; do \
-           if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
-         done | \
-         $(AWK) '    { files[$$0] = 1; } \
-              END { for (i in files) print i; }'`; \
-       if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
-         test -n "$$unique" || unique=$$empty_fix; \
-         $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
-           $$tags $$unique; \
-       fi
-ctags: CTAGS
-CTAGS: ctags-recursive $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
-               $(TAGS_FILES) $(LISP)
-       tags=; \
-       here=`pwd`; \
-       list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
-       unique=`for i in $$list; do \
-           if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
-         done | \
-         $(AWK) '    { files[$$0] = 1; } \
-              END { for (i in files) print i; }'`; \
-       test -z "$(CTAGS_ARGS)$$tags$$unique" \
-         || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
-            $$tags $$unique
-
-GTAGS:
-       here=`$(am__cd) $(top_builddir) && pwd` \
-         && cd $(top_srcdir) \
-         && gtags -i $(GTAGS_ARGS) $$here
-
-distclean-tags:
-       -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
-
-distdir: $(DISTFILES)
-       @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
-       topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
-       list='$(DISTFILES)'; for file in $$list; do \
-         case $$file in \
-           $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
-           $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
-         esac; \
-         if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
-         dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
-         if test "$$dir" != "$$file" && test "$$dir" != "."; then \
-           dir="/$$dir"; \
-           $(mkdir_p) "$(distdir)$$dir"; \
-         else \
-           dir=''; \
-         fi; \
-         if test -d $$d/$$file; then \
-           if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
-             cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
-           fi; \
-           cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
-         else \
-           test -f $(distdir)/$$file \
-           || cp -p $$d/$$file $(distdir)/$$file \
-           || exit 1; \
-         fi; \
-       done
-       list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
-         if test "$$subdir" = .; then :; else \
-           test -d "$(distdir)/$$subdir" \
-           || $(mkdir_p) "$(distdir)/$$subdir" \
-           || exit 1; \
-           distdir=`$(am__cd) $(distdir) && pwd`; \
-           top_distdir=`$(am__cd) $(top_distdir) && pwd`; \
-           (cd $$subdir && \
-             $(MAKE) $(AM_MAKEFLAGS) \
-               top_distdir="$$top_distdir" \
-               distdir="$$distdir/$$subdir" \
-               distdir) \
-             || exit 1; \
-         fi; \
-       done
-check-am: all-am
-check: check-recursive
-all-am: GNUmakefile
-installdirs: installdirs-recursive
-installdirs-am:
-install: install-recursive
-install-exec: install-exec-recursive
-install-data: install-data-recursive
-uninstall: uninstall-recursive
-
-install-am: all-am
-       @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-recursive
-install-strip:
-       $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
-         install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
-         `test -z '$(STRIP)' || \
-           echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
-       -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-
-maintainer-clean-generic:
-       @echo "This command is intended for maintainers to use"
-       @echo "it deletes files that may require special tools to rebuild."
-clean: clean-recursive
-
-clean-am: clean-generic clean-libtool mostlyclean-am
-
-distclean: distclean-recursive
-       -rm -f GNUmakefile
-distclean-am: clean-am distclean-generic distclean-libtool \
-       distclean-tags
-
-dvi: dvi-recursive
-
-dvi-am:
-
-html: html-recursive
-
-info: info-recursive
-
-info-am:
-
-install-data-am:
-
-install-exec-am:
-
-install-info: install-info-recursive
-
-install-man:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-recursive
-       -rm -f GNUmakefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-recursive
-
-mostlyclean-am: mostlyclean-generic mostlyclean-libtool
-
-pdf: pdf-recursive
-
-pdf-am:
-
-ps: ps-recursive
-
-ps-am:
-
-uninstall-am: uninstall-info-am
-
-uninstall-info: uninstall-info-recursive
-
-.PHONY: $(RECURSIVE_TARGETS) CTAGS GTAGS all all-am check check-am \
-       clean clean-generic clean-libtool clean-recursive ctags \
-       ctags-recursive distclean distclean-generic distclean-libtool \
-       distclean-recursive distclean-tags distdir dvi dvi-am html \
-       html-am info info-am install install-am install-data \
-       install-data-am install-exec install-exec-am install-info \
-       install-info-am install-man install-strip installcheck \
-       installcheck-am installdirs installdirs-am maintainer-clean \
-       maintainer-clean-generic maintainer-clean-recursive \
-       mostlyclean mostlyclean-generic mostlyclean-libtool \
-       mostlyclean-recursive pdf pdf-am ps ps-am tags tags-recursive \
-       uninstall uninstall-am uninstall-info-am
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff -Nru --exclude=.svn xenomai-orig/arch/ppc64/hal/fpu.S xenomai-devel/arch/ppc64/hal/fpu.S
--- xenomai-orig/arch/ppc64/hal/fpu.S   2005-10-11 10:32:30.000000000 +0300
+++ xenomai-devel/arch/ppc64/hal/fpu.S  1970-01-01 02:00:00.000000000 +0200
@@ -1,74 +0,0 @@
-/*
- * arch/ppc64/hal/fpu.S
- *
- * Fusion 64-bit PowerPC adoption
- * Copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
- * based on previous work:
- *
- * Copyright (C) 2001,2002,2003,2004 Philippe Gerum.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
- * USA; either version 2 of the License, or (at your option) any later
- * version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <asm/processor.h>
-#include <asm/ppc_asm.h>
-#include <xeno_config.h> 
-
-#define RTHAL_FPSAVE(n, base)  stfd n,8*(n)(base)
-#define RTHAL_FPSAVE2(n, base) RTHAL_FPSAVE(n, base); RTHAL_FPSAVE(n+1, base)
-#define RTHAL_FPSAVE4(n, base) RTHAL_FPSAVE2(n, base); RTHAL_FPSAVE2(n+2, base)
-#define RTHAL_FPSAVE8(n, base) RTHAL_FPSAVE4(n, base); RTHAL_FPSAVE4(n+4, base)
-#define RTHAL_FPSAVE16(n, base)        RTHAL_FPSAVE8(n, base); RTHAL_FPSAVE8(n+8, base)
-#define RTHAL_FPSAVE32(n, base)        RTHAL_FPSAVE16(n, base); RTHAL_FPSAVE16(n+16, base)
-
-/* r3 = &tcb->fpuenv */
-_GLOBAL(rthal_save_fpu)
-       mfmsr   r5
-       ori     r5,r5,MSR_FP            /* Re-enable use of FPU. */
-       mtmsrd  r5                      /* Enable use of fpu. */
-       isync
-       RTHAL_FPSAVE32(0,r3)
-       mffs    fr0
-       stfd    fr0,8*32(r3)
-       blr
-
-#define RTHAL_FPLOAD(n, base)  lfd n,8*(n)(base)
-#define RTHAL_FPLOAD2(n, base) RTHAL_FPLOAD(n, base); RTHAL_FPLOAD(n+1, base)
-#define RTHAL_FPLOAD4(n, base) RTHAL_FPLOAD2(n, base); RTHAL_FPLOAD2(n+2, base)
-#define RTHAL_FPLOAD8(n, base) RTHAL_FPLOAD4(n, base); RTHAL_FPLOAD4(n+4, base)
-#define RTHAL_FPLOAD16(n, base)        RTHAL_FPLOAD8(n, base); RTHAL_FPLOAD8(n+8, base)
-#define RTHAL_FPLOAD32(n, base)        RTHAL_FPLOAD16(n, base); RTHAL_FPLOAD16(n+16, base)
-
-/* r3 = &tcb->fpuenv */
-_GLOBAL(rthal_init_fpu)
-       mfmsr   r5
-       ori     r5,r5,MSR_FP|MSR_FE1    /* RT kernel threads always operate in */
-       li      r4,MSR_FE0              /* imprecise non-recoverable exception mode. */
-       andc    r5,r5,r4
-       mtmsrd  r5
-
-       /* Fallback wanted. */
-       
-/* r3 = &tcb->fpuenv */
-_GLOBAL(rthal_restore_fpu)
-       mfmsr   r5
-       ori     r5,r5,MSR_FP            /* Re-enable use of FPU. */
-       mtmsrd  r5                      /* Enable use of fpu. */
-       isync
-       lfd     fr0,8*32(r3)
-       mtfsf   0xff,0
-       RTHAL_FPLOAD32(0,r3)
-       blr
diff -Nru --exclude=.svn xenomai-orig/arch/ppc64/hal/GNUmakefile.am xenomai-devel/arch/ppc64/hal/GNUmakefile.am
--- xenomai-orig/arch/ppc64/hal/GNUmakefile.am  2005-10-23 11:00:14.000000000 +0300
+++ xenomai-devel/arch/ppc64/hal/GNUmakefile.am 1970-01-01 02:00:00.000000000 +0200
@@ -1,33 +0,0 @@
-moduledir = $(DESTDIR)@XENO_MODULE_DIR@
-
-modext = @XENO_MODULE_EXT@
-
-CROSS_COMPILE = @CROSS_COMPILE@
-
-libhal_SRC = ppc64.c switch.S
-
-if CONFIG_XENO_HW_FPU
-libhal_SRC += fpu.S
-endif
-distfiles = fpu.S
-
-xeno_hal.ko: @XENO_KBUILD_ENV@
-xeno_hal.ko: $(libhal_SRC) generic.c FORCE
-       @XENO_KBUILD_CMD@ xeno_extradef="@XENO_KMOD_CFLAGS@"
-
-clean-local:
-       @XENO_KBUILD_CLEAN@
-
-all-local: xeno_hal$(modext)
-if CONFIG_XENO_OLD_FASHIONED_BUILD
-       $(mkinstalldirs) $(top_srcdir)/modules
-       $(INSTALL_DATA) $^ $(top_srcdir)/modules
-endif
-
-install-exec-local: xeno_hal$(modext)
-       $(mkinstalldirs) $(moduledir)
-       $(INSTALL_DATA) $< $(moduledir)
-
-.PHONY: FORCE
-
-EXTRA_DIST = $(libhal_SRC) $(distfiles) Makefile
diff -Nru --exclude=.svn xenomai-orig/arch/ppc64/hal/GNUmakefile.in xenomai-devel/arch/ppc64/hal/GNUmakefile.in
--- xenomai-orig/arch/ppc64/hal/GNUmakefile.in  2005-10-23 11:00:14.000000000 +0300
+++ xenomai-devel/arch/ppc64/hal/GNUmakefile.in 1970-01-01 02:00:00.000000000 +0200
@@ -1,479 +0,0 @@
-# GNUmakefile.in generated by automake 1.9.5 from GNUmakefile.am.
-# @configure_input@
-
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005  Free Software Foundation, Inc.
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-srcdir = @srcdir@
-top_srcdir = @top_srcdir@
-VPATH = @srcdir@
-pkgdatadir = $(datadir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-top_builddir = ../../..
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-INSTALL = @INSTALL@
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-target_triplet = @target@
-@CONFIG_XENO_HW_FPU_TRUE@am__append_1 = fpu.S
-subdir = arch/ppc64/hal
-DIST_COMMON = $(srcdir)/GNUmakefile.am $(srcdir)/GNUmakefile.in
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps =  \
-       $(top_srcdir)/config/autoconf/ac_prog_cc_for_build.m4 \
-       $(top_srcdir)/config/autoconf/docbook.m4 \
-       $(top_srcdir)/config/version $(top_srcdir)/configure.in
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
-       $(ACLOCAL_M4)
-mkinstalldirs = $(install_sh) -d
-CONFIG_HEADER = $(top_builddir)/include/xeno_config.h
-CONFIG_CLEAN_FILES =
-SOURCES =
-DIST_SOURCES =
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = @ACLOCAL@
-AMDEP_FALSE = @AMDEP_FALSE@
-AMDEP_TRUE = @AMDEP_TRUE@
-AMTAR = @AMTAR@
-AR = @AR@
-AUTOCONF = @AUTOCONF@
-AUTOHEADER = @AUTOHEADER@
-AUTOMAKE = @AUTOMAKE@
-AWK = @AWK@
-BUILD_EXEEXT = @BUILD_EXEEXT@
-BUILD_OBJEXT = @BUILD_OBJEXT@
-CC = @CC@
-CCAS = @CCAS@
-CCASFLAGS = @CCASFLAGS@
-CCDEPMODE = @CCDEPMODE@
-CC_FOR_BUILD = @CC_FOR_BUILD@
-CFLAGS = @CFLAGS@
-CFLAGS_FOR_BUILD = @CFLAGS_FOR_BUILD@
-CONFIG_IA64_FALSE = @CONFIG_IA64_FALSE@
-CONFIG_IA64_TRUE = @CONFIG_IA64_TRUE@
-CONFIG_LTT_FALSE = @CONFIG_LTT_FALSE@
-CONFIG_LTT_TRUE = @CONFIG_LTT_TRUE@
-CONFIG_PPC64_FALSE = @CONFIG_PPC64_FALSE@
-CONFIG_PPC64_TRUE = @CONFIG_PPC64_TRUE@
-CONFIG_PPC_FALSE = @CONFIG_PPC_FALSE@
-CONFIG_PPC_TRUE = @CONFIG_PPC_TRUE@
-CONFIG_SMP_FALSE = @CONFIG_SMP_FALSE@
-CONFIG_SMP_TRUE = @CONFIG_SMP_TRUE@
-CONFIG_X86_FALSE = @CONFIG_X86_FALSE@
-CONFIG_X86_LOCAL_APIC_FALSE = @CONFIG_X86_LOCAL_APIC_FALSE@
-CONFIG_X86_LOCAL_APIC_TRUE = @CONFIG_X86_LOCAL_APIC_TRUE@
-CONFIG_X86_TRUE = @CONFIG_X86_TRUE@
-CONFIG_XENO_DOC_DOX_FALSE = @CONFIG_XENO_DOC_DOX_FALSE@
-CONFIG_XENO_DOC_DOX_TRUE = @CONFIG_XENO_DOC_DOX_TRUE@
-CONFIG_XENO_DRIVERS_16550A_FALSE = @CONFIG_XENO_DRIVERS_16550A_FALSE@
-CONFIG_XENO_DRIVERS_16550A_TRUE = @CONFIG_XENO_DRIVERS_16550A_TRUE@
-CONFIG_XENO_HW_FPU_FALSE = @CONFIG_XENO_HW_FPU_FALSE@
-CONFIG_XENO_HW_FPU_TRUE = @CONFIG_XENO_HW_FPU_TRUE@
-CONFIG_XENO_HW_NMI_DEBUG_LATENCY_FALSE = @CONFIG_XENO_HW_NMI_DEBUG_LATENCY_FALSE@
-CONFIG_XENO_HW_NMI_DEBUG_LATENCY_TRUE = @CONFIG_XENO_HW_NMI_DEBUG_LATENCY_TRUE@
-CONFIG_XENO_HW_SMI_DETECT_FALSE = @CONFIG_XENO_HW_SMI_DETECT_FALSE@
-CONFIG_XENO_HW_SMI_DETECT_TRUE = @CONFIG_XENO_HW_SMI_DETECT_TRUE@
-CONFIG_XENO_MAINT_FALSE = @CONFIG_XENO_MAINT_FALSE@
-CONFIG_XENO_MAINT_GCH_FALSE = @CONFIG_XENO_MAINT_GCH_FALSE@
-CONFIG_XENO_MAINT_GCH_TRUE = @CONFIG_XENO_MAINT_GCH_TRUE@
-CONFIG_XENO_MAINT_PGM_FALSE = @CONFIG_XENO_MAINT_PGM_FALSE@
-CONFIG_XENO_MAINT_PGM_TRUE = @CONFIG_XENO_MAINT_PGM_TRUE@
-CONFIG_XENO_MAINT_TRUE = @CONFIG_XENO_MAINT_TRUE@
-CONFIG_XENO_OLD_FASHIONED_BUILD_FALSE = @CONFIG_XENO_OLD_FASHIONED_BUILD_FALSE@
-CONFIG_XENO_OLD_FASHIONED_BUILD_TRUE = @CONFIG_XENO_OLD_FASHIONED_BUILD_TRUE@
-CONFIG_XENO_OPT_CONFIG_GZ_FALSE = @CONFIG_XENO_OPT_CONFIG_GZ_FALSE@
-CONFIG_XENO_OPT_CONFIG_GZ_TRUE = @CONFIG_XENO_OPT_CONFIG_GZ_TRUE@
-CONFIG_XENO_OPT_NATIVE_ALARM_FALSE = @CONFIG_XENO_OPT_NATIVE_ALARM_FALSE@
-CONFIG_XENO_OPT_NATIVE_ALARM_TRUE = @CONFIG_XENO_OPT_NATIVE_ALARM_TRUE@
-CONFIG_XENO_OPT_NATIVE_COND_FALSE = @CONFIG_XENO_OPT_NATIVE_COND_FALSE@
-CONFIG_XENO_OPT_NATIVE_COND_TRUE = @CONFIG_XENO_OPT_NATIVE_COND_TRUE@
-CONFIG_XENO_OPT_NATIVE_EVENT_FALSE = @CONFIG_XENO_OPT_NATIVE_EVENT_FALSE@
-CONFIG_XENO_OPT_NATIVE_EVENT_TRUE = @CONFIG_XENO_OPT_NATIVE_EVENT_TRUE@
-CONFIG_XENO_OPT_NATIVE_HEAP_FALSE = @CONFIG_XENO_OPT_NATIVE_HEAP_FALSE@
-CONFIG_XENO_OPT_NATIVE_HEAP_TRUE = @CONFIG_XENO_OPT_NATIVE_HEAP_TRUE@
-CONFIG_XENO_OPT_NATIVE_INTR_FALSE = @CONFIG_XENO_OPT_NATIVE_INTR_FALSE@
-CONFIG_XENO_OPT_NATIVE_INTR_TRUE = @CONFIG_XENO_OPT_NATIVE_INTR_TRUE@
-CONFIG_XENO_OPT_NATIVE_MUTEX_FALSE = @CONFIG_XENO_OPT_NATIVE_MUTEX_FALSE@
-CONFIG_XENO_OPT_NATIVE_MUTEX_TRUE = @CONFIG_XENO_OPT_NATIVE_MUTEX_TRUE@
-CONFIG_XENO_OPT_NATIVE_PIPE_FALSE = @CONFIG_XENO_OPT_NATIVE_PIPE_FALSE@
-CONFIG_XENO_OPT_NATIVE_PIPE_TRUE = @CONFIG_XENO_OPT_NATIVE_PIPE_TRUE@
-CONFIG_XENO_OPT_NATIVE_QUEUE_FALSE = @CONFIG_XENO_OPT_NATIVE_QUEUE_FALSE@
-CONFIG_XENO_OPT_NATIVE_QUEUE_TRUE = @CONFIG_XENO_OPT_NATIVE_QUEUE_TRUE@
-CONFIG_XENO_OPT_NATIVE_REGISTRY_FALSE = @CONFIG_XENO_OPT_NATIVE_REGISTRY_FALSE@
-CONFIG_XENO_OPT_NATIVE_REGISTRY_TRUE = @CONFIG_XENO_OPT_NATIVE_REGISTRY_TRUE@
-CONFIG_XENO_OPT_NATIVE_SEM_FALSE = @CONFIG_XENO_OPT_NATIVE_SEM_FALSE@
-CONFIG_XENO_OPT_NATIVE_SEM_TRUE = @CONFIG_XENO_OPT_NATIVE_SEM_TRUE@
-CONFIG_XENO_OPT_PERVASIVE_FALSE = @CONFIG_XENO_OPT_PERVASIVE_FALSE@
-CONFIG_XENO_OPT_PERVASIVE_TRUE = @CONFIG_XENO_OPT_PERVASIVE_TRUE@
-CONFIG_XENO_OPT_PIPE_FALSE = @CONFIG_XENO_OPT_PIPE_FALSE@
-CONFIG_XENO_OPT_PIPE_TRUE = @CONFIG_XENO_OPT_PIPE_TRUE@
-CONFIG_XENO_OPT_RTAI_FIFO_FALSE = @CONFIG_XENO_OPT_RTAI_FIFO_FALSE@
-CONFIG_XENO_OPT_RTAI_FIFO_TRUE = @CONFIG_XENO_OPT_RTAI_FIFO_TRUE@
-CONFIG_XENO_OPT_RTAI_SEM_FALSE = @CONFIG_XENO_OPT_RTAI_SEM_FALSE@
-CONFIG_XENO_OPT_RTAI_SEM_TRUE = @CONFIG_XENO_OPT_RTAI_SEM_TRUE@
-CONFIG_XENO_OPT_RTAI_SHM_FALSE = @CONFIG_XENO_OPT_RTAI_SHM_FALSE@
-CONFIG_XENO_OPT_RTAI_SHM_TRUE = @CONFIG_XENO_OPT_RTAI_SHM_TRUE@
-CONFIG_XENO_OPT_UDEV_FALSE = @CONFIG_XENO_OPT_UDEV_FALSE@
-CONFIG_XENO_OPT_UDEV_TRUE = @CONFIG_XENO_OPT_UDEV_TRUE@
-CONFIG_XENO_OPT_UVM_FALSE = @CONFIG_XENO_OPT_UVM_FALSE@
-CONFIG_XENO_OPT_UVM_TRUE = @CONFIG_XENO_OPT_UVM_TRUE@
-CONFIG_XENO_SKIN_NATIVE_FALSE = @CONFIG_XENO_SKIN_NATIVE_FALSE@
-CONFIG_XENO_SKIN_NATIVE_TRUE = @CONFIG_XENO_SKIN_NATIVE_TRUE@
-CONFIG_XENO_SKIN_POSIX_FALSE = @CONFIG_XENO_SKIN_POSIX_FALSE@
-CONFIG_XENO_SKIN_POSIX_TRUE = @CONFIG_XENO_SKIN_POSIX_TRUE@
-CONFIG_XENO_SKIN_PSOS_FALSE = @CONFIG_XENO_SKIN_PSOS_FALSE@
-CONFIG_XENO_SKIN_PSOS_TRUE = @CONFIG_XENO_SKIN_PSOS_TRUE@
-CONFIG_XENO_SKIN_RTAI_FALSE = @CONFIG_XENO_SKIN_RTAI_FALSE@
-CONFIG_XENO_SKIN_RTAI_TRUE = @CONFIG_XENO_SKIN_RTAI_TRUE@
-CONFIG_XENO_SKIN_RTDM_FALSE = @CONFIG_XENO_SKIN_RTDM_FALSE@
-CONFIG_XENO_SKIN_RTDM_TRUE = @CONFIG_XENO_SKIN_RTDM_TRUE@
-CONFIG_XENO_SKIN_UITRON_FALSE = @CONFIG_XENO_SKIN_UITRON_FALSE@
-CONFIG_XENO_SKIN_UITRON_TRUE = @CONFIG_XENO_SKIN_UITRON_TRUE@
-CONFIG_XENO_SKIN_VRTX_FALSE = @CONFIG_XENO_SKIN_VRTX_FALSE@
-CONFIG_XENO_SKIN_VRTX_TRUE = @CONFIG_XENO_SKIN_VRTX_TRUE@
-CONFIG_XENO_SKIN_VXWORKS_FALSE = @CONFIG_XENO_SKIN_VXWORKS_FALSE@
-CONFIG_XENO_SKIN_VXWORKS_TRUE = @CONFIG_XENO_SKIN_VXWORKS_TRUE@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CPPFLAGS_FOR_BUILD = @CPPFLAGS_FOR_BUILD@
-CPP_FOR_BUILD = @CPP_FOR_BUILD@
-CROSS_COMPILE = @CROSS_COMPILE@
-CXX = @CXX@
-CXXCPP = @CXXCPP@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CYGPATH_W = @CYGPATH_W@
-DBX_ABS_SRCDIR_FALSE = @DBX_ABS_SRCDIR_FALSE@
-DBX_ABS_SRCDIR_TRUE = @DBX_ABS_SRCDIR_TRUE@
-DBX_DOC_FALSE = @DBX_DOC_FALSE@
-DBX_DOC_ROOT = @DBX_DOC_ROOT@
-DBX_DOC_TRUE = @DBX_DOC_TRUE@
-DBX_FOP = @DBX_FOP@
-DBX_GEN_DOC_ROOT = @DBX_GEN_DOC_ROOT@
-DBX_LINT = @DBX_LINT@
-DBX_MAYBE_NONET = @DBX_MAYBE_NONET@
-DBX_ROOT = @DBX_ROOT@
-DBX_XSLTPROC = @DBX_XSLTPROC@
-DBX_XSL_ROOT = @DBX_XSL_ROOT@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-DOXYGEN = @DOXYGEN@
-DOXYGEN_HAVE_DOT = @DOXYGEN_HAVE_DOT@
-DOXYGEN_SHOW_INCLUDE_FILES = @DOXYGEN_SHOW_INCLUDE_FILES@
-ECHO = @ECHO@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-F77 = @F77@
-FFLAGS = @FFLAGS@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-LATEX_BATCHMODE = @LATEX_BATCHMODE@
-LATEX_MODE = @LATEX_MODE@
-LDFLAGS = @LDFLAGS@
-LEX = @LEX@
-LEXLIB = @LEXLIB@
-LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBTOOL = @LIBTOOL@
-LN_S = @LN_S@
-LTLIBOBJS = @LTLIBOBJS@
-MAINT = @MAINT@
-MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@
-MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@
-MAKEINFO = @MAKEINFO@
-OBJEXT = @OBJEXT@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-RANLIB = @RANLIB@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-STRIP = @STRIP@
-VERSION = @VERSION@
-XENO_BUILD_STRING = @XENO_BUILD_STRING@
-XENO_FP_CFLAGS = @XENO_FP_CFLAGS@
-XENO_HOST_STRING = @XENO_HOST_STRING@
-XENO_KBUILD_CLEAN = @XENO_KBUILD_CLEAN@
-XENO_KBUILD_CMD = @XENO_KBUILD_CMD@
-XENO_KBUILD_DISTCLEAN = @XENO_KBUILD_DISTCLEAN@
-XENO_KBUILD_ENV = @XENO_KBUILD_ENV@
-XENO_KMOD_APP_CFLAGS = @XENO_KMOD_APP_CFLAGS@
-XENO_KMOD_CFLAGS = @XENO_KMOD_CFLAGS@
-XENO_LINUX_DIR = @XENO_LINUX_DIR@
-XENO_LINUX_VERSION = @XENO_LINUX_VERSION@
-XENO_MAYBE_DOCDIR = @XENO_MAYBE_DOCDIR@
-XENO_MAYBE_SIMDIR = @XENO_MAYBE_SIMDIR@
-XENO_MODULE_DIR = @XENO_MODULE_DIR@
-XENO_MODULE_EXT = @XENO_MODULE_EXT@
-XENO_PIPE_NRDEV = @XENO_PIPE_NRDEV@
-XENO_SYMBOL_DIR = @XENO_SYMBOL_DIR@
-XENO_TARGET_ARCH = @XENO_TARGET_ARCH@
-XENO_TARGET_SUBARCH = @XENO_TARGET_SUBARCH@
-XENO_USER_APP_CFLAGS = @XENO_USER_APP_CFLAGS@
-XENO_USER_CFLAGS = @XENO_USER_CFLAGS@
-ac_ct_AR = @ac_ct_AR@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CC_FOR_BUILD = @ac_ct_CC_FOR_BUILD@
-ac_ct_CXX = @ac_ct_CXX@
-ac_ct_F77 = @ac_ct_F77@
-ac_ct_RANLIB = @ac_ct_RANLIB@
-ac_ct_STRIP = @ac_ct_STRIP@
-am__fastdepCC_FALSE = @am__fastdepCC_FALSE@
-am__fastdepCC_TRUE = @am__fastdepCC_TRUE@
-am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@
-am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_vendor = @build_vendor@
-datadir = @datadir@
-exec_prefix = @exec_prefix@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localstatedir = @localstatedir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-oldincludedir = @oldincludedir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-subdirs = @subdirs@
-sysconfdir = @sysconfdir@
-target = @target@
-target_alias = @target_alias@
-target_cpu = @target_cpu@
-target_os = @target_os@
-target_vendor = @target_vendor@
-moduledir = $(DESTDIR)@XENO_MODULE_DIR@
-modext = @XENO_MODULE_EXT@
-libhal_SRC = ppc64.c switch.S $(am__append_1)
-distfiles = fpu.S
-EXTRA_DIST = $(libhal_SRC) $(distfiles) Makefile
-all: all-am
-
-.SUFFIXES:
-$(srcdir)/GNUmakefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/GNUmakefile.am  
$(am__configure_deps)
-       @for dep in $?; do \
-         case '$(am__configure_deps)' in \
-           *$$dep*) \
-             cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
-               && exit 0; \
-             exit 1;; \
-         esac; \
-       done; \
-       echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign  
arch/ppc64/hal/GNUmakefile'; \
-       cd $(top_srcdir) && \
-         $(AUTOMAKE) --foreign  arch/ppc64/hal/GNUmakefile
-.PRECIOUS: GNUmakefile
-GNUmakefile: $(srcdir)/GNUmakefile.in $(top_builddir)/config.status
-       @case '$?' in \
-         *config.status*) \
-           cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
-         *) \
-           echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ 
$(am__depfiles_maybe)'; \
-           cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ 
$(am__depfiles_maybe);; \
-       esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure 
$(CONFIG_STATUS_DEPENDENCIES)
-       cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
-       cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
-       cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-mostlyclean-libtool:
-       -rm -f *.lo
-
-clean-libtool:
-       -rm -rf .libs _libs
-
-distclean-libtool:
-       -rm -f libtool
-uninstall-info-am:
-tags: TAGS
-TAGS:
-
-ctags: CTAGS
-CTAGS:
-
-
-distdir: $(DISTFILES)
-       @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
-       topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
-       list='$(DISTFILES)'; for file in $$list; do \
-         case $$file in \
-           $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
-           $(top_srcdir)/*) file=`echo "$$file" | sed 
"s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
-         esac; \
-         if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
-         dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
-         if test "$$dir" != "$$file" && test "$$dir" != "."; then \
-           dir="/$$dir"; \
-           $(mkdir_p) "$(distdir)$$dir"; \
-         else \
-           dir=''; \
-         fi; \
-         if test -d $$d/$$file; then \
-           if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
-             cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
-           fi; \
-           cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
-         else \
-           test -f $(distdir)/$$file \
-           || cp -p $$d/$$file $(distdir)/$$file \
-           || exit 1; \
-         fi; \
-       done
-check-am: all-am
-check: check-am
-all-am: GNUmakefile all-local
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
-       @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
-       $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
-         install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
-         `test -z '$(STRIP)' || \
-           echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
-       -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-
-maintainer-clean-generic:
-       @echo "This command is intended for maintainers to use"
-       @echo "it deletes files that may require special tools to rebuild."
-clean: clean-am
-
-clean-am: clean-generic clean-libtool clean-local mostlyclean-am
-
-distclean: distclean-am
-       -rm -f GNUmakefile
-distclean-am: clean-am distclean-generic distclean-libtool
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-exec-am: install-exec-local
-
-install-info: install-info-am
-
-install-man:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
-       -rm -f GNUmakefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-generic mostlyclean-libtool
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am: uninstall-info-am
-
-.PHONY: all all-am all-local check check-am clean clean-generic \
-       clean-libtool clean-local distclean distclean-generic \
-       distclean-libtool distdir dvi dvi-am html html-am info info-am \
-       install install-am install-data install-data-am install-exec \
-       install-exec-am install-exec-local install-info \
-       install-info-am install-man install-strip installcheck \
-       installcheck-am installdirs maintainer-clean \
-       maintainer-clean-generic mostlyclean mostlyclean-generic \
-       mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \
-       uninstall-info-am
-
-
-xeno_hal.ko: @XENO_KBUILD_ENV@
-xeno_hal.ko: $(libhal_SRC) generic.c FORCE
-       @XENO_KBUILD_CMD@ xeno_extradef="@XENO_KMOD_CFLAGS@"
-
-clean-local:
-       @XENO_KBUILD_CLEAN@
-
-all-local: xeno_hal$(modext)
-@CONFIG_XENO_MAINT_TRUE@      $(mkinstalldirs) $(top_srcdir)/modules
-@CONFIG_XENO_MAINT_TRUE@      $(INSTALL_DATA) $^ $(top_srcdir)/modules
-
-install-exec-local: xeno_hal$(modext)
-       $(mkinstalldirs) $(moduledir)
-       $(INSTALL_DATA) $< $(moduledir)
-
-.PHONY: FORCE
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff -Nru --exclude=.svn xenomai-orig/arch/ppc64/hal/Makefile 
xenomai-devel/arch/ppc64/hal/Makefile
--- xenomai-orig/arch/ppc64/hal/Makefile        2005-10-23 11:00:14.000000000 
+0300
+++ xenomai-devel/arch/ppc64/hal/Makefile       1970-01-01 02:00:00.000000000 
+0200
@@ -1,13 +0,0 @@
-EXTRA_CFLAGS += -I$(xeno_srctree)/include \
-               -I$(src)/../../../include \
-               -I$(src)/../../.. \
-               $(xeno_extradef)
-
-EXTRA_AFLAGS += -I$(xeno_srctree)/include \
-               -I$(src)/../../../include \
-               -I$(src)/../../.. \
-               $(xeno_extradef)
-
-obj-m += xeno_hal.o
-
-xeno_hal-objs := $(xeno_objs)
diff -Nru --exclude=.svn xenomai-orig/arch/ppc64/hal/ppc64.c 
xenomai-devel/arch/ppc64/hal/ppc64.c
--- xenomai-orig/arch/ppc64/hal/ppc64.c 2005-10-11 10:32:30.000000000 +0300
+++ xenomai-devel/arch/ppc64/hal/ppc64.c        1970-01-01 02:00:00.000000000 
+0200
@@ -1,187 +0,0 @@
-/**
- *   @ingroup hal
- *   @file
- *
- *   Adeos-based Real-Time Abstraction Layer for PPC64.
- *
- *   Fusion 64-bit PowerPC adoption
- *   Copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
- *   based on previous work:
- *  
- *   Copyright &copy; 2002 Philippe Gerum.
- *
- *   Xenomai is free software; you can redistribute it and/or
- *   modify it under the terms of the GNU General Public License as
- *   published by the Free Software Foundation, Inc., 675 Mass Ave,
- *   Cambridge MA 02139, USA; either version 2 of the License, or (at
- *   your option) any later version.
- *
- *   Xenomai is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *   General Public License for more details.
- *
- *   You should have received a copy of the GNU General Public License
- *   along with this program; if not, write to the Free Software
- *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
- *   02111-1307, USA.
- */
-
-/**
- * \ingroup hal
- * @addtogroup hal 
- *
- * PowerPC64-specific HAL services.
- *
- *@{*/
-
-#include <linux/version.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/console.h>
-#include <linux/kallsyms.h>
-#include <asm/system.h>
-#include <asm/hw_irq.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/unistd.h>
-#include <nucleus/asm/hal.h>
-#ifdef CONFIG_PROC_FS
-#include <linux/proc_fs.h>
-#endif /* CONFIG_PROC_FS */
-#include <stdarg.h>
-
-static int rthal_periodic_p;
-
-int rthal_timer_request (void (*handler)(void),
-                        unsigned long nstick)
-{
-    unsigned long flags;
-    int err;
-
-    flags = rthal_critical_enter(NULL);
-
-    if (nstick > 0)
-       {
-       /* Periodic setup --
-          Use the built-in Adeos service directly. */
-       err = rthal_set_timer(nstick);
-       rthal_periodic_p = 1;
-       }
-    else
-       {
-       /* Oneshot setup. */
-       disarm_decr[rthal_processor_id()] = 1;
-       rthal_periodic_p = 0;
-       rthal_timer_program_shot(tb_ticks_per_jiffy);
-       }
-
-    rthal_irq_release(RTHAL_TIMER_IRQ);
-
-    err = rthal_irq_request(RTHAL_TIMER_IRQ,
-                           (rthal_irq_handler_t)handler,
-                           NULL,
-                           NULL);
-
-    rthal_critical_exit(flags);
-
-    return err;
-}
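
For reference, a minimal (hypothetical) caller of the timer API removed above would
look roughly like the sketch below; the handler name and the choice of oneshot mode
(nstick == 0) are illustrative only, not part of this patch:

/* Hypothetical client of rthal_timer_request()/rthal_timer_release()
 * as declared above.  nstick == 0 selects oneshot mode; a non-zero
 * value selects periodic mode with that tick duration. */
static void my_tick_handler(void)          /* illustrative name */
{
        /* dispatch or rearm timers here */
}

static int my_timer_setup(void)
{
        return rthal_timer_request(my_tick_handler, 0);  /* oneshot */
}

static void my_timer_cleanup(void)
{
        rthal_timer_release();
}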
-
-void rthal_timer_release (void)
-
-{
-    unsigned long flags;
-
-    flags = rthal_critical_enter(NULL);
-
-    if (rthal_periodic_p)
-       rthal_reset_timer();
-    else
-       {
-       disarm_decr[rthal_processor_id()] = 0;
-       set_dec(tb_ticks_per_jiffy);
-       }
-
-    rthal_irq_release(RTHAL_TIMER_IRQ);
-
-    rthal_critical_exit(flags);
-}
-
-unsigned long rthal_timer_calibrate (void)
-
-{
-    return 1000000000 / RTHAL_CPU_FREQ;
-}
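
rthal_timer_calibrate() above just reports the duration of one timebase tick in
nanoseconds. A standalone sketch of the same arithmetic, using an assumed 33.333 MHz
timebase (the frequency value is illustrative; the real code derives it from
rthal_get_cpufreq()):

#include <stdio.h>

int main(void)
{
        /* Assumed timebase frequency, for illustration only. */
        unsigned long timebase_hz = 33333333UL;
        unsigned long ns_per_tick = 1000000000UL / timebase_hz;

        printf("%lu ns per timebase tick\n", ns_per_tick);  /* prints 30 */
        return 0;
}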
-
-static inline int do_exception_event (unsigned event, unsigned domid, void 
*data)
-
-{
-    rthal_declare_cpuid;
-
-    rthal_load_cpuid();
-
-    if (domid == RTHAL_DOMAIN_ID)
-       {
-       rthal_realtime_faults[cpuid][event]++;
-
-       if (rthal_trap_handler != NULL &&
-           test_bit(cpuid,&rthal_cpu_realtime) &&
-           rthal_trap_handler(event,domid,data) != 0)
-           return RTHAL_EVENT_STOP;
-       }
-
-    return RTHAL_EVENT_PROPAGATE;
-}
-
-RTHAL_DECLARE_EVENT(exception_event);
-
-static inline void do_rthal_domain_entry (void)
-
-{
-    unsigned trapnr;
-
-    /* Trap all faults. */
-    for (trapnr = 0; trapnr < RTHAL_NR_FAULTS; trapnr++)
-       rthal_catch_exception(trapnr,&exception_event);
-
-    printk(KERN_INFO "Xenomai: hal/ppc64 loaded.\n");
-}
-
-RTHAL_DECLARE_DOMAIN(rthal_domain_entry);
-
-int rthal_arch_init (void)
-
-{
-    if (rthal_cpufreq_arg == 0)
-       {
-       /* The CPU frequency is expressed as the timebase frequency
-          for this port. */
-       rthal_cpufreq_arg = (unsigned long)rthal_get_cpufreq();
-       }
-
-    if (rthal_timerfreq_arg == 0)
-       rthal_timerfreq_arg = rthal_tunables.cpu_freq;
-
-    return 0;
-}
-
-void rthal_arch_cleanup (void)
-
-{
-    /* Nothing to cleanup so far. */
-}
-
-/*@}*/
-
-EXPORT_SYMBOL(rthal_switch_context);
-
-#ifdef CONFIG_XENO_HW_FPU
-EXPORT_SYMBOL(rthal_init_fpu);
-EXPORT_SYMBOL(rthal_save_fpu);
-EXPORT_SYMBOL(rthal_restore_fpu);
-#endif /* CONFIG_XENO_HW_FPU */
diff -Nru --exclude=.svn xenomai-orig/arch/ppc64/hal/switch.S 
xenomai-devel/arch/ppc64/hal/switch.S
--- xenomai-orig/arch/ppc64/hal/switch.S        2005-10-11 10:32:30.000000000 
+0300
+++ xenomai-devel/arch/ppc64/hal/switch.S       1970-01-01 02:00:00.000000000 
+0200
@@ -1,126 +0,0 @@
-/*
- * arch/ppc64/hal/switch.S
- *
- * Fusion 64-bit PowerPC adoption
- * Copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
- * based on previous work:
- *
- * Copyright (C) 2004 Philippe Gerum.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
- * USA; either version 2 of the License, or (at your option) any later
- * version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <asm/processor.h>
-#include <asm/cputable.h>
-#include <asm/page.h>
-#include <asm/offsets.h>
-#include <asm/ppc_asm.h>
-#include <xeno_config.h> 
-
-#define RTHAL_SAVEREG(reg, pos)        std reg,STACK_FRAME_OVERHEAD+8*(pos)(r1)
-#define RTHAL_LOADREG(reg, pos)        ld reg,STACK_FRAME_OVERHEAD+8*(pos)(r1)
-
-/*
- * r3=out_kspp, r4=in_kspp
- */
-_GLOBAL(rthal_switch_context)
-        stdu    r1,-224-STACK_FRAME_OVERHEAD(r1)
-
-        /* Save general purpose registers. */
-
-       RTHAL_SAVEREG(r31,0)
-       RTHAL_SAVEREG(r30,1)
-       RTHAL_SAVEREG(r29,2)
-       RTHAL_SAVEREG(r28,3)
-       RTHAL_SAVEREG(r27,4)
-       RTHAL_SAVEREG(r26,5)
-       RTHAL_SAVEREG(r25,6)
-       RTHAL_SAVEREG(r24,7)
-       RTHAL_SAVEREG(r23,8)
-       RTHAL_SAVEREG(r22,9)
-       RTHAL_SAVEREG(r21,10)
-       RTHAL_SAVEREG(r20,11)
-       RTHAL_SAVEREG(r19,12)
-       RTHAL_SAVEREG(r18,13)
-       RTHAL_SAVEREG(r17,14)
-       RTHAL_SAVEREG(r16,15)
-       RTHAL_SAVEREG(r15,16)
-       RTHAL_SAVEREG(r14,17)
-       RTHAL_SAVEREG(r13,18)
-       RTHAL_SAVEREG(r3,19)
-       RTHAL_SAVEREG(r2,20)
-       RTHAL_SAVEREG(r0,21)
-
-        /* Save special registers. */
-       
-       mfctr    r2
-       RTHAL_SAVEREG(r2,22)
-        mfcr     r2
-       RTHAL_SAVEREG(r2,23)
-        mfxer    r2
-       RTHAL_SAVEREG(r2,24)
-        mflr     r2
-       RTHAL_SAVEREG(r2,25)
-        mfmsr    r2
-       RTHAL_SAVEREG(r2,26)
-
-        /* Switch stacks. */
-       
-        std      r1,0(r3)       /* *out_kspp = sp */
-       /* TODO: VSIDs */
-        ld      r1,0(r4)       /* sp = *in_kspp */
-
-        /* Restore special registers. */
-
-       RTHAL_LOADREG(r2,26)
-        mtmsrd   r2
-       RTHAL_LOADREG(r2,25)
-        mtlr     r2
-       RTHAL_LOADREG(r2,24)
-        mtxer    r2
-       RTHAL_LOADREG(r2,23)
-        mtcr     r2
-       RTHAL_LOADREG(r2,22)
-        mtctr    r2
-
-       /* Restore general purpose registers. */
-       
-       RTHAL_LOADREG(r0,21)
-       RTHAL_LOADREG(r2,20)
-       RTHAL_LOADREG(r3,19)
-       RTHAL_LOADREG(r13,18)
-       RTHAL_LOADREG(r14,17)
-       RTHAL_LOADREG(r15,16)
-       RTHAL_LOADREG(r16,15)
-       RTHAL_LOADREG(r17,14)
-       RTHAL_LOADREG(r18,13)
-       RTHAL_LOADREG(r19,12)
-       RTHAL_LOADREG(r20,11)
-       RTHAL_LOADREG(r21,10)
-       RTHAL_LOADREG(r22,9)
-       RTHAL_LOADREG(r23,8)
-       RTHAL_LOADREG(r24,7)
-       RTHAL_LOADREG(r25,6)
-       RTHAL_LOADREG(r26,5)
-       RTHAL_LOADREG(r27,4)
-       RTHAL_LOADREG(r28,3)
-       RTHAL_LOADREG(r29,2)
-       RTHAL_LOADREG(r30,1)
-       RTHAL_LOADREG(r31,0)
-
-        addi    r1,r1,224+STACK_FRAME_OVERHEAD
-
-        blr
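
The RTHAL_SAVEREG/RTHAL_LOADREG macros above store each register at
STACK_FRAME_OVERHEAD + 8*pos bytes from the new stack pointer; 27 slots are used and
the frame is rounded up to 224 bytes. A small host-side sketch of the resulting
layout, assuming STACK_FRAME_OVERHEAD is 112 (the usual ppc64 ELF ABI value, not
taken from this patch):

#include <stdio.h>

#define STACK_FRAME_OVERHEAD 112   /* assumed ppc64 ABI value */

int main(void)
{
        const char *slot[] = {
                "r31","r30","r29","r28","r27","r26","r25","r24","r23",
                "r22","r21","r20","r19","r18","r17","r16","r15","r14",
                "r13","r3","r2","r0","ctr","cr","xer","lr","msr"
        };
        int nslots = sizeof(slot) / sizeof(slot[0]);   /* 27 slots */
        int pos;

        for (pos = 0; pos < nslots; pos++)
                printf("%-3s saved at sp+%d\n", slot[pos],
                       STACK_FRAME_OVERHEAD + 8 * pos);

        /* 27 * 8 = 216 bytes of save area, allocated as a 224-byte
         * frame: stdu r1,-224-STACK_FRAME_OVERHEAD(r1). */
        return 0;
}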
diff -Nru --exclude=.svn xenomai-orig/arch/ppc64/Kconfig 
xenomai-devel/arch/ppc64/Kconfig
--- xenomai-orig/arch/ppc64/Kconfig     2005-10-11 10:32:31.000000000 +0300
+++ xenomai-devel/arch/ppc64/Kconfig    1970-01-01 02:00:00.000000000 +0200
@@ -1,71 +0,0 @@
-mainmenu "Xenomai/powerpc64 configuration"
-
-source Kconfig
-
-source "nucleus/Kconfig"
-
-menu "Machine (powerpc64)"
-
-config XENO_HW_FPU
-       bool "Enable FPU support"
-       default y
-       help
-       The FPU executes instructions from the processor's normal
-       instruction stream. It can handle the types of high-precision
-       floating-point processing operations commonly found in
-       scientific, engineering, and business applications.
-       If your target system has no FPU, say NO here; otherwise,
-       enabling FPU support when the hardware is available may
-       greatly improve performance.
-
-config XENO_HW_PERIODIC_TIMER
-       bool "Enable periodic timer support"
-       default y
-       help
-       On this architecture, the nucleus provides both aperiodic and
-       periodic timing modes. In aperiodic mode, timing accuracy is
-       higher - since it is not rounded to a constant time slice - at
-       the expense of lower efficiency when many timers are
-       simultaneously active. The aperiodic mode gives better results
-       in configurations involving a few threads requesting timing
-       services over different time scales that cannot be easily
-       expressed as multiples of a single base tick, or would lead to
-       a waste of high frequency periodic ticks. You can disable
-       the periodic support for this architecture to save a few
-       hundred bytes if you plan to use the system timer in
-       aperiodic mode only.
-
-config XENO_HW_TIMER_LATENCY
-       depends on XENO_OPT_EXPERT
-       string "Timer tuning latency (ns)"
-       default 0
-       help
-       This parameter accounts for the time (in nanoseconds) needed
-       to program the underlying time source in one-shot timing mode.
-       This value will be used to reduce the scheduling jitter induced
-       by the time needed to setup the timer for its next shot. A
-       default value of 0 (recommended) will cause this value to be
-       estimated by the nucleus at startup.
-
-config XENO_HW_SCHED_LATENCY
-       depends on XENO_OPT_EXPERT
-       string "Scheduling latency (ns)"
-       default 0
-       help
-       Scheduling latency is the time between the termination of an
-       interrupt handler and the execution of the first instruction
-       of the real-time thread this handler resumes. A
-       default value of 0 (recommended) will cause this value to be
-       estimated by the nucleus at startup.
-
-endmenu
-
-source "skins/Kconfig"
-
-menu "Drivers"
-
-source "drivers/Kconfig"
-
-endmenu
-
-source "sim/Kconfig"
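
The two latency options in the machine menu above feed constant corrections into the
nucleus timer code. Conceptually, the one-shot programming path subtracts the tuned
timer latency so the handler fires close to the requested date; the sketch below is
illustrative only (names and values are assumptions, not nucleus code):

#include <stdio.h>

/* Conceptual sketch of latency compensation, not the actual nucleus code. */
int main(void)
{
        unsigned long long ideal_ns = 1000000;  /* assumed next shot: +1 ms */
        unsigned long timer_latency_ns = 2500;  /* assumed tuned XENO_HW_TIMER_LATENCY */

        printf("arm the decrementer for t+%llu ns\n",
               ideal_ns - timer_latency_ns);
        return 0;
}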
diff -Nru --exclude=.svn 
xenomai-orig/arch/ppc64/patches/adeos-linux-2.6.10-ppc64-r2.patch 
xenomai-devel/arch/ppc64/patches/adeos-linux-2.6.10-ppc64-r2.patch
--- xenomai-orig/arch/ppc64/patches/adeos-linux-2.6.10-ppc64-r2.patch   
2005-10-17 11:03:02.000000000 +0300
+++ xenomai-devel/arch/ppc64/patches/adeos-linux-2.6.10-ppc64-r2.patch  
1970-01-01 02:00:00.000000000 +0200
@@ -1,5430 +0,0 @@
-diff -Nru linux-2.6.10/adeos/generic.c 
linux-2.6.10-adeos-ppc64-r2/adeos/generic.c
---- linux-2.6.10/adeos/generic.c       1970-01-01 02:00:00.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/adeos/generic.c        2005-10-05 
10:34:53.000000000 +0300
-@@ -0,0 +1,640 @@
-+/*
-+ *   linux/adeos/generic.c
-+ *
-+ *   Copyright (C) 2002 Philippe Gerum.
-+ *
-+ *   This program is free software; you can redistribute it and/or modify
-+ *   it under the terms of the GNU General Public License as published by
-+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
-+ *   USA; either version 2 of the License, or (at your option) any later
-+ *   version.
-+ *
-+ *   This program is distributed in the hope that it will be useful,
-+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ *   GNU General Public License for more details.
-+ *
-+ *   You should have received a copy of the GNU General Public License
-+ *   along with this program; if not, write to the Free Software
-+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, 
USA.
-+ *
-+ *   Architecture-independent ADEOS services.
-+ */
-+
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/irq.h>
-+
-+MODULE_DESCRIPTION("Adeos nanokernel");
-+MODULE_AUTHOR("Philippe Gerum");
-+MODULE_LICENSE("GPL");
-+
-+/* adeos_register_domain() -- Add a new domain to the system. All
-+   client domains must call this routine to register themselves to
-+   ADEOS before using its services. */
-+
-+int adeos_register_domain (adomain_t *adp, adattr_t *attr)
-+
-+{
-+    struct list_head *pos;
-+    unsigned long flags;
-+    int n;
-+
-+    if (adp_current != adp_root)
-+      {
-+      printk(KERN_WARNING "Adeos: Only the root domain may register a new 
domain.\n");
-+      return -EPERM;
-+      }
-+
-+    flags = adeos_critical_enter(NULL);
-+
-+    list_for_each(pos,&__adeos_pipeline) {
-+      adomain_t *_adp = list_entry(pos,adomain_t,p_link);
-+      if (_adp->domid == attr->domid)
-+            break;
-+    }
-+
-+    adeos_critical_exit(flags);
-+
-+    if (pos != &__adeos_pipeline)
-+      /* A domain with the given id already exists -- fail. */
-+      return -EBUSY;
-+
-+    for (n = 0; n < ADEOS_NR_CPUS; n++)
-+      {
-+      /* Each domain starts in sleeping state on every CPU. */
-+      adp->cpudata[n].status = (1 << IPIPE_SLEEP_FLAG);
-+#ifdef CONFIG_ADEOS_THREADS
-+      adp->estackbase[n] = 0;
-+#endif /* CONFIG_ADEOS_THREADS */
-+      }
-+
-+    adp->name = attr->name;
-+    adp->priority = attr->priority;
-+    adp->domid = attr->domid;
-+    adp->dswitch = attr->dswitch;
-+    adp->flags = 0;
-+    adp->ptd_setfun = attr->ptdset;
-+    adp->ptd_getfun = attr->ptdget;
-+    adp->ptd_keymap = 0;
-+    adp->ptd_keycount = 0;
-+    adp->ptd_keymax = attr->nptdkeys;
-+
-+    for (n = 0; n < ADEOS_NR_EVENTS; n++)
-+      /* Event handlers must be cleared before the i-pipe stage is
-+         inserted since an exception may occur on behalf of the new
-+         emerging domain. */
-+      adp->events[n].handler = NULL;
-+
-+    if (attr->entry != NULL)
-+      __adeos_init_domain(adp,attr);
-+
-+    /* Insert the domain in the interrupt pipeline last, so it won't
-+       be resumed for processing interrupts until it has a valid stack
-+       context. */
-+
-+    __adeos_init_stage(adp);
-+
-+    INIT_LIST_HEAD(&adp->p_link);
-+
-+    flags = adeos_critical_enter(NULL);
-+
-+    list_for_each(pos,&__adeos_pipeline) {
-+      adomain_t *_adp = list_entry(pos,adomain_t,p_link);
-+      if (adp->priority > _adp->priority)
-+            break;
-+    }
-+
-+    list_add_tail(&adp->p_link,pos);
-+
-+    adeos_critical_exit(flags);
-+
-+    printk(KERN_WARNING "Adeos: Domain %s registered.\n",adp->name);
-+
-+    /* Finally, allow the new domain to perform its initialization
-+       chores. */
-+
-+    if (attr->entry != NULL)
-+      {
-+      adeos_declare_cpuid;
-+
-+      adeos_lock_cpu(flags);
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+      __adeos_switch_to(adp_root,adp,cpuid);
-+#else /* !CONFIG_ADEOS_THREADS */
-+      adp_cpu_current[cpuid] = adp;
-+      attr->entry(1);
-+      adp_cpu_current[cpuid] = adp_root;
-+#endif /* CONFIG_ADEOS_THREADS */
-+
-+      adeos_load_cpuid();     /* Processor might have changed. */
-+
-+      if (adp_root->cpudata[cpuid].irq_pending_hi != 0 &&
-+          !test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status))
-+          __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+
-+      adeos_unlock_cpu(flags);
-+      }
-+
-+    return 0;
-+}
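
The registration path above expects the caller to fill an adattr_t first. A minimal,
hypothetical client domain would be set up roughly as follows; the domain name, id,
priority offset and entry routine are illustrative, only the calls and fields shown
in the code above are relied upon:

/* Hypothetical client of adeos_register_domain(). */
static adomain_t my_domain;

static void my_domain_entry(int iflag)  /* r3/first arg as set up above */
{
        /* virtualize IRQs / catch events from here */
}

static int my_domain_setup(void)
{
        adattr_t attr;

        adeos_init_attr(&attr);
        attr.name = "MyDomain";                /* illustrative */
        attr.domid = 0x12345678;               /* illustrative */
        attr.entry = &my_domain_entry;
        attr.priority = ADEOS_ROOT_PRI + 100;  /* assumed to rank above the root domain */

        return adeos_register_domain(&my_domain, &attr);
}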
-+
-+/* adeos_unregister_domain() -- Remove a domain from the system. All
-+   client domains must call this routine to unregister themselves from
-+   the ADEOS layer. */
-+
-+int adeos_unregister_domain (adomain_t *adp)
-+
-+{
-+    unsigned long flags;
-+    unsigned event;
-+
-+    if (adp_current != adp_root)
-+      {
-+      printk(KERN_WARNING "Adeos: Only the root domain may unregister a 
domain.\n");
-+      return -EPERM;
-+      }
-+
-+    if (adp == adp_root)
-+      {
-+      printk(KERN_WARNING "Adeos: Cannot unregister the root domain.\n");
-+      return -EPERM;
-+      }
-+
-+    for (event = 0; event < ADEOS_NR_EVENTS; event++)
-+      /* Need this to update the monitor count. */
-+      adeos_catch_event_from(adp,event,NULL);
-+
-+#ifdef CONFIG_SMP
-+    {
-+    int nr_cpus = num_online_cpus(), _cpuid;
-+    unsigned irq;
-+
-+    /* In the SMP case, wait for the logged events to drain on other
-+       processors before eventually removing the domain from the
-+       pipeline. */
-+
-+    adeos_unstall_pipeline_from(adp);
-+
-+    flags = adeos_critical_enter(NULL);
-+
-+    for (irq = 0; irq < IPIPE_NR_IRQS; irq++)
-+      {
-+      clear_bit(IPIPE_HANDLE_FLAG,&adp->irqs[irq].control);
-+      clear_bit(IPIPE_STICKY_FLAG,&adp->irqs[irq].control);
-+      set_bit(IPIPE_PASS_FLAG,&adp->irqs[irq].control);
-+      }
-+
-+    adeos_critical_exit(flags);
-+
-+    for (_cpuid = 0; _cpuid < nr_cpus; _cpuid++)
-+      {
-+      for (irq = 0; irq < IPIPE_NR_IRQS; irq++)
-+          while (adp->cpudata[_cpuid].irq_hits[irq] > 0)
-+              cpu_relax();
-+
-+      while (test_bit(IPIPE_XPEND_FLAG,&adp->cpudata[_cpuid].status))
-+          cpu_relax();
-+
-+      while (!test_bit(IPIPE_SLEEP_FLAG,&adp->cpudata[_cpuid].status))
-+           cpu_relax();
-+      }
-+    }
-+#endif /* CONFIG_SMP */
-+
-+    /* Simply remove the domain from the pipeline and we are almost
-+       done. */
-+
-+    flags = adeos_critical_enter(NULL);
-+    list_del_init(&adp->p_link);
-+    adeos_critical_exit(flags);
-+
-+    __adeos_cleanup_domain(adp);
-+
-+    printk(KERN_WARNING "Adeos: Domain %s unregistered.\n",adp->name);
-+
-+    return 0;
-+}
-+
-+/* adeos_propagate_irq() -- Force a given IRQ propagation on behalf of
-+   a running interrupt handler to the next domain down the pipeline.
-+   Returns non-zero if a domain has received the interrupt
-+   notification, zero otherwise.
-+   This call is useful for handling shared interrupts among domains.
-+   e.g. pipeline = [domain-A]---[domain-B]...
-+   Both domains share IRQ #X.
-+   - domain-A handles IRQ #X but does not pass it down (i.e. Terminate
-+   or Dynamic interrupt control mode)
-+   - domain-B handles IRQ #X (i.e. Terminate or Accept interrupt
-+   control modes).
-+   When IRQ #X is raised, domain-A's handler determines whether it
-+   should process the interrupt by identifying its source. If not,
-+   adeos_propagate_irq() is called so that the next domain down the
-+   pipeline which handles IRQ #X is given a chance to process it. This
-+   process can be repeated until the end of the pipeline is
-+   reached. */
-+
-+/* adeos_schedule_irq() -- Almost the same as adeos_propagate_irq(),
-+   but attempts to pend the interrupt for the current domain first. */
-+
-+int fastcall __adeos_schedule_irq (unsigned irq, struct list_head *head)
-+
-+{
-+    struct list_head *ln;
-+    unsigned long flags;
-+    adeos_declare_cpuid;
-+
-+    if (irq >= IPIPE_NR_IRQS ||
-+      (adeos_virtual_irq_p(irq) && !test_bit(irq - 
IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map)))
-+      return -EINVAL;
-+
-+    adeos_lock_cpu(flags);
-+
-+    ln = head;
-+
-+    while (ln != &__adeos_pipeline)
-+      {
-+      adomain_t *adp = list_entry(ln,adomain_t,p_link);
-+
-+      if (test_bit(IPIPE_HANDLE_FLAG,&adp->irqs[irq].control))
-+          {
-+          adp->cpudata[cpuid].irq_hits[irq]++;
-+          __adeos_set_irq_bit(adp,cpuid,irq);
-+          adeos_unlock_cpu(flags);
-+          return 1;
-+          }
-+
-+      ln = adp->p_link.next;
-+      }
-+
-+    adeos_unlock_cpu(flags);
-+
-+    return 0;
-+}
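
The comment above describes how a higher-priority domain can decline a shared
interrupt and hand it down the pipeline. A hedged sketch of such a handler follows;
the device-ownership test is a placeholder, and only adeos_propagate_irq() as
documented above is assumed:

/* Hypothetical domain-A handler for an IRQ line shared with Linux. */
static int my_device_raised_irq(void)
{
        return 0;  /* placeholder ownership test */
}

static void my_shared_irq_handler(unsigned irq)
{
        if (!my_device_raised_irq()) {
                /* Not ours: let the next domain down the pipeline
                 * that handles this IRQ process it. */
                adeos_propagate_irq(irq);
                return;
        }

        /* Process the interrupt on behalf of this domain here. */
}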
-+
-+/* adeos_free_irq() -- Return a previously allocated virtual/soft
-+   pipelined interrupt to the pool of allocatable interrupts. */
-+
-+int adeos_free_irq (unsigned irq)
-+
-+{
-+    if (irq >= IPIPE_NR_IRQS)
-+      return -EINVAL;
-+
-+    clear_bit(irq - IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map);
-+
-+    return 0;
-+}
-+
-+cpumask_t adeos_set_irq_affinity (unsigned irq, cpumask_t cpumask)
-+
-+{
-+#ifdef CONFIG_SMP
-+     if (irq >= IPIPE_NR_XIRQS)
-+       /* Allow changing affinity of external IRQs only. */
-+       return CPU_MASK_NONE;
-+
-+     if (num_online_cpus() > 1)
-+       /* More than one CPU online: forward to the arch handler. */
-+       return __adeos_set_irq_affinity(irq,cpumask);
-+#endif /* CONFIG_SMP */
-+
-+    return CPU_MASK_NONE;
-+}
-+
-+/* adeos_catch_event_from() -- Interpose an event handler starting
-+   from a given domain. */
-+
-+adevhand_t adeos_catch_event_from (adomain_t *adp, unsigned event, adevhand_t 
handler)
-+
-+{
-+    adevhand_t oldhandler;
-+
-+    if (event >= ADEOS_NR_EVENTS)
-+      return NULL;
-+
-+    if ((oldhandler = (adevhand_t)xchg(&adp->events[event].handler,handler)) 
== NULL)
-+      {
-+      if (handler)
-+          __adeos_event_monitors[event]++;
-+      }
-+    else if (!handler)
-+      __adeos_event_monitors[event]--;
-+
-+    return oldhandler;
-+}
-+
-+void adeos_init_attr (adattr_t *attr)
-+
-+{
-+    attr->name = "Anonymous";
-+    attr->domid = 1;
-+    attr->entry = NULL;
-+    attr->estacksz = 0;       /* Let ADEOS choose a reasonable stack size */
-+    attr->priority = ADEOS_ROOT_PRI;
-+    attr->dswitch = NULL;
-+    attr->nptdkeys = 0;
-+    attr->ptdset = NULL;
-+    attr->ptdget = NULL;
-+}
-+
-+int adeos_alloc_ptdkey (void)
-+
-+{
-+    unsigned long flags;
-+    int key = -1;
-+
-+    spin_lock_irqsave_hw(&__adeos_pipelock,flags);
-+
-+    if (adp_current->ptd_keycount < adp_current->ptd_keymax)
-+      {
-+      key = ffz(adp_current->ptd_keymap);
-+      set_bit(key,&adp_current->ptd_keymap);
-+      adp_current->ptd_keycount++;
-+      }
-+
-+    spin_unlock_irqrestore_hw(&__adeos_pipelock,flags);
-+
-+    return key;
-+}
-+
-+int adeos_free_ptdkey (int key)
-+
-+{
-+    unsigned long flags; 
-+
-+    if (key < 0 || key >= adp_current->ptd_keymax)
-+      return -EINVAL;
-+
-+    spin_lock_irqsave_hw(&__adeos_pipelock,flags);
-+
-+    if (test_and_clear_bit(key,&adp_current->ptd_keymap))
-+      adp_current->ptd_keycount--;
-+
-+    spin_unlock_irqrestore_hw(&__adeos_pipelock,flags);
-+
-+    return 0;
-+}
-+
-+int adeos_set_ptd (int key, void *value)
-+
-+{
-+    if (key < 0 || key >= adp_current->ptd_keymax)
-+      return -EINVAL;
-+
-+    if (!adp_current->ptd_setfun)
-+      {
-+      printk(KERN_WARNING "Adeos: No ptdset hook for %s\n",adp_current->name);
-+      return -EINVAL;
-+      }
-+
-+    adp_current->ptd_setfun(key,value);
-+
-+    return 0;
-+}
-+
-+void *adeos_get_ptd (int key)
-+
-+{
-+    if (key < 0 || key >= adp_current->ptd_keymax)
-+      return NULL;
-+
-+    if (!adp_current->ptd_getfun)
-+      {
-+      printk(KERN_WARNING "Adeos: No ptdget hook for %s\n",adp_current->name);
-+      return NULL;
-+      }
-+
-+    return adp_current->ptd_getfun(key);
-+}
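
The per-task data helpers above follow the usual key/value pattern. A short,
hypothetical usage sequence from a client domain registered with nptdkeys > 0 and
with ptdset/ptdget hooks (the stored value is illustrative):

/* Hypothetical per-task data usage built on the four calls above. */
static int my_ptd_key = -1;

static int my_ptd_setup(void)
{
        my_ptd_key = adeos_alloc_ptdkey();
        if (my_ptd_key < 0)
                return -1;

        adeos_set_ptd(my_ptd_key, (void *)0x1234);  /* illustrative value */
        return 0;
}

static void my_ptd_cleanup(void)
{
        void *value = adeos_get_ptd(my_ptd_key);

        (void)value;
        adeos_free_ptdkey(my_ptd_key);
}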
-+
-+int adeos_init_mutex (admutex_t *mutex)
-+
-+{
-+    admutex_t initm = ADEOS_MUTEX_UNLOCKED;
-+    *mutex = initm;
-+    return 0;
-+}
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+
-+int adeos_destroy_mutex (admutex_t *mutex)
-+
-+{
-+    if (!adeos_spin_trylock(&mutex->lock) &&
-+      adp_current != adp_root &&
-+      mutex->owner != adp_current)
-+      return -EBUSY;
-+
-+    return 0;
-+}
-+
-+static inline void __adeos_sleepon_mutex (admutex_t *mutex, adomain_t 
*sleeper, int cpuid)
-+
-+{
-+    adomain_t *owner = mutex->owner;
-+
-+    /* Make the current domain (== sleeper) wait for the mutex to be
-+       released. Adeos' pipelined scheme guarantees that the new
-+       sleeper _is_ higher priority than any sleeping domain since we
-+       have stalled each sleeper's stage. Must be called with local hw
-+       interrupts off. */
-+
-+    sleeper->m_link = mutex->sleepq;
-+    mutex->sleepq = sleeper;
-+    __adeos_switch_to(adp_cpu_current[cpuid],owner,cpuid);
-+    mutex->owner = sleeper;
-+    adeos_spin_unlock(&mutex->lock);
-+}
-+
-+unsigned long fastcall adeos_lock_mutex (admutex_t *mutex)
-+
-+{
-+    unsigned long flags, hwflags;
-+    adeos_declare_cpuid;
-+    adomain_t *adp;
-+
-+    if (!adp_pipelined)
-+      {
-+      adeos_hw_local_irq_save(hwflags);
-+      flags = !adeos_hw_test_iflag(hwflags);
-+      adeos_spin_lock(&mutex->lock);
-+      return flags;
-+      }
-+
-+    adeos_lock_cpu(hwflags);
-+
-+    adp = adp_cpu_current[cpuid];
-+
-+    flags = __test_and_set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+
-+    /* Two cases to handle here on SMP systems, only one for UP: 1) in
-+       case of a conflicting access from a higher priority domain
-+       running on the same cpu, make this domain sleep on the mutex,
-+       and resume the current owner so it can release the lock asap.
-+       2) in case of a conflicting access from any domain on a
-+       different cpu than the current owner's, simply enter a spinning
-+       loop. Note that testing mutex->owncpu is safe since it is only
-+       changed by the current owner, and set to -1 when the mutex is
-+       unlocked. */
-+
-+#ifdef CONFIG_SMP
-+    while (!adeos_spin_trylock(&mutex->lock))
-+      {
-+      if (mutex->owncpu == cpuid)
-+          {
-+          __adeos_sleepon_mutex(mutex,adp,cpuid);
-+          adeos_load_cpuid();
-+          }
-+      }
-+
-+    mutex->owncpu = cpuid;
-+#else  /* !CONFIG_SMP */
-+    while (mutex->owner != NULL && mutex->owner != adp)
-+      __adeos_sleepon_mutex(mutex,adp,cpuid);
-+#endif /* CONFIG_SMP */
-+
-+    mutex->owner = adp;
-+
-+    adeos_unlock_cpu(hwflags);
-+
-+    return flags;
-+}
-+
-+void fastcall adeos_unlock_mutex (admutex_t *mutex, unsigned long flags)
-+
-+{
-+    unsigned long hwflags;
-+    adeos_declare_cpuid;
-+    adomain_t *adp;
-+
-+    if (!adp_pipelined)
-+      {
-+      adeos_spin_unlock(&mutex->lock);
-+
-+      if (flags)
-+          adeos_hw_cli();
-+      else
-+          adeos_hw_sti();
-+
-+      return;
-+      }
-+
-+#ifdef CONFIG_SMP
-+    mutex->owncpu = -1;
-+#endif /* CONFIG_SMP */
-+
-+    if (!flags)
-+      adeos_hw_sti(); /* Absolutely needed. */
-+      
-+    adeos_lock_cpu(hwflags);
-+
-+    if (mutex->sleepq != NULL)
-+      {
-+      adomain_t *sleeper = mutex->sleepq;
-+      /* Wake up the highest priority sleeper. */
-+      mutex->sleepq = sleeper->m_link;
-+      __adeos_switch_to(adp_cpu_current[cpuid],sleeper,cpuid);
-+      adeos_load_cpuid();
-+      }
-+    else
-+      {
-+      mutex->owner = NULL;
-+      adeos_spin_unlock(&mutex->lock);
-+      }
-+
-+    adp = adp_cpu_current[cpuid];
-+
-+    if (flags)
-+      __set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+    else
-+      {
-+      __clear_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+      
-+      if (adp->cpudata[cpuid].irq_pending_hi != 0)
-+          __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+      }
-+
-+    adeos_unlock_cpu(hwflags);
-+}
-+
-+#else /* !CONFIG_ADEOS_THREADS */
-+
-+int adeos_destroy_mutex (admutex_t *mutex)
-+
-+{
-+    if (!adeos_spin_trylock(&mutex->lock) &&
-+      adp_current != adp_root)
-+      return -EBUSY;
-+
-+    return 0;
-+}
-+
-+unsigned long fastcall adeos_lock_mutex (admutex_t *mutex)
-+
-+{
-+    unsigned long flags; /* FIXME: won't work on SPARC */
-+    spin_lock_irqsave_hw(&mutex->lock,flags);
-+    return flags;
-+}
-+
-+void fastcall adeos_unlock_mutex (admutex_t *mutex, unsigned long flags)
-+
-+{
-+    spin_unlock_irqrestore_hw(&mutex->lock,flags);
-+}
-+
-+#endif /* CONFIG_ADEOS_THREADS */
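
Both mutex variants above expose the same calling convention: adeos_lock_mutex()
returns a flags word that must be handed back to adeos_unlock_mutex(). A minimal
critical section, using only the initializer and calls defined above (the protected
counter is illustrative):

/* Hypothetical use of the Adeos mutex API defined above. */
static admutex_t my_mutex = ADEOS_MUTEX_UNLOCKED;
static int my_shared_counter;           /* illustrative shared state */

static void my_update(void)
{
        unsigned long flags;

        flags = adeos_lock_mutex(&my_mutex);
        my_shared_counter++;            /* protected across domains */
        adeos_unlock_mutex(&my_mutex, flags);
}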
-+
-+void __adeos_takeover (void)
-+
-+{
-+    __adeos_enable_pipeline();
-+    printk(KERN_WARNING "Adeos: Pipelining started.\n");
-+}
-+
-+#ifdef MODULE
-+
-+static int __init adeos_init_module (void)
-+
-+{
-+    __adeos_takeover();
-+    return 0;
-+}
-+
-+static void __exit adeos_exit_module (void)
-+
-+{
-+    __adeos_disable_pipeline();
-+    printk(KERN_WARNING "Adeos: Pipelining stopped.\n");
-+}
-+
-+module_init(adeos_init_module);
-+module_exit(adeos_exit_module);
-+
-+#endif /* MODULE */
-+
-+EXPORT_SYMBOL(adeos_register_domain);
-+EXPORT_SYMBOL(adeos_unregister_domain);
-+EXPORT_SYMBOL(adeos_virtualize_irq_from);
-+EXPORT_SYMBOL(adeos_control_irq);
-+EXPORT_SYMBOL(__adeos_schedule_irq);
-+EXPORT_SYMBOL(adeos_free_irq);
-+EXPORT_SYMBOL(adeos_send_ipi);
-+EXPORT_SYMBOL(adeos_catch_event_from);
-+EXPORT_SYMBOL(adeos_init_attr);
-+EXPORT_SYMBOL(adeos_get_sysinfo);
-+EXPORT_SYMBOL(adeos_tune_timer);
-+EXPORT_SYMBOL(adeos_alloc_ptdkey);
-+EXPORT_SYMBOL(adeos_free_ptdkey);
-+EXPORT_SYMBOL(adeos_set_ptd);
-+EXPORT_SYMBOL(adeos_get_ptd);
-+EXPORT_SYMBOL(adeos_set_irq_affinity);
-+EXPORT_SYMBOL(adeos_init_mutex);
-+EXPORT_SYMBOL(adeos_destroy_mutex);
-+EXPORT_SYMBOL(adeos_lock_mutex);
-+EXPORT_SYMBOL(adeos_unlock_mutex);
-diff -Nru linux-2.6.10/adeos/Kconfig linux-2.6.10-adeos-ppc64-r2/adeos/Kconfig
---- linux-2.6.10/adeos/Kconfig 1970-01-01 02:00:00.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/adeos/Kconfig  2005-10-05 10:34:53.000000000 
+0300
-@@ -0,0 +1,40 @@
-+menu "Adeos support"
-+
-+config ADEOS
-+      tristate "Adeos support"
-+      default y
-+      ---help---
-+        Activate this option if you want the Adeos nanokernel to be
-+        compiled in.
-+
-+config ADEOS_CORE
-+      def_bool ADEOS
-+
-+config ADEOS_THREADS
-+      bool "Threaded domains"
-+      depends on ADEOS && !PPC64
-+      default y
-+      ---help---
-+        This option causes the domains to run as lightweight
-+        threads, which is useful for having separate stacks
-+        for domains. Enabling this option is the safest setting for
-+        now; disabling it causes an experimental mode to be used
-+        where interrupts/events are directly processed on behalf of
-+        the preempted context. Say Y if unsure.
-+
-+config ADEOS_NOTHREADS
-+      def_bool !ADEOS_THREADS
-+
-+config ADEOS_PROFILING
-+      bool "Pipeline profiling"
-+      depends on ADEOS
-+      default n
-+      ---help---
-+        This option activates the profiling code which collects the
-+        timestamps needed to measure the propagation time of
-+        interrupts through the pipeline. Say N if unsure.
-+
-+config ADEOS_PREEMPT_RT
-+      def_bool PREEMPT_NONE || PREEMPT_VOLUNTARY || PREEMPT_DESKTOP || 
PREEMPT_RT
-+
-+endmenu
-diff -Nru linux-2.6.10/adeos/Makefile 
linux-2.6.10-adeos-ppc64-r2/adeos/Makefile
---- linux-2.6.10/adeos/Makefile        1970-01-01 02:00:00.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/adeos/Makefile 2005-10-05 10:34:53.000000000 
+0300
-@@ -0,0 +1,15 @@
-+#
-+# Makefile for the Adeos layer.
-+#
-+
-+obj-$(CONFIG_ADEOS)   += adeos.o
-+
-+adeos-objs            := generic.o
-+
-+adeos-$(CONFIG_X86)   += x86.o
-+
-+adeos-$(CONFIG_IA64)  += ia64.o
-+
-+adeos-$(CONFIG_PPC32) += ppc.o
-+
-+adeos-$(CONFIG_PPC64) += ppc64.o
-diff -Nru linux-2.6.10/adeos/ppc64.c linux-2.6.10-adeos-ppc64-r2/adeos/ppc64.c
---- linux-2.6.10/adeos/ppc64.c 1970-01-01 02:00:00.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/adeos/ppc64.c  2005-10-05 10:34:53.000000000 
+0300
-@@ -0,0 +1,527 @@
-+/*
-+ *   linux/adeos/ppc64.c
-+ *  
-+ *   Adeos 64-bit PowerPC adoption 
-+ *   Copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
-+ *   based on previous work:
-+ * 
-+ *   Copyright (C) 2004 Philippe Gerum.
-+ * 
-+ *   Adeos/PPC port over 2.6 based on the previous 2.4 implementation by:
-+ *
-+ *   Copyright (C) 2004 Wolfgang Grandegger.
-+ *
-+ *   It follows closely the ARM and x86 ports of ADEOS.
-+ *
-+ *   Copyright (C) 2003 Philippe Gerum.
-+ *
-+ *   This program is free software; you can redistribute it and/or modify
-+ *   it under the terms of the GNU General Public License as published by
-+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
-+ *   USA; either version 2 of the License, or (at your option) any later
-+ *   version.
-+ *
-+ *   This program is distributed in the hope that it will be useful,
-+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ *   GNU General Public License for more details.
-+ *
-+ *   You should have received a copy of the GNU General Public License
-+ *   along with this program; if not, write to the Free Software
-+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, 
USA.
-+ *
-+ *   Architecture-dependent ADEOS support for PowerPC.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/interrupt.h>
-+#include <linux/irq.h>
-+#include <linux/errno.h>
-+#include <asm/system.h>
-+#include <asm/hw_irq.h>
-+#include <asm/irq.h>
-+#include <asm/atomic.h>
-+#include <asm/io.h>
-+#include <asm/time.h>
-+#include <asm/cputable.h> /* cur_cpu_spec & CPU_FTR* */
-+#include <asm/mmu_context.h> /* get_kernel_vsid */
-+
-+extern spinlock_t __adeos_pipelock;
-+
-+extern unsigned long __adeos_virtual_irq_map;
-+
-+extern struct list_head __adeos_pipeline;
-+
-+extern irq_desc_t irq_desc[];
-+
-+static struct hw_interrupt_type __adeos_std_irq_dtype[NR_IRQS];
-+
-+/*
-+ * Check NULLs when calling dtype[].X ?
-+ *  (.end)
-+ */
-+
-+static void __adeos_override_irq_enable (unsigned irq)
-+
-+{
-+    unsigned long adflags, hwflags;
-+    adeos_declare_cpuid;
-+
-+    adeos_hw_local_irq_save(hwflags);
-+    adflags = adeos_test_and_stall_pipeline();
-+    preempt_disable();
-+    __adeos_unlock_irq(adp_cpu_current[cpuid],irq);
-+    __adeos_std_irq_dtype[irq].enable(irq);
-+    preempt_enable_no_resched();
-+    adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
-+    adeos_hw_local_irq_restore(hwflags);
-+}
-+
-+static void __adeos_override_irq_disable (unsigned irq)
-+
-+{
-+    unsigned long adflags, hwflags;
-+    adeos_declare_cpuid;
-+
-+    adeos_hw_local_irq_save(hwflags);
-+    adflags = adeos_test_and_stall_pipeline();
-+    preempt_disable();
-+    __adeos_std_irq_dtype[irq].disable(irq);
-+    __adeos_lock_irq(adp_cpu_current[cpuid],cpuid,irq);
-+    preempt_enable_no_resched();
-+    adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
-+    adeos_hw_local_irq_restore(hwflags);
-+}
-+
-+static void __adeos_override_irq_end (unsigned irq)
-+
-+{
-+    unsigned long adflags, hwflags;
-+    adeos_declare_cpuid;
-+
-+    adeos_hw_local_irq_save(hwflags);
-+    adflags = adeos_test_and_stall_pipeline();
-+    preempt_disable();
-+
-+    if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-+      __adeos_unlock_irq(adp_cpu_current[cpuid],irq);
-+
-+    __adeos_std_irq_dtype[irq].end(irq);
-+
-+    preempt_enable_no_resched();
-+    adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
-+    adeos_hw_local_irq_restore(hwflags);
-+}
-+
-+static void __adeos_override_irq_affinity (unsigned irq, cpumask_t mask)
-+
-+{
-+    unsigned long adflags, hwflags;
-+    adeos_declare_cpuid;
-+
-+    adeos_hw_local_irq_save(hwflags);
-+    adflags = adeos_test_and_stall_pipeline();
-+    preempt_disable();
-+    __adeos_std_irq_dtype[irq].set_affinity(irq,mask);
-+    preempt_enable_no_resched();
-+    adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
-+    adeos_hw_local_irq_restore(hwflags);
-+}
-+
-+static void  __adeos_enable_sync (void)
-+
-+{
-+    __adeos_decr_next[adeos_processor_id()] = __adeos_read_timebase() + 
get_dec();
-+}
-+
-+/* __adeos_enable_pipeline() -- Take over the interrupt control from
-+   the root domain (i.e. Linux). After this routine has returned, all
-+   interrupts go through the pipeline. */
-+
-+void __adeos_enable_pipeline (void)
-+
-+{
-+    unsigned long flags;
-+    unsigned irq;
-+
-+    flags = adeos_critical_enter(&__adeos_enable_sync);
-+
-+    /* First, virtualize all interrupts from the root domain. */
-+
-+    for (irq = 0; irq < NR_IRQS; irq++)
-+      adeos_virtualize_irq(irq,
-+                           (void (*)(unsigned))&__adeos_do_IRQ,
-+                           &__adeos_ack_irq,
-+                           IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
-+
-+    /* We use a virtual IRQ to handle the timer irq (decrementer trap)
-+       which has been allocated early in __adeos_init_platform(). */
-+
-+    adeos_virtualize_irq(ADEOS_TIMER_VIRQ,
-+                       (void (*)(unsigned))&__adeos_do_timer,
-+                       NULL,
-+                       IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
-+  
-+
-+    /* Interpose on the IRQ control routines so we can make them
-+       atomic using hw masking and prevent the interrupt log from
-+       being untimely flushed. */
-+
-+    for (irq = 0; irq < NR_IRQS; irq++) 
-+      {
-+      if (irq_desc[irq].handler != NULL)
-+          __adeos_std_irq_dtype[irq] = *irq_desc[irq].handler;
-+      }
-+
-+    /* The original controller structs are often shared, so we first
-+       save them all before changing any of them. Notice that we don't
-+       override the ack() handler since we will enforce the necessary
-+       setup in __adeos_ack_irq(). */
-+
-+    for (irq = 0; irq < NR_IRQS; irq++)
-+      {
-+      struct hw_interrupt_type *handler = irq_desc[irq].handler;
-+
-+      if (handler == NULL)
-+          continue;
-+
-+      if (handler->enable != NULL)
-+          handler->enable = &__adeos_override_irq_enable;
-+
-+      if (handler->disable != NULL)
-+          handler->disable = &__adeos_override_irq_disable;
-+
-+      if (handler->end != NULL)
-+          handler->end = &__adeos_override_irq_end;
-+
-+      if (handler->set_affinity != NULL)
-+          handler->set_affinity = &__adeos_override_irq_affinity;
-+      }
-+
-+    __adeos_decr_next[adeos_processor_id()] = __adeos_read_timebase() + 
get_dec();
-+
-+    adp_pipelined = 1;
-+
-+    adeos_critical_exit(flags);
-+}
-+
-+/* __adeos_disable_pipeline() -- Disengage the pipeline. */
-+
-+void __adeos_disable_pipeline (void)
-+
-+{
-+    unsigned long flags;
-+    unsigned irq;
-+
-+    flags = adeos_critical_enter(NULL);
-+
-+    /* Restore interrupt controllers. */
-+
-+    for (irq = 0; irq < NR_IRQS; irq++)
-+      {
-+      if (irq_desc[irq].handler != NULL)
-+          *irq_desc[irq].handler = __adeos_std_irq_dtype[irq];
-+      }
-+
-+    adp_pipelined = 0;
-+
-+    adeos_critical_exit(flags);
-+}
-+
-+/* adeos_virtualize_irq_from() -- Attach a handler (and optionally a
-+   hw acknowledge routine) to an interrupt for the given domain. */
-+
-+int adeos_virtualize_irq_from (adomain_t *adp,
-+                             unsigned irq,
-+                             void (*handler)(unsigned irq),
-+                             int (*acknowledge)(unsigned irq),
-+                             unsigned modemask)
-+{
-+    unsigned long flags;
-+    int err;
-+
-+    if (irq >= IPIPE_NR_IRQS)
-+      return -EINVAL;
-+
-+    if (adp->irqs[irq].control & IPIPE_SYSTEM_MASK)
-+      return -EPERM;
-+      
-+    adeos_spin_lock_irqsave(&__adeos_pipelock,flags);
-+
-+    if (handler != NULL)
-+      {
-+      /* A bit of a hack here: if we are re-virtualizing an IRQ just
-+         to change the acknowledge routine by passing the special
-+         ADEOS_SAME_HANDLER value, then allow the current handler to
-+         be recycled for the IRQ. This allows Linux device drivers
-+         managing shared IRQ lines to call adeos_virtualize_irq() in
-+         addition to request_irq() just for the purpose of
-+         interposing their own shared acknowledge routine. */
-+
-+      if (handler == ADEOS_SAME_HANDLER)
-+          {
-+          handler = adp->irqs[irq].handler;
-+
-+          if (handler == NULL)
-+              {
-+              err = -EINVAL;
-+              goto unlock_and_exit;
-+              }
-+          }
-+      else if ((modemask & IPIPE_EXCLUSIVE_MASK) != 0 &&
-+               adp->irqs[irq].handler != NULL)
-+          {
-+          err = -EBUSY;
-+          goto unlock_and_exit;
-+          }
-+      
-+      if ((modemask & (IPIPE_SHARED_MASK|IPIPE_PASS_MASK)) == 
IPIPE_SHARED_MASK)
-+          {
-+          err = -EINVAL;
-+          goto unlock_and_exit;
-+          }
-+
-+      if ((modemask & IPIPE_STICKY_MASK) != 0)
-+          modemask |= IPIPE_HANDLE_MASK;
-+      }
-+    else
-+      modemask &= ~(IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK|IPIPE_SHARED_MASK);
-+
-+    if (acknowledge == NULL)
-+      {
-+      if ((modemask & IPIPE_SHARED_MASK) == 0)
-+          /* Acknowledge handler unspecified -- this is ok in
-+             non-shared management mode, but we will force the use
-+             of the Linux-defined handler instead. */
-+          acknowledge = adp_root->irqs[irq].acknowledge;
-+      else
-+          {
-+          /* A valid acknowledge handler to be called in shared mode
-+             is required when declaring a shared IRQ. */
-+          err = -EINVAL;
-+          goto unlock_and_exit;
-+          }
-+      }
-+
-+    adp->irqs[irq].handler = handler;
-+    adp->irqs[irq].acknowledge = acknowledge;
-+    adp->irqs[irq].control = modemask;
-+
-+    if (irq < NR_IRQS &&
-+      handler != NULL &&
-+      !adeos_virtual_irq_p(irq) &&
-+      (modemask & IPIPE_ENABLE_MASK) != 0)
-+      {
-+      if (adp != adp_current)
-+          {
-+          /* IRQ enable/disable state is domain-sensitive, so we may
-+             not change it for another domain. What is allowed
-+             however is forcing some domain to handle an interrupt
-+             source, by passing the proper 'adp' descriptor which
-+             thus may be different from adp_current. */
-+          err = -EPERM;
-+          goto unlock_and_exit;
-+          }
-+
-+      enable_irq(irq);
-+      }
-+
-+    err = 0;
-+
-+unlock_and_exit:
-+
-+    adeos_spin_unlock_irqrestore(&__adeos_pipelock,flags);
-+
-+    return err;
-+}
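
A client domain typically attaches its handlers through the adeos_virtualize_irq()
shorthand, the same call the pipeline setup code uses above. A hedged sketch, with
an illustrative IRQ number and handler name; a NULL acknowledge falls back to the
Linux-defined one as handled in adeos_virtualize_irq_from() above:

#define MY_IRQ 42                      /* illustrative line number */

static void my_irq_handler(unsigned irq)
{
        /* real-time processing for MY_IRQ */
}

static void my_attach_irq(void)
{
        adeos_virtualize_irq(MY_IRQ,
                             &my_irq_handler,
                             NULL,      /* reuse the Linux acknowledge routine */
                             IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);  /* accept mode */
}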
-+
-+/* adeos_control_irq() -- Change an interrupt mode. This affects the
-+   way a given interrupt is handled by ADEOS for the current
-+   domain. setmask is a bitmask telling whether:
-+   - the interrupt should be passed to the domain (IPIPE_HANDLE_MASK),
-+     and/or
-+   - the interrupt should be passed down to the lower priority domain(s)
-+     in the pipeline (IPIPE_PASS_MASK).
-+   This leads to four possibilities:
-+   - PASS only => Ignore the interrupt
-+   - HANDLE only => Terminate the interrupt (process but don't pass down)
-+   - PASS + HANDLE => Accept the interrupt (process and pass down)
-+   - <none> => Discard the interrupt
-+   - DYNAMIC is currently an alias of HANDLE since it marks an interrupt
-+   which is processed by the current domain but not implicitly passed
-+   down to the pipeline, letting the domain's handler choose on a case-
-+   by-case basis whether the interrupt propagation should be forced
-+   using adeos_propagate_irq().
-+   clrmask clears the corresponding bits from the control field before
-+   setmask is applied.
-+*/
-+
-+int adeos_control_irq (unsigned irq,
-+                     unsigned clrmask,
-+                     unsigned setmask)
-+{
-+    irq_desc_t *desc;
-+    unsigned long flags;
-+
-+    if (irq >= IPIPE_NR_IRQS)
-+      return -EINVAL;
-+
-+    if (adp_current->irqs[irq].control & IPIPE_SYSTEM_MASK)
-+      return -EPERM;
-+      
-+    if (((setmask|clrmask) & IPIPE_SHARED_MASK) != 0)
-+      return -EINVAL;
-+      
-+    desc = irq_desc + irq;
-+
-+    if (adp_current->irqs[irq].handler == NULL)
-+      setmask &= ~(IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK);
-+
-+    if ((setmask & IPIPE_STICKY_MASK) != 0)
-+      setmask |= IPIPE_HANDLE_MASK;
-+
-+    if ((clrmask & (IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK)) != 0)       /* If 
one goes, both go. */
-+      clrmask |= (IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK);
-+
-+    adeos_spin_lock_irqsave(&__adeos_pipelock,flags);
-+
-+    adp_current->irqs[irq].control &= ~clrmask;
-+    adp_current->irqs[irq].control |= setmask;
-+
-+    if ((setmask & IPIPE_ENABLE_MASK) != 0)
-+      enable_irq(irq);
-+    else if ((clrmask & IPIPE_ENABLE_MASK) != 0)
-+      disable_irq(irq);
-+
-+    adeos_spin_unlock_irqrestore(&__adeos_pipelock,flags);
-+
-+    return 0;
-+}
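
The four modes listed in the comment above map onto clrmask/setmask pairs. For
instance, switching an already-virtualized line to "terminate" mode (handle but do
not pass down) could look like this sketch, using only adeos_control_irq() and the
masks documented above:

/* Hypothetical mode change: clear PASS, keep HANDLE => terminate mode. */
static int my_terminate_irq(unsigned irq)
{
        return adeos_control_irq(irq,
                                 IPIPE_PASS_MASK,     /* clrmask */
                                 IPIPE_HANDLE_MASK);  /* setmask */
}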
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+
-+void __adeos_init_domain (adomain_t *adp, adattr_t *attr)
-+
-+{
-+    int estacksz = attr->estacksz > 0 ? attr->estacksz : 16384, _cpuid;
-+    unsigned long flags, *ksp;
-+    adeos_declare_cpuid;
-+
-+    adeos_hw_local_irq_flags(flags);
-+
-+    for (_cpuid = 0; _cpuid < num_online_cpus(); _cpuid++)
-+      {
-+      adp->estackbase[_cpuid] = (unsigned long)kmalloc(estacksz,GFP_KERNEL);
-+    
-+      if (adp->estackbase[_cpuid] == 0)
-+          panic("Adeos: No memory for domain stack on CPU #%d",_cpuid);
-+      
-+      adp->esp[_cpuid] = adp->estackbase[_cpuid];
-+      ksp = (unsigned long *)((adp->esp[_cpuid] + estacksz - 16) & ~0xf);
-+      *ksp = 0L; /* first stack frame back-chain */
-+      ksp = ksp - STACK_FRAME_OVERHEAD; /* first stack frame (entry uses) 
-+                                         * (less would do) */
-+      *ksp = (unsigned long)ksp+STACK_FRAME_OVERHEAD; /* second back-chain */
-+      ksp = ksp - 224; /* domain context */
-+      adp->esp[_cpuid] = (unsigned long)ksp - STACK_FRAME_OVERHEAD;
-+      *((unsigned long *)adp->esp[_cpuid]) = (unsigned long)ksp + 224; 
/*back-chain*/
-+      /* NOTE: these depend on _adeos_switch_domain ordering */
-+      ksp[18] = (unsigned long)get_paca(); /* r13 needs to hold paca */
-+      ksp[19] = (_cpuid == cpuid); /* r3 */
-+      ksp[20] = ((unsigned long *)attr->entry)[1]; /* r2 = TOC base */
-+      ksp[25] = ((unsigned long *)attr->entry)[0]; /* lr = entry addr. */
-+      ksp[26] = flags & ~MSR_EE; /* msr */    
-+      }
-+}
-+
-+#else /* !CONFIG_ADEOS_THREADS */
-+
-+void __adeos_init_domain (adomain_t *adp, adattr_t *attr)
-+
-+{}
-+
-+#endif /* CONFIG_ADEOS_THREADS */
-+
-+void __adeos_cleanup_domain (adomain_t *adp)
-+
-+{
-+    int nr_cpus = num_online_cpus();
-+    int _cpuid;
-+
-+    adeos_unstall_pipeline_from(adp);
-+
-+    for (_cpuid = 0; _cpuid < nr_cpus; _cpuid++)
-+      {
-+#ifdef CONFIG_SMP
-+      while (adp->cpudata[_cpuid].irq_pending_hi != 0)
-+          cpu_relax();
-+
-+      while (test_bit(IPIPE_XPEND_FLAG,&adp->cpudata[_cpuid].status))
-+          cpu_relax();
-+#endif /* CONFIG_SMP */
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+      if (adp->estackbase[_cpuid] != 0)
-+          kfree((void *)adp->estackbase[_cpuid]);
-+#endif /* CONFIG_ADEOS_THREADS */
-+      }
-+}
-+
-+int adeos_get_sysinfo (adsysinfo_t *info)
-+
-+{
-+    info->ncpus = num_online_cpus();
-+    info->cpufreq = adeos_cpu_freq();
-+    info->archdep.tmirq = ADEOS_TIMER_VIRQ;
-+    info->archdep.tmfreq = info->cpufreq;
-+
-+    return 0;
-+}
-+
-+static void __adeos_set_decr (void)
-+
-+{
-+    adeos_declare_cpuid;
-+
-+    adeos_load_cpuid();
-+
-+    disarm_decr[cpuid] = (__adeos_decr_ticks != tb_ticks_per_jiffy);
-+    __adeos_decr_next[cpuid] = __adeos_read_timebase() + __adeos_decr_ticks;
-+    set_dec(__adeos_decr_ticks);
-+}
-+
-+int adeos_tune_timer (unsigned long ns, int flags)
-+
-+{
-+    unsigned long x, ticks;
-+
-+    if (flags & ADEOS_RESET_TIMER)
-+      ticks = tb_ticks_per_jiffy;
-+    else
-+      {
-+      ticks = ns * tb_ticks_per_jiffy / (1000000000 / HZ);
-+
-+      if (ticks > tb_ticks_per_jiffy)
-+          return -EINVAL;
-+      }
-+
-+    x = adeos_critical_enter(&__adeos_set_decr); /* Sync with all CPUs */
-+    __adeos_decr_ticks = ticks;
-+    __adeos_set_decr();
-+    adeos_critical_exit(x);
-+
-+    return 0;
-+}
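
adeos_tune_timer() above converts the requested period from nanoseconds to timebase
ticks as ticks = ns * tb_ticks_per_jiffy / (1000000000 / HZ). A standalone check of
that arithmetic with assumed values (HZ and the timebase frequency are illustrative;
both are board/kernel dependent):

#include <stdio.h>

int main(void)
{
        unsigned long hz = 1000;                    /* assumed kernel HZ */
        unsigned long tb_ticks_per_jiffy = 33333;   /* ~33.333 MHz timebase */
        unsigned long ns = 100000;                  /* request a 100 us period */

        unsigned long ticks = ns * tb_ticks_per_jiffy / (1000000000UL / hz);

        printf("%lu ns -> %lu timebase ticks\n", ns, ticks);  /* prints 3333 */
        return 0;
}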
-+
-+/* adeos_send_ipi() -- Send a specified service IPI to a set of
-+   processors. */
-+
-+int adeos_send_ipi (unsigned ipi, cpumask_t cpumask)
-+
-+{
-+    printk(KERN_WARNING "Adeos: Call to unimplemented adeos_send_ipi() from 
%s\n",adp_current->name);
-+    return 0;
-+}
-diff -Nru linux-2.6.10/arch/ppc64/Kconfig 
linux-2.6.10-adeos-ppc64-r2/arch/ppc64/Kconfig
---- linux-2.6.10/arch/ppc64/Kconfig    2004-12-24 23:34:58.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/arch/ppc64/Kconfig     2005-10-05 
10:34:53.000000000 +0300
-@@ -370,6 +370,8 @@
-       depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
-       default y
- 
-+source "adeos/Kconfig"
-+
- source "arch/ppc64/oprofile/Kconfig"
- 
- source "arch/ppc64/Kconfig.debug"
-diff -Nru linux-2.6.10/arch/ppc64/kernel/adeos.c linux-2.6.10-adeos-ppc64-r2/arch/ppc64/kernel/adeos.c
---- linux-2.6.10/arch/ppc64/kernel/adeos.c     1970-01-01 02:00:00.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/arch/ppc64/kernel/adeos.c      2005-10-05 10:34:53.000000000 +0300
-@@ -0,0 +1,700 @@
-+/*
-+ *   linux/arch/ppc64/kernel/adeos.c
-+ *
-+ *   Adeos 64-bit PowerPC adaptation
-+ *   Copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
-+ *   based on previous work:
-+ *     
-+ *   Copyright (C) 2004 Philippe Gerum.
-+ *
-+ *   Adeos/PPC port over 2.6 based on the previous 2.4 implementation by:
-+ *
-+ *   Copyright (C) 2004 Wolfgang Grandegger.
-+ *
-+ *   It follows closely the ARM and x86 ports of ADEOS.
-+ *
-+ *   Copyright (C) 2003 Philippe Gerum.
-+ *
-+ *   This program is free software; you can redistribute it and/or modify
-+ *   it under the terms of the GNU General Public License as published by
-+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
-+ *   USA; either version 2 of the License, or (at your option) any later
-+ *   version.
-+ *
-+ *   This program is distributed in the hope that it will be useful,
-+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ *   GNU General Public License for more details.
-+ *
-+ *   You should have received a copy of the GNU General Public License
-+ *   along with this program; if not, write to the Free Software
-+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ *   Architecture-dependent ADEOS core support for PowerPC
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <linux/smp.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/bitops.h>
-+#include <linux/interrupt.h>
-+#include <linux/irq.h>
-+#include <linux/module.h>
-+#include <asm/system.h>
-+#include <asm/atomic.h>
-+#include <asm/hw_irq.h>
-+#include <asm/irq.h>
-+#include <asm/io.h>
-+#include <asm/time.h>
-+#include <asm/machdep.h> /* ppc_md */
-+
-+#ifdef CONFIG_SMP
-+
-+static cpumask_t __adeos_cpu_sync_map;
-+
-+static cpumask_t __adeos_cpu_lock_map;
-+
-+static spinlock_t __adeos_cpu_barrier = SPIN_LOCK_UNLOCKED;
-+
-+static atomic_t __adeos_critical_count = ATOMIC_INIT(0);
-+
-+static void (*__adeos_cpu_sync)(void);
-+
-+#endif /* CONFIG_SMP */
-+
-+void do_IRQ(struct pt_regs *regs);
-+
-+extern struct list_head __adeos_pipeline;
-+
-+struct pt_regs __adeos_irq_regs;
-+
-+/* Current reload value for the decrementer. */
-+unsigned long __adeos_decr_ticks;
-+
-+/* Next tick date (timebase value). */
-+unsigned long __adeos_decr_next[ADEOS_NR_CPUS];
-+
-+static inline unsigned long ffnz (unsigned long ul) {
-+
-+    __asm__ __volatile__ ("cntlzd %0, %1" : "=r" (ul) : "r" (ul & (-ul)));
-+    return 63 - ul;
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+/* Always called with hw interrupts off. */
-+
-+static void __adeos_do_critical_sync (unsigned irq)
-+
-+{
-+    adeos_declare_cpuid;
-+
-+    adeos_load_cpuid();
-+
-+    cpu_set(cpuid,__adeos_cpu_sync_map);
-+
-+    /* Now we are in sync with the lock requestor running on another
-+       CPU. Enter a spinning wait until he releases the global
-+       lock. */
-+    adeos_spin_lock(&__adeos_cpu_barrier);
-+
-+    /* Got it. Now get out. */
-+
-+    if (__adeos_cpu_sync)
-+      /* Call the sync routine if any. */
-+      __adeos_cpu_sync();
-+
-+    adeos_spin_unlock(&__adeos_cpu_barrier);
-+
-+    cpu_clear(cpuid,__adeos_cpu_sync_map);
-+}
-+
-+#endif /* CONFIG_SMP */
-+
-+/* adeos_critical_enter() -- Grab the superlock for entering a global
-+   critical section. On UP this amounts to a hw cli(); on SMP the other
-+   online CPUs are also synchronized through the critical IPI. */
-+
-+unsigned long adeos_critical_enter (void (*syncfn)(void))
-+
-+{
-+    unsigned long flags;
-+
-+    adeos_hw_local_irq_save(flags);
-+
-+#ifdef CONFIG_SMP
-+    if (num_online_cpus() > 1) /* We might be running a SMP-kernel on a UP box... */
-+      {
-+      adeos_declare_cpuid;
-+      cpumask_t lock_map;
-+
-+      adeos_load_cpuid();
-+
-+      if (!cpu_test_and_set(cpuid,__adeos_cpu_lock_map))
-+          {
-+          while (cpu_test_and_set(BITS_PER_LONG - 1,__adeos_cpu_lock_map))
-+              {
-+              /* Refer to the explanations found in
-+                 linux/arch/asm-i386/irq.c about
-+                 SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND for more about
-+                 this strange loop. */
-+              int n = 0;
-+              do { cpu_relax(); } while (++n < cpuid);
-+              }
-+
-+          adeos_spin_lock(&__adeos_cpu_barrier);
-+
-+          __adeos_cpu_sync = syncfn;
-+
-+          /* Send the sync IPI to all processors but the current one. */
-+          __adeos_send_IPI_allbutself(ADEOS_CRITICAL_VECTOR);
-+
-+          cpus_andnot(lock_map,cpu_online_map,__adeos_cpu_lock_map);
-+
-+          while (!cpus_equal(__adeos_cpu_sync_map,lock_map))
-+              cpu_relax();
-+          }
-+
-+      atomic_inc(&__adeos_critical_count);
-+      }
-+#endif /* CONFIG_SMP */
-+
-+    return flags;
-+}
-+
-+/* adeos_critical_exit() -- Release the superlock. */
-+
-+void adeos_critical_exit (unsigned long flags)
-+
-+{
-+#ifdef CONFIG_SMP
-+    if (num_online_cpus() > 1) /* We might be running a SMP-kernel on a UP box... */
-+      {
-+      adeos_declare_cpuid;
-+
-+      adeos_load_cpuid();
-+
-+      if (atomic_dec_and_test(&__adeos_critical_count))
-+          {
-+          adeos_spin_unlock(&__adeos_cpu_barrier);
-+
-+          while (!cpus_empty(__adeos_cpu_sync_map))
-+              cpu_relax();
-+
-+          cpu_clear(cpuid,__adeos_cpu_lock_map);
-+          cpu_clear(BITS_PER_LONG - 1,__adeos_cpu_lock_map);
-+          }
-+      }
-+#endif /* CONFIG_SMP */
-+
-+    adeos_hw_local_irq_restore(flags);
-+}
-+
-+void __adeos_init_platform (void)
-+
-+{
-+    unsigned timer_virq;
-+
-+    /* Allocate a virtual IRQ for the decrementer trap early to get it
-+       mapped to IPIPE_VIRQ_BASE */
-+
-+    timer_virq = adeos_alloc_irq();
-+
-+    if (timer_virq != ADEOS_TIMER_VIRQ)
-+      panic("Adeos: cannot reserve timer virq #%d (got #%d)",
-+            ADEOS_TIMER_VIRQ,
-+            timer_virq);
-+
-+    __adeos_decr_ticks = tb_ticks_per_jiffy;
-+}
-+
-+void __adeos_init_stage (adomain_t *adp)
-+
-+{
-+    int cpuid, n;
-+
-+    for (cpuid = 0; cpuid < ADEOS_NR_CPUS; cpuid++)
-+      {
-+      adp->cpudata[cpuid].irq_pending_hi = 0;
-+
-+      for (n = 0; n < IPIPE_IRQ_IWORDS; n++)
-+          adp->cpudata[cpuid].irq_pending_lo[n] = 0;
-+
-+      for (n = 0; n < IPIPE_NR_IRQS; n++)
-+          adp->cpudata[cpuid].irq_hits[n] = 0;
-+      }
-+
-+    for (n = 0; n < IPIPE_NR_IRQS; n++)
-+      {
-+      adp->irqs[n].acknowledge = NULL;
-+      adp->irqs[n].handler = NULL;
-+      adp->irqs[n].control = IPIPE_PASS_MASK; /* Pass but don't handle */
-+      }
-+
-+#ifdef CONFIG_SMP
-+    adp->irqs[ADEOS_CRITICAL_IPI].acknowledge = &__adeos_ack_irq;
-+    adp->irqs[ADEOS_CRITICAL_IPI].handler = &__adeos_do_critical_sync;
-+    /* Immediately handle in the current domain but *never* pass */
-+    adp->irqs[ADEOS_CRITICAL_IPI].control = IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK|IPIPE_SYSTEM_MASK;
-+#endif /* CONFIG_SMP */
-+}
-+
-+/* __adeos_sync_stage() -- Flush the pending IRQs for the current
-+   domain (and processor).  This routine flushes the interrupt log
-+   (see "Optimistic interrupt protection" from D. Stodolsky et al. for
-+   more on the deferred interrupt scheme). Every interrupt that
-+   occurred while the pipeline was stalled gets played.  WARNING:
-+   callers on SMP boxen should always check for CPU migration on
-+   return of this routine. One can control the kind of interrupts
-+   which are going to be sync'ed using the syncmask
-+   parameter. IPIPE_IRQMASK_ANY plays them all, IPIPE_IRQMASK_VIRT
-+   plays virtual interrupts only. This routine must be called with hw
-+   interrupts off. */
-+
-+void __adeos_sync_stage (unsigned long syncmask)
-+
-+{
-+    unsigned long mask, submask;
-+    struct adcpudata *cpudata;
-+    int level, rank;
-+    adeos_declare_cpuid;
-+    adomain_t *adp;
-+    unsigned irq;
-+
-+    adeos_load_cpuid();
-+    adp = adp_cpu_current[cpuid];
-+    cpudata = &adp->cpudata[cpuid];
-+
-+    if (__test_and_set_bit(IPIPE_SYNC_FLAG,&cpudata->status))
-+              return;
-+
-+    /* The policy here is to keep the dispatching code interrupt-free
-+       by stalling the current stage. If the upper domain handler
-+       (which we call) wants to re-enable interrupts while in a safe
-+       portion of the code (e.g. SA_INTERRUPT flag unset for Linux's
-+       sigaction()), it will have to unstall (then stall again before
-+       returning to us!) the stage when it sees fit. */
-+
-+    while ((mask = (cpudata->irq_pending_hi & syncmask)) != 0)
-+      {
-+      /* Give a slight priority advantage to high-numbered IRQs
-+         like the virtual ones. */
-+      level = ffnz(mask);
-+      __clear_bit(level,&cpudata->irq_pending_hi);
-+
-+      while ((submask = cpudata->irq_pending_lo[level]) != 0)
-+          {
-+          rank = ffnz(submask);
-+          irq = (level << IPIPE_IRQ_ISHIFT) + rank;
-+
-+          if (test_bit(IPIPE_LOCK_FLAG,&adp->irqs[irq].control))
-+              {
-+              __clear_bit(rank,&cpudata->irq_pending_lo[level]);
-+              continue;
-+              }
-+
-+          if (--cpudata->irq_hits[irq] == 0)
-+              __clear_bit(rank,&cpudata->irq_pending_lo[level]);
-+
-+          __set_bit(IPIPE_STALL_FLAG,&cpudata->status);
-+
-+#ifdef CONFIG_ADEOS_PROFILING
-+          __adeos_profile_data[cpuid].irqs[irq].n_synced++;
-+          adeos_hw_tsc(__adeos_profile_data[cpuid].irqs[irq].t_synced);
-+#endif /* CONFIG_ADEOS_PROFILING */
-+
-+          if (adp == adp_root)
-+              {
-+              adeos_hw_sti();
-+              ((void (*)(unsigned, struct pt_regs *))adp->irqs[irq].handler)(irq,&__adeos_irq_regs);
-+              adeos_hw_cli();
-+              }
-+          else
-+              {
-+              __clear_bit(IPIPE_SYNC_FLAG,&cpudata->status);
-+              adp->irqs[irq].handler(irq);
-+              __set_bit(IPIPE_SYNC_FLAG,&cpudata->status);            
-+              }
-+
-+#ifdef CONFIG_SMP
-+          {
-+          int _cpuid = adeos_processor_id();
-+
-+          if (_cpuid != cpuid) /* Handle CPU migration. */
-+              {
-+              /* We expect any domain to clear the SYNC bit each
-+                 time it switches in a new task, so that preemptions
-+                 and/or CPU migrations (in the SMP case) over the
-+                 ISR do not lock out the log syncer for some
-+                 indefinite amount of time. In the Linux case,
-+                 schedule() handles this (see kernel/sched.c). For
-+                 this reason, we don't bother clearing it here for
-+                 the source CPU in the migration handling case,
-+                 since it must have scheduled another task in by
-+                 now. */
-+              cpuid = _cpuid;
-+              cpudata = &adp->cpudata[cpuid];
-+              __set_bit(IPIPE_SYNC_FLAG,&cpudata->status);
-+              }
-+          }
-+#endif /* CONFIG_SMP */
-+
-+          __clear_bit(IPIPE_STALL_FLAG,&cpudata->status);
-+          }
-+      }
-+
-+    __clear_bit(IPIPE_SYNC_FLAG,&cpudata->status);
-+}
-+
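The log flushed by __adeos_sync_stage() above is a two-level bitmap: irq_pending_hi carries one bit per group of IRQs, irq_pending_lo[level] one bit per IRQ inside that group, and irq_hits[] a per-IRQ hit count. A host-side sketch of the same decode, using an illustrative group shift rather than the kernel's IPIPE_IRQ_ISHIFT:

	#include <stdio.h>

	#define GROUP_SHIFT 6          /* illustrative: 64 IRQs per pending word */

	/* Same result as the ffnz() above: index of the lowest set bit. */
	static int lowest_bit(unsigned long long w)
	{
		return __builtin_ctzll(w);
	}

	int main(void)
	{
		unsigned long long pending_hi = 0, pending_lo[4] = { 0 };
		unsigned irq = 70;          /* pretend IRQ 70 was logged */

		pending_lo[irq >> GROUP_SHIFT] |= 1ULL << (irq & 63);
		pending_hi |= 1ULL << (irq >> GROUP_SHIFT);

		/* Replay loop: pick a pending group, then each pending rank in it. */
		while (pending_hi) {
			int level = lowest_bit(pending_hi);
			while (pending_lo[level]) {
				int rank = lowest_bit(pending_lo[level]);
				printf("replaying irq %d\n", (level << GROUP_SHIFT) + rank);
				pending_lo[level] &= ~(1ULL << rank);
			}
			pending_hi &= ~(1ULL << level);
		}
		return 0;
	}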
-+int __adeos_ack_irq (unsigned irq)
-+
-+{
-+    irq_desc_t *desc = get_irq_desc(irq);
-+
-+    if (desc->handler->ack != NULL)
-+      {
-+      unsigned long adflags;
-+      adeos_declare_cpuid;
-+
-+      /* No need to mask IRQs at hw level: we are always called from
-+         __adeos_handle_irq(), so interrupts are already off. We
-+         stall the pipeline so that spin_lock_irq*() ops won't
-+         unintentionally flush it, since this could cause infinite
-+         recursion. */
-+
-+      adeos_load_cpuid();
-+      adflags = adeos_test_and_stall_pipeline();
-+      preempt_disable();
-+      spin_lock(&desc->lock);
-+      desc->handler->ack(irq);
-+      spin_unlock(&desc->lock);
-+      preempt_enable_no_resched();
-+      adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
-+      }
-+
-+    return 1;
-+}
-+
-+static inline void __adeos_walk_pipeline (struct list_head *pos, int cpuid)
-+
-+{
-+    adomain_t *this_domain = adp_cpu_current[cpuid];
-+
-+    while (pos != &__adeos_pipeline)
-+      {
-+      adomain_t *next_domain = list_entry(pos,adomain_t,p_link);
-+
-+      if (test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status))
-+          break; /* Stalled stage -- do not go further. */
-+
-+      if (next_domain->cpudata[cpuid].irq_pending_hi != 0)
-+          {
-+          /* Since the critical IPI might be dispatched by the
-+             following actions, the current domain might not be
-+             linked to the pipeline anymore after its handler
-+             returns on SMP boxes, even if the domain remains valid
-+             (see adeos_unregister_domain()), so don't make any
-+             dangerous assumptions here. */
-+
-+          if (next_domain == this_domain)
-+              __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+          else
-+              {
-+              __adeos_switch_to(this_domain,next_domain,cpuid);
-+
-+              adeos_load_cpuid(); /* Processor might have changed. */
-+
-+              if (this_domain->cpudata[cpuid].irq_pending_hi != 0 &&
-+                  !test_bit(IPIPE_STALL_FLAG,&this_domain->cpudata[cpuid].status))
-+                  __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+              }
-+
-+          break;
-+          }
-+      else if (next_domain == this_domain)
-+          break;
-+
-+      pos = next_domain->p_link.next;
-+      }
-+}
-+
-+/* __adeos_handle_irq() -- ADEOS's generic IRQ handler. An optimistic
-+   interrupt protection log is maintained here for each
-+   domain. Interrupts are off on entry. */
-+
-+void __adeos_handle_irq (int irq, struct pt_regs *regs)
-+
-+{
-+    struct list_head *head, *pos;
-+    adeos_declare_cpuid;
-+    int m_ack, s_ack;
-+
-+    m_ack = irq & ADEOS_IRQ_ACKED;
-+    irq &= ADEOS_IRQ_ACKED_MASK;
-+
-+    if (irq >= IPIPE_NR_IRQS)
-+      {
-+      printk(KERN_ERR "Adeos: spurious interrupt %d\n",irq);
-+      return;
-+      }
-+
-+    adeos_load_cpuid();
-+
-+#ifdef CONFIG_ADEOS_PROFILING
-+    __adeos_profile_data[cpuid].irqs[irq].n_handled++;
-+    adeos_hw_tsc(__adeos_profile_data[cpuid].irqs[irq].t_handled);
-+#endif /* CONFIG_ADEOS_PROFILING */
-+
-+    s_ack = m_ack;
-+
-+    if (test_bit(IPIPE_STICKY_FLAG,&adp_cpu_current[cpuid]->irqs[irq].control))
-+      head = &adp_cpu_current[cpuid]->p_link;
-+    else
-+      head = __adeos_pipeline.next;
-+
-+    /* Ack the interrupt. */
-+
-+    pos = head;
-+
-+    while (pos != &__adeos_pipeline)
-+      {
-+      adomain_t *_adp = list_entry(pos,adomain_t,p_link);
-+
-+      /* For each domain handling the incoming IRQ, mark it as
-+           pending in its log. */
-+
-+      if (test_bit(IPIPE_HANDLE_FLAG,&_adp->irqs[irq].control))
-+          {
-+          /* Domains that handle this IRQ are polled for
-+             acknowledging it by decreasing priority order. The
-+             interrupt must be made pending _first_ in the domain's
-+             status flags before the PIC is unlocked. */
-+
-+          _adp->cpudata[cpuid].irq_hits[irq]++;
-+          __adeos_set_irq_bit(_adp,cpuid,irq);
-+
-+          /* Always get the first master acknowledge available. Once
-+             we've got it, allow slave acknowledge handlers to run
-+             (until one of them stops us). */
-+
-+          if (_adp->irqs[irq].acknowledge != NULL)
-+              {
-+              if (!m_ack)
-+                  m_ack = _adp->irqs[irq].acknowledge(irq);
-+              else if (test_bit(IPIPE_SHARED_FLAG,&_adp->irqs[irq].control) && !s_ack)
-+                  s_ack = _adp->irqs[irq].acknowledge(irq);
-+              }
-+          }
-+
-+      /* If the domain does not want the IRQ to be passed down the
-+         interrupt pipe, exit the loop now. */
-+
-+      if (!test_bit(IPIPE_PASS_FLAG,&_adp->irqs[irq].control))
-+          break;
-+
-+      pos = _adp->p_link.next;
-+      }
-+
-+    /* Now walk the pipeline, yielding control to the highest priority
-+       domain that has pending interrupt(s) or immediately to the
-+       current domain if the interrupt has been marked as
-+       'sticky'. This search does not go beyond the current domain in
-+       the pipeline. To understand this code properly, one must keep
-+       in mind that domains having a higher priority than the current
-+       one are sleeping on the adeos_suspend_domain() service. In
-+       addition, domains having a lower priority have been preempted
-+       by an interrupt dispatched to a higher priority domain. Once
-+       the first and highest priority stage has been selected here,
-+       the subsequent stages will be activated in turn when each
-+       visited domain calls adeos_suspend_domain() to wake up its
-+       neighbour down the pipeline. */
-+
-+    __adeos_walk_pipeline(head,cpuid);
-+}
-+
-+/* ADEOS's version of the interrupt trap handler. */
-+
-+int __adeos_grab_irq (struct pt_regs *regs)
-+
-+{
-+    extern int ppc_spurious_interrupts;
-+    adeos_declare_cpuid;
-+    int irq;
-+
-+    if (!adp_pipelined)
-+      {
-+      do_IRQ(regs);
-+      return 1;
-+      }
-+
-+    irq = ppc_md.get_irq(regs);
-+    if (irq >= 0)
-+      {
-+      __adeos_handle_irq(irq,regs);
-+      }
-+    else 
-+      ppc_spurious_interrupts++;
-+
-+    adeos_load_cpuid();
-+
-+    return (adp_cpu_current[cpuid] == adp_root &&
-+          !test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status));
-+}
-+
-+/* ADEOS's version of irq.c:do_IRQ(). */
-+
-+void __adeos_do_IRQ (int irq, struct pt_regs *regs) {
-+      irq_enter();
-+      ppc_irq_dispatch_handler(regs, irq);
-+      irq_exit();
-+}
-+
-+/* ADEOS's version of the decrementer trap handler. */
-+
-+int __adeos_grab_timer (struct pt_regs *regs)
-+
-+{
-+    adeos_declare_cpuid;
-+
-+    if (!adp_pipelined)
-+      {
-+      timer_interrupt(regs);
-+      return 1;
-+      }
-+
-+    /* On 970 CPUs DEC cannot be disabled, so unless it is re-armed here
-+     * a decrementer interrupt would trigger again as soon as interrupts
-+     * are re-enabled in __adeos_sync_stage.
-+     */
-+    set_dec(0x7fffffff);
-+    
-+    __adeos_irq_regs.msr = regs->msr; /* for do_timer() */
-+
-+    __adeos_handle_irq(ADEOS_TIMER_VIRQ,regs);
-+
-+    adeos_load_cpuid();
-+
-+    if (__adeos_decr_ticks != tb_ticks_per_jiffy)
-+      {
-+      unsigned long next_date, now;
-+
-+      next_date = __adeos_decr_next[cpuid];
-+
-+      while ((now = __adeos_read_timebase()) >= next_date)
-+          next_date += __adeos_decr_ticks;
-+
-+      set_dec(next_date - now);
-+
-+      __adeos_decr_next[cpuid] = next_date;
-+      }
-+
-+    return (adp_cpu_current[cpuid] == adp_root &&
-+          !test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status));
-+}
-+
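When the decrementer period has been re-tuned (__adeos_decr_ticks != tb_ticks_per_jiffy), __adeos_grab_timer() above re-arms DEC by advancing the per-CPU deadline past the current timebase reading. The catch-up logic in isolation, with made-up numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long decr_ticks = 1000;   /* illustrative re-tuned period */
		unsigned long long next_date  = 50000;  /* previously programmed deadline */
		unsigned long long now        = 53210;  /* current timebase reading */

		/* Skip over any deadlines that expired while the handler was busy. */
		while (now >= next_date)
			next_date += decr_ticks;

		/* Program DEC with the remaining distance: 54000 - 53210 = 790 ticks. */
		printf("set_dec(%llu)\n", next_date - now);
		return 0;
	}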
-+void __adeos_do_timer (int irq, struct pt_regs *regs)
-+
-+{
-+    timer_interrupt(regs);
-+}
-+
-+asmlinkage int __adeos_check_root (struct pt_regs *regs)
-+
-+{
-+    adeos_declare_cpuid;
-+    /* This routine is called with hw interrupts off, so no migration
-+       can occur while checking the identity of the current domain. */
-+    adeos_load_cpuid();
-+    return (adp_cpu_current[cpuid] == adp_root &&
-+          !test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status));
-+}
-+
-+/* adeos_trigger_irq() -- Push the interrupt to the pipeline entry
-+   just as if it had actually been received from a hw source. This
-+   works for both real and virtual interrupts. This also means that
-+   the current domain might be immediately preempted by a higher
-+   priority domain which happens to handle this interrupt. */
-+
-+int adeos_trigger_irq (unsigned irq)
-+
-+{
-+    struct pt_regs regs;
-+    unsigned long flags;
-+
-+    if (irq >= IPIPE_NR_IRQS ||
-+      (adeos_virtual_irq_p(irq) && !test_bit(irq - IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map)))
-+      return -EINVAL;
-+
-+    adeos_hw_local_irq_save(flags);
-+
-+    regs.msr = flags;
-+
-+    __adeos_handle_irq(irq | ADEOS_IRQ_ACKED, &regs);
-+
-+    adeos_hw_local_irq_restore(flags);
-+
-+    return 1;
-+}
-+
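A typical caller pairs adeos_trigger_irq() with a virtual IRQ obtained from adeos_alloc_irq(), both of which appear in this patch. A minimal kernel-context sketch, assuming the handler has already been installed in the interested domain with adeos_virtualize_irq():

	/* Minimal sketch, kernel context assumed. */
	static void kick_my_virq(void)
	{
		static unsigned my_virq;

		if (!my_virq)
			my_virq = adeos_alloc_irq();   /* reserve a free VIRQ slot once */

		if (my_virq)
			adeos_trigger_irq(my_virq);    /* inject it at the pipeline head */
	}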
-+int __adeos_enter_syscall (struct pt_regs *regs)
-+
-+{
-+    adeos_declare_cpuid;
-+    unsigned long flags;
-+
-+    /* This routine either returns:
-+       0 -- if the syscall is to be passed to Linux;
-+       1 -- if the syscall should not be passed to Linux, and no
-+       tail work should be performed;
-+       -1 -- if the syscall should not be passed to Linux but the
-+       tail work has to be performed. */
-+
-+    if (__adeos_event_monitors[ADEOS_SYSCALL_PROLOGUE] > 0 &&
-+      __adeos_handle_event(ADEOS_SYSCALL_PROLOGUE,regs) > 0)
-+      {
-+      if (adp_current == adp_root && !in_atomic())
-+          {
-+          /* Sync pending VIRQs before _TIF_NEED_RESCHED is
-+           * tested. */
-+
-+          adeos_lock_cpu(flags);
-+
-+          if ((adp_root->cpudata[cpuid].irq_pending_hi & IPIPE_IRQMASK_VIRT) != 0)
-+              __adeos_sync_stage(IPIPE_IRQMASK_VIRT);
-+
-+          adeos_unlock_cpu(flags);
-+
-+          return -1;
-+          }
-+
-+      return 1;
-+      }
-+
-+    return 0;
-+}
-+
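The 0 / 1 / -1 contract documented above is what the entry.S hunk earlier branches on (bgt to the stripped-down exit, blt to syscall_exit, fall-through to the Linux dispatch). Rendered as C for readability; the function names below are illustrative placeholders, not symbols from this patch:

	/* Illustrative rendering of the branches taken on the return value
	   of __adeos_enter_syscall() in the entry.S hunk above. */
	static void dispatch_after_prologue(int ret)
	{
		if (ret > 0)
			fast_syscall_exit();       /* handled elsewhere: skip Linux and the tail work */
		else if (ret < 0)
			syscall_exit();            /* skip Linux, but still run the signal/resched tail work */
		else
			linux_syscall_dispatch();  /* ret == 0: let Linux handle the syscall */
	}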
-+int __adeos_exit_syscall (void) 
-+
-+{
-+    if (__adeos_event_monitors[ADEOS_SYSCALL_EPILOGUE] > 0)
-+      return __adeos_handle_event(ADEOS_SYSCALL_EPILOGUE,NULL);
-+
-+    return 0;
-+}
-+
-+EXPORT_SYMBOL(__adeos_init_stage);
-+EXPORT_SYMBOL(__adeos_sync_stage);
-+EXPORT_SYMBOL(__adeos_irq_regs);
-+#ifdef CONFIG_ADEOS_THREADS
-+EXPORT_SYMBOL(__adeos_switch_domain);
-+#endif /* CONFIG_ADEOS_THREADS */
-+EXPORT_SYMBOL(__adeos_do_IRQ);
-+EXPORT_SYMBOL(__adeos_do_timer);
-+EXPORT_SYMBOL(__adeos_decr_ticks);
-+EXPORT_SYMBOL(__adeos_decr_next);
-+EXPORT_SYMBOL(__adeos_current_threadinfo);
-+EXPORT_SYMBOL(adeos_critical_enter);
-+EXPORT_SYMBOL(adeos_critical_exit);
-+EXPORT_SYMBOL(adeos_trigger_irq);
-diff -Nru linux-2.6.10/arch/ppc64/kernel/entry.S linux-2.6.10-adeos-ppc64-r2/arch/ppc64/kernel/entry.S
---- linux-2.6.10/arch/ppc64/kernel/entry.S     2004-12-24 23:33:49.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/arch/ppc64/kernel/entry.S      2005-10-05 10:55:51.000000000 +0300
-@@ -108,6 +108,23 @@
-       ori     r11,r11,MSR_EE
-       mtmsrd  r11,1
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      addi    r3,r1,GPR0
-+      bl      .__adeos_enter_syscall
-+      cmpdi   r3,0
-+      ld      r0,GPR0(r1)
-+      ld      r3,GPR3(r1)
-+      ld      r4,GPR4(r1)
-+      ld      r5,GPR5(r1)
-+      ld      r6,GPR6(r1)
-+      ld      r7,GPR7(r1)
-+      ld      r8,GPR8(r1)
-+      ld      r9,GPR9(r1)
-+      bgt     adeos_end_syscall
-+      blt     syscall_exit
-+      addi    r9,r1,STACK_FRAME_OVERHEAD
-+#endif /* CONFIG_ADEOS_CORE */
-+
- #ifdef SHOW_SYSCALLS
-       bl      .do_show_syscall
-       REST_GPR(0,r1)
-@@ -145,7 +162,13 @@
-       ldx     r10,r11,r0      /* Fetch system call handler [ptr] */
-       mtctr   r10
-       bctrl                   /* Call handler */
--
-+#ifdef CONFIG_ADEOS_CORE
-+      std     r3,RESULT(r1)
-+      bl      .__adeos_exit_syscall
-+      cmpdi   r3,0
-+      ld      r3,RESULT(r1)
-+      bne-    syscall_exit_adeos
-+#endif /* CONFIG_ADEOS_CORE */
- syscall_exit:
- #ifdef SHOW_SYSCALLS
-       std     r3,GPR3(r1)
-@@ -195,6 +218,39 @@
-       mtspr   SRR1,r8
-       rfid
-       b       .       /* prevent speculative execution */
-+#ifdef CONFIG_ADEOS_CORE
-+syscall_exit_adeos:
-+      ld      r5,_CCR(r1)
-+      ld      r8,_MSR(r1)
-+      ld      r7,_NIP(r1)
-+      stdcx.  r0,0,r1                 /* to clear pending reservations */
-+      andi.   r6,r8,MSR_PR
-+      ld      r4,_LINK(r1)
-+      beq-    1f                      /* only restore r13 if */
-+      ld      r13,GPR13(r1)           /* returning to usermode */
-+1:    ld      r2,GPR2(r1)
-+      ld      r1,GPR1(r1)
-+      li      r12,MSR_RI
-+      mfmsr   r10                     /* should this be done here? */
-+      andc    r10,r10,r12
-+      mtmsrd  r10,1                   /* clear MSR.RI */
-+      mtlr    r4
-+      mtcr    r5
-+      mtspr   SRR0,r7
-+      mtspr   SRR1,r8
-+      rfid
-+      b       .       /* prevent speculative execution */
-+#endif /* CONFIG_ADEOS_CORE */
-+
-+#ifdef CONFIG_ADEOS_CORE
-+      .globl  adeos_end_syscall
-+adeos_end_syscall:
-+      mfmsr   r10
-+      rldicl  r10,r10,48,1
-+      rotldi  r10,r10,16
-+      mtmsrd  r10,1
-+      b       syscall_exit_adeos
-+#endif /* CONFIG_ADEOS_CORE */
- 
- syscall_enosys:
-       li      r3,-ENOSYS
-@@ -400,6 +456,14 @@
-       beq     2f              /* if yes, don't slbie it */
-       oris    r0,r6,0x0800    /* set C (class) bit */
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      /* disable interrupts so that SLB and impl. specific 
-+       * address translation optimizations stay sane */
-+      mfmsr   r10
-+      rldicl  r9,r10,48,1     /* clear MSR_EE */
-+      rotldi  r9,r9,16
-+      mtmsrd  r9,1
-+#endif /* CONFIG_ADEOS_CORE */
-       /* Bolt in the new stack SLB entry */
-       ld      r7,KSP_VSID(r4) /* Get new stack's VSID */
-       oris    r6,r6,(SLB_ESID_V)@h
-@@ -408,7 +472,9 @@
-       slbie   r0              /* Workaround POWER5 < DD2.1 issue */
-       slbmte  r7,r6
-       isync
--
-+#ifdef CONFIG_ADEOS_CORE
-+      mtmsrd  r10,1           /* remember old interrupt state */
-+#endif /* CONFIG_ADEOS_CORE */
- 2:
- END_FTR_SECTION_IFSET(CPU_FTR_SLB)
-       clrrdi  r7,r8,THREAD_SHIFT      /* base of new stack */
-@@ -468,6 +534,13 @@
-       rotldi  r9,r9,16
-       mtmsrd  r9,1            /* Update machine state */
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      bl      .__adeos_check_root
-+      cmpdi   r3,0
-+      mfmsr   r10             /* this is used later, might be messed */
-+      beq-    restore
-+#endif /* CONFIG_ADEOS_CORE */
-+
- #ifdef CONFIG_PREEMPT
-       clrrdi  r9,r1,THREAD_SHIFT      /* current_thread_info() */
-       li      r0,_TIF_NEED_RESCHED    /* bits to check */
-@@ -844,3 +917,124 @@
-         blr
-       
- #endif        /* CONFIG_PPC_MULTIPLATFORM */
-+
-+#ifdef CONFIG_ADEOS_CORE
-+
-+_GLOBAL(__adeos_ret_from_except_lite)
-+      cmpdi   r3,0
-+      bne+    .ret_from_except_lite
-+      b       restore
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+
-+/*
-+ * r3 = adp_next, r4 = adp_cpu_current[adeos_processor_id()].
-+ * NOTE: This code is _not_ SMP-compliant. Always called with hw
-+ * interrupts off.
-+ * TODO: implement (configure time) support for different ABIs?
-+ */   
-+_GLOBAL(__adeos_switch_domain)
-+
-+      /* 27*8 = 216 for registers
-+       * +8 padding for quad-word alignment as required by spec
-+       * = 224 */
-+      /* alloc stack frame (store and update r1) */
-+      stdu    r1,-224-STACK_FRAME_OVERHEAD(r1)
-+
-+      /* Save general purpose registers. (22) */
-+      std     r31,STACK_FRAME_OVERHEAD+0*8(r1)
-+      std     r30,STACK_FRAME_OVERHEAD+1*8(r1)
-+      std     r29,STACK_FRAME_OVERHEAD+2*8(r1)
-+      std     r28,STACK_FRAME_OVERHEAD+3*8(r1)
-+      std     r27,STACK_FRAME_OVERHEAD+4*8(r1)
-+      std     r26,STACK_FRAME_OVERHEAD+5*8(r1)
-+      std     r25,STACK_FRAME_OVERHEAD+6*8(r1)
-+      std     r24,STACK_FRAME_OVERHEAD+7*8(r1)
-+      std     r23,STACK_FRAME_OVERHEAD+8*8(r1)
-+      std     r22,STACK_FRAME_OVERHEAD+9*8(r1)
-+      std     r21,STACK_FRAME_OVERHEAD+10*8(r1)
-+      std     r20,STACK_FRAME_OVERHEAD+11*8(r1)
-+      std     r19,STACK_FRAME_OVERHEAD+12*8(r1)
-+      std     r18,STACK_FRAME_OVERHEAD+13*8(r1)
-+      std     r17,STACK_FRAME_OVERHEAD+14*8(r1)
-+      std     r16,STACK_FRAME_OVERHEAD+15*8(r1)
-+      std     r15,STACK_FRAME_OVERHEAD+16*8(r1)
-+      std     r14,STACK_FRAME_OVERHEAD+17*8(r1)
-+      std     r13,STACK_FRAME_OVERHEAD+18*8(r1)
-+      std      r3,STACK_FRAME_OVERHEAD+19*8(r1)
-+      std      r2,STACK_FRAME_OVERHEAD+20*8(r1)
-+      std      r0,STACK_FRAME_OVERHEAD+21*8(r1)
-+
-+      /* Save special registers. (5) */
-+      mfctr    r2
-+      std      r2,STACK_FRAME_OVERHEAD+22*8(r1)
-+      mfcr     r2
-+      std      r2,STACK_FRAME_OVERHEAD+23*8(r1)
-+      mfxer    r2
-+      std      r2,STACK_FRAME_OVERHEAD+24*8(r1)
-+      mflr     r2
-+      std      r2,STACK_FRAME_OVERHEAD+25*8(r1)
-+      mfmsr    r2
-+      std      r2,STACK_FRAME_OVERHEAD+26*8(r1)
-+
-+      /* Actual switch block. */
-+      ld       r2,0(r4)       /* r2 = old_adp = adp_cpu_current[cpuid] */
-+      std      r1,0(r2)       /* old_adp->esp[0] = sp */
-+      std      r3,0(r4)       /* adp_cpu_current[cpuid] = new_adp */
-+      /* CONFIG_SMP should sync here; but first, accesses to esp[]
-+      would require cpuid-indexing. */
-+      ld       r1,0(r3)       /* sp = new_adp->esp[0] */
-+
-+      /* Restore special registers. */
-+      ld       r2,STACK_FRAME_OVERHEAD+26*8(r1)
-+      mtmsrd   r2
-+      ld       r2,STACK_FRAME_OVERHEAD+25*8(r1)
-+      mtlr     r2
-+      ld       r2,STACK_FRAME_OVERHEAD+24*8(r1)
-+      mtxer    r2
-+      ld       r2,STACK_FRAME_OVERHEAD+23*8(r1)
-+      mtcr     r2
-+      ld       r2,STACK_FRAME_OVERHEAD+22*8(r1)
-+      mtctr    r2
-+
-+      /* Restore general purpose registers. */
-+      ld       r0,STACK_FRAME_OVERHEAD+21*8(r1)
-+      ld       r2,STACK_FRAME_OVERHEAD+20*8(r1)
-+      ld       r3,STACK_FRAME_OVERHEAD+19*8(r1)
-+      ld      r13,STACK_FRAME_OVERHEAD+18*8(r1)
-+      ld      r14,STACK_FRAME_OVERHEAD+17*8(r1)
-+      ld      r15,STACK_FRAME_OVERHEAD+16*8(r1)
-+      ld      r16,STACK_FRAME_OVERHEAD+15*8(r1)
-+      ld      r17,STACK_FRAME_OVERHEAD+14*8(r1)
-+      ld      r18,STACK_FRAME_OVERHEAD+13*8(r1)
-+      ld      r19,STACK_FRAME_OVERHEAD+12*8(r1)
-+      ld      r20,STACK_FRAME_OVERHEAD+11*8(r1)
-+      ld      r21,STACK_FRAME_OVERHEAD+10*8(r1)
-+      ld      r22,STACK_FRAME_OVERHEAD+9*8(r1)
-+      ld      r23,STACK_FRAME_OVERHEAD+8*8(r1)
-+      ld      r24,STACK_FRAME_OVERHEAD+7*8(r1)
-+      ld      r25,STACK_FRAME_OVERHEAD+6*8(r1)
-+      ld      r26,STACK_FRAME_OVERHEAD+5*8(r1)
-+      ld      r27,STACK_FRAME_OVERHEAD+4*8(r1)
-+      ld      r28,STACK_FRAME_OVERHEAD+3*8(r1)
-+      ld      r29,STACK_FRAME_OVERHEAD+2*8(r1)
-+      ld      r30,STACK_FRAME_OVERHEAD+1*8(r1)
-+      ld      r31,STACK_FRAME_OVERHEAD+0*8(r1)
-+
-+      addi    r1,r1,224+STACK_FRAME_OVERHEAD
-+
-+      blr
-+
-+#endif /* CONFIG_ADEOS_THREADS */
-+
-+/* Returns the current threadinfo pointer in a way which is
-+   insensitive to the underlying stack, by reading the PACA pointer
-+   directly from special purpose register SPRG3. */
-+/* could probably just use r13 and forget loading paca */     
-+_GLOBAL(__adeos_current_threadinfo)
-+      mfspr   r3,SPRG3                /* get PACA */
-+      ld      r3,PACACURRENT(r3)
-+      blr
-+      
-+#endif /* CONFIG_ADEOS_CORE */
-+
-diff -Nru linux-2.6.10/arch/ppc64/kernel/head.S linux-2.6.10-adeos-ppc64-r2/arch/ppc64/kernel/head.S
---- linux-2.6.10/arch/ppc64/kernel/head.S      2004-12-24 23:34:48.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/arch/ppc64/kernel/head.S       2005-10-05 10:34:53.000000000 +0300
-@@ -381,6 +381,18 @@
-       bl      hdlr;                                   \
-       b       .ret_from_except_lite
- 
-+#ifdef CONFIG_ADEOS_CORE
-+#define ADEOS_EXCEPTION_COMMON_LITE(trap, label, hdlr)        \
-+      .align  7;                                      \
-+      .globl label##_common;                          \
-+label##_common:                                               \
-+      EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);      \
-+      DISABLE_INTS;                                   \
-+      addi    r3,r1,STACK_FRAME_OVERHEAD;             \
-+      bl      hdlr;                                   \
-+      b       .__adeos_ret_from_except_lite
-+#endif /* CONFIG_ADEOS_CORE */
-+
- /*
-  * Start of pSeries system interrupt routines
-  */
-@@ -761,7 +773,12 @@
-       bl      .MachineCheckException
-       b       .ret_from_except
- 
-+
-+#ifdef CONFIG_ADEOS_CORE
-+      ADEOS_EXCEPTION_COMMON_LITE(0x900, Decrementer, .__adeos_grab_timer) 
-+#else /* !CONFIG_ADEOS_CORE */
-       STD_EXCEPTION_COMMON_LITE(0x900, Decrementer, .timer_interrupt)
-+#endif /* CONFIG_ADEOS_CORE */
-       STD_EXCEPTION_COMMON(0xa00, Trap_0a, .UnknownException)
-       STD_EXCEPTION_COMMON(0xb00, Trap_0b, .UnknownException)
-       STD_EXCEPTION_COMMON(0xd00, SingleStep, .SingleStepException)
-@@ -890,8 +907,13 @@
- HardwareInterrupt_entry:
-       DISABLE_INTS
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-+#ifdef CONFIG_ADEOS_CORE
-+      bl      .__adeos_grab_irq
-+      b       .__adeos_ret_from_except_lite
-+#else /* !CONFIG_ADEOS_CORE */
-       bl      .do_IRQ
-       b       .ret_from_except_lite
-+#endif /* CONFIG_ADEOS_CORE */
- 
-       .align  7
-       .globl Alignment_common
-diff -Nru linux-2.6.10/arch/ppc64/kernel/idle.c linux-2.6.10-adeos-ppc64-r2/arch/ppc64/kernel/idle.c
---- linux-2.6.10/arch/ppc64/kernel/idle.c      2004-12-24 23:35:24.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/arch/ppc64/kernel/idle.c       2005-10-05 10:34:53.000000000 +0300
-@@ -131,6 +131,9 @@
- 
-                       while (!need_resched() && !cpu_is_offline(cpu)) {
-                               barrier();
-+#ifdef CONFIG_ADEOS_CORE
-+                              adeos_suspend_domain();
-+#endif /* CONFIG_ADEOS_CORE */
-                               /*
-                                * Go into low thread priority and possibly
-                                * low power mode.
-@@ -288,8 +291,15 @@
- {
-       while(1) {
-               /* check CPU type here */
--              if (!need_resched())
-+              if (!need_resched()) 
-+#ifdef CONFIG_ADEOS_CORE
-+              {
-+                      adeos_suspend_domain();
-                       power4_idle();
-+              }
-+#else /* !CONFIG_ADEOS_CORE */
-+                      power4_idle();
-+#endif /* CONFIG_ADEOS_CORE */                
-               if (need_resched())
-                       schedule();
-       }
-diff -Nru linux-2.6.10/arch/ppc64/kernel/irq.c linux-2.6.10-adeos-ppc64-r2/arch/ppc64/kernel/irq.c
---- linux-2.6.10/arch/ppc64/kernel/irq.c       2004-12-24 23:34:32.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/arch/ppc64/kernel/irq.c        2005-10-05 10:34:53.000000000 +0300
-@@ -134,14 +134,25 @@
- 
-       if (desc->status & IRQ_PER_CPU) {
-               /* no locking required for CPU-local interrupts: */
-+#ifdef CONFIG_ADEOS_CORE
-+              if (!adp_pipelined)
-+                      ack_irq(irq);
-+#else
-               ack_irq(irq);
-+#endif /* CONFIG_ADEOS_CORE */
-               action_ret = handle_IRQ_event(irq, regs, desc->action);
-               desc->handler->end(irq);
-               return;
-       }
- 
-       spin_lock(&desc->lock);
-+#ifdef CONFIG_ADEOS_CORE
-+      if (!adp_pipelined)
-+              ack_irq(irq);
-+#else
-       ack_irq(irq);   
-+#endif /* CONFIG_ADEOS_CORE */
-+      
-       /*
-          REPLAY is when Linux resends an IRQ that was dropped earlier
-          WAITING is used by probe to mark irqs that are being tested
-diff -Nru linux-2.6.10/arch/ppc64/kernel/Makefile linux-2.6.10-adeos-ppc64-r2/arch/ppc64/kernel/Makefile
---- linux-2.6.10/arch/ppc64/kernel/Makefile    2004-12-24 23:35:39.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/arch/ppc64/kernel/Makefile     2005-10-05 10:34:53.000000000 +0300
-@@ -62,4 +62,6 @@
- 
- obj-$(CONFIG_ALTIVEC)         += vecemu.o vector.o
- 
-+obj-$(CONFIG_ADEOS_CORE)      += adeos.o
-+
- CFLAGS_ioctl32.o += -Ifs/
-diff -Nru linux-2.6.10/arch/ppc64/kernel/ppc_ksyms.c linux-2.6.10-adeos-ppc64-r2/arch/ppc64/kernel/ppc_ksyms.c
---- linux-2.6.10/arch/ppc64/kernel/ppc_ksyms.c 2004-12-24 23:34:26.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/arch/ppc64/kernel/ppc_ksyms.c  2005-10-05 10:34:53.000000000 +0300
-@@ -163,3 +163,34 @@
- EXPORT_SYMBOL(paca);
- EXPORT_SYMBOL(cur_cpu_spec);
- EXPORT_SYMBOL(systemcfg);
-+
-+#ifdef CONFIG_ADEOS_CORE
-+/* The following are per-platform convenience exports which are needed
-+   by some Adeos domains loaded as kernel modules. */
-+extern unsigned long disarm_decr[NR_CPUS];
-+EXPORT_SYMBOL(disarm_decr);
-+EXPORT_SYMBOL(tb_ticks_per_jiffy);
-+EXPORT_SYMBOL(__switch_to);
-+void show_stack(struct task_struct *task,
-+              unsigned long *esp);
-+EXPORT_SYMBOL(show_stack);
-+
-+/* these two are needed by the task switching code in fusion */
-+extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
-+extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
-+EXPORT_SYMBOL(switch_stab);
-+EXPORT_SYMBOL(switch_slb);
-+
-+/* flush_tlb_pending() */
-+EXPORT_PER_CPU_SYMBOL(ppc64_tlb_batch);
-+EXPORT_SYMBOL(__flush_tlb_pending);
-+
-+EXPORT_SYMBOL(_switch);
-+#ifdef FEW_CONTEXTS
-+EXPORT_SYMBOL(nr_free_contexts);
-+EXPORT_SYMBOL(context_mm);
-+EXPORT_SYMBOL(steal_context);
-+#endif
-+extern struct task_struct *last_task_used_math;
-+EXPORT_SYMBOL(last_task_used_math);
-+#endif /* CONFIG_ADEOS_CORE */
-diff -Nru linux-2.6.10/arch/ppc64/kernel/time.c linux-2.6.10-adeos-ppc64-r2/arch/ppc64/kernel/time.c
---- linux-2.6.10/arch/ppc64/kernel/time.c      2004-12-24 23:35:28.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/arch/ppc64/kernel/time.c       2005-10-05 10:34:53.000000000 +0300
-@@ -73,6 +73,9 @@
- 
- EXPORT_SYMBOL(jiffies_64);
- 
-+#ifdef CONFIG_ADEOS_CORE
-+unsigned long disarm_decr[NR_CPUS];
-+#endif /* CONFIG_ADEOS_CORE */
- /* keep track of when we need to update the rtc */
- time_t last_rtc_update;
- extern int piranha_simulator;
-@@ -293,6 +296,9 @@
-       next_dec = lpaca->next_jiffy_update_tb - cur_tb;
-       if (next_dec > lpaca->default_decr)
-               next_dec = lpaca->default_decr;
-+#ifdef CONFIG_ADEOS_CORE
-+      if (!disarm_decr[smp_processor_id()])
-+#endif /* CONFIG_ADEOS_CORE */        
-       set_dec(next_dec);
- 
- #ifdef CONFIG_PPC_ISERIES
-diff -Nru linux-2.6.10/arch/ppc64/kernel/traps.c linux-2.6.10-adeos-ppc64-r2/arch/ppc64/kernel/traps.c
---- linux-2.6.10/arch/ppc64/kernel/traps.c     2004-12-24 23:34:47.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/arch/ppc64/kernel/traps.c      2005-10-05 10:34:53.000000000 +0300
-@@ -75,6 +75,11 @@
-       if (debugger(regs))
-               return 1;
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      /* lets us see Oopses from other domains, too */
-+      if (adp_current != adp_root)
-+              adeos_set_printk_sync(adp_current);
-+#endif /* CONFIG_ADEOS_CORE */
-       console_verbose();
-       spin_lock_irq(&die_lock);
-       bust_spinlocks(1);
-@@ -185,9 +190,20 @@
- }
- #endif
- 
-+#ifdef CONFIG_ADEOS_CORE
-+static inline int __adeos_pipeline_trap(int trap, struct pt_regs *regs)
-+{
-+    return __adeos_event_monitors[trap] > 0 ? __adeos_handle_event(trap,regs) : 0;
-+}
-+#endif /* CONFIG_ADEOS_CORE */
-+
- void
- SystemResetException(struct pt_regs *regs)
- {
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_pipeline_trap(ADEOS_SYSRESET_TRAP,regs))
-+              return;
-+#endif /* CONFIG_ADEOS_CORE */
- #ifdef CONFIG_PPC_PSERIES
-       if (fwnmi_active) {
-               struct rtas_error_log *errhdr = FWNMI_get_errinfo(regs);
-@@ -265,7 +281,11 @@
-                       return;
-       }
- #endif
--
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_pipeline_trap(ADEOS_MCE_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-+      
-       if (debugger_fault_handler(regs))
-               return;
-       die("Machine check", regs, 0);
-@@ -278,6 +298,11 @@
- void
- UnknownException(struct pt_regs *regs)
- {
-+#ifdef CONFIG_ADEOS_CORE
-+       if (__adeos_pipeline_trap(ADEOS_UNKNOWN_TRAP,regs))
-+         return;
-+#endif /* CONFIG_ADEOS_CORE */
-+
-       printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
-              regs->nip, regs->msr, regs->trap);
- 
-@@ -287,6 +312,10 @@
- void
- InstructionBreakpointException(struct pt_regs *regs)
- {
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_pipeline_trap(ADEOS_IABR_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-       if (debugger_iabr_match(regs))
-               return;
-       _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
-@@ -296,7 +325,10 @@
- SingleStepException(struct pt_regs *regs)
- {
-       regs->msr &= ~MSR_SE;  /* Turn off 'trace' bit */
--
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_pipeline_trap(ADEOS_SSTEP_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-       if (debugger_sstep(regs))
-               return;
- 
-@@ -459,6 +491,11 @@
- void
- ProgramCheckException(struct pt_regs *regs)
- {
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_pipeline_trap(ADEOS_PCE_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-+
-       if (regs->msr & 0x100000) {
-               /* IEEE FP exception */
-               parse_fpe(regs);
-@@ -500,6 +537,10 @@
- 
- void KernelFPUnavailableException(struct pt_regs *regs)
- {
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_pipeline_trap(ADEOS_KFPUNAVAIL_TRAP,regs))
-+              return;
-+#endif /* CONFIG_ADEOS_CORE */
-       printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
-                         "%lx at %lx\n", regs->trap, regs->nip);
-       die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
-@@ -507,6 +548,11 @@
- 
- void AltivecUnavailableException(struct pt_regs *regs)
- {
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_pipeline_trap(ADEOS_ALTUNAVAIL_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-+
- #ifndef CONFIG_ALTIVEC
-       if (user_mode(regs)) {
-               /* A user program has executed an altivec instruction,
-@@ -539,6 +585,10 @@
- void
- PerformanceMonitorException(struct pt_regs *regs)
- {
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_pipeline_trap(ADEOS_PERFMON_TRAP,regs))
-+              return;
-+#endif /* CONFIG_ADEOS_CORE */
-       perf_irq(regs);
- }
- 
-@@ -554,7 +604,12 @@
-               emulate_single_step(regs);
-               return;
-       }
--
-+#ifdef CONFIG_ADEOS_CORE
-+      /* Assume that fixing alignment can always be done regardless
-+         of the current domain. */
-+      if (__adeos_pipeline_trap(ADEOS_ALIGNMENT_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-       /* Operand address was bad */   
-       if (fixed == -EFAULT) {
-               if (user_mode(regs)) {
-@@ -577,6 +632,11 @@
-       int err;
-       siginfo_t info;
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_pipeline_trap(ADEOS_ALTASSIST_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-+
-       if (!user_mode(regs)) {
-               printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
-                      " at %lx\n", regs->nip);
-@@ -618,6 +678,10 @@
-  */
- void unrecoverable_exception(struct pt_regs *regs)
- {
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_pipeline_trap(ADEOS_NREC_TRAP,regs))
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-       printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
-              regs->trap, regs->nip);
-       die("Unrecoverable exception", regs, SIGABRT);
-diff -Nru linux-2.6.10/arch/ppc64/mm/fault.c linux-2.6.10-adeos-ppc64-r2/arch/ppc64/mm/fault.c
---- linux-2.6.10/arch/ppc64/mm/fault.c 2004-12-24 23:35:23.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/arch/ppc64/mm/fault.c  2005-10-05 10:34:53.000000000 +0300
-@@ -95,6 +95,12 @@
- 
-       BUG_ON((trap == 0x380) || (trap == 0x480));
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      if (__adeos_event_monitors[ADEOS_ACCESS_TRAP] > 0 &&
-+          __adeos_handle_event(ADEOS_ACCESS_TRAP,regs) != 0)
-+          return 0;
-+#endif /* CONFIG_ADEOS_CORE */
-+
-       if (trap == 0x300) {
-               if (debugger_fault_handler(regs))
-                       return 0;
-diff -Nru linux-2.6.10/arch/ppc64/mm/hash_native.c linux-2.6.10-adeos-ppc64-r2/arch/ppc64/mm/hash_native.c
---- linux-2.6.10/arch/ppc64/mm/hash_native.c   2004-12-24 23:34:30.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/arch/ppc64/mm/hash_native.c    2005-10-05 10:57:19.000000000 +0300
-@@ -278,7 +278,7 @@
-       if (large)
-               avpn &= ~0x1UL;
- 
--      local_irq_save(flags);
-+      adeos_hw_local_irq_save(flags);
-       native_lock_hpte(hptep);
- 
-       dw0 = hptep->dw0.dw0;
-@@ -301,7 +301,7 @@
-               if (lock_tlbie)
-                       spin_unlock(&native_tlbie_lock);
-       }
--      local_irq_restore(flags);
-+      adeos_hw_local_irq_restore(flags);
- }
- 
- static void native_flush_hash_range(unsigned long context,
-@@ -316,7 +316,7 @@
-       /* XXX fix for large ptes */
-       unsigned long large = 0;
- 
--      local_irq_save(flags);
-+      adeos_hw_local_irq_save(flags);
- 
-       j = 0;
-       for (i = 0; i < number; i++) {
-@@ -384,7 +384,7 @@
-                       spin_unlock(&native_tlbie_lock);
-       }
- 
--      local_irq_restore(flags);
-+      adeos_hw_local_irq_restore(flags);
- }
- 
- #ifdef CONFIG_PPC_PSERIES
-diff -Nru linux-2.6.10/arch/ppc64/mm/init.c linux-2.6.10-adeos-ppc64-r2/arch/ppc64/mm/init.c
---- linux-2.6.10/arch/ppc64/mm/init.c  2004-12-24 23:34:58.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/arch/ppc64/mm/init.c   2005-10-05 10:57:59.000000000 +0300
-@@ -859,14 +859,14 @@
- 
-       vsid = get_vsid(vma->vm_mm->context.id, ea);
- 
--      local_irq_save(flags);
-+      adeos_hw_local_irq_save(flags);
-       tmp = cpumask_of_cpu(smp_processor_id());
-       if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
-               local = 1;
- 
-       __hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
-                   0x300, local);
--      local_irq_restore(flags);
-+      adeos_hw_local_irq_restore(flags);
- }
- 
- void __iomem * reserve_phb_iospace(unsigned long size)
-diff -Nru linux-2.6.10/arch/ppc64/mm/slb.c linux-2.6.10-adeos-ppc64-r2/arch/ppc64/mm/slb.c
---- linux-2.6.10/arch/ppc64/mm/slb.c   2004-12-24 23:34:57.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/arch/ppc64/mm/slb.c    2005-10-05 11:00:53.000000000 +0300
-@@ -83,6 +83,9 @@
-       unsigned long pc = KSTK_EIP(tsk);
-       unsigned long stack = KSTK_ESP(tsk);
-       unsigned long unmapped_base;
-+      unsigned long flags;
-+
-+      adeos_hw_local_irq_save(flags);
- 
-       if (offset <= SLB_CACHE_ENTRIES) {
-               int i;
-@@ -115,24 +118,35 @@
-       else
-               unmapped_base = TASK_UNMAPPED_BASE_USER64;
- 
--      if (pc >= KERNELBASE)
-+      if (pc >= KERNELBASE) {
-+              adeos_hw_local_irq_restore(flags);
-               return;
-+      }
-       slb_allocate(pc);
- 
--      if (GET_ESID(pc) == GET_ESID(stack))
-+      if (GET_ESID(pc) == GET_ESID(stack)) {
-+              adeos_hw_local_irq_restore(flags);
-               return;
-+      }
- 
--      if (stack >= KERNELBASE)
-+      if (stack >= KERNELBASE) {
-+              adeos_hw_local_irq_restore(flags);
-               return;
-+      }
-       slb_allocate(stack);
- 
-       if ((GET_ESID(pc) == GET_ESID(unmapped_base))
--          || (GET_ESID(stack) == GET_ESID(unmapped_base)))
-+          || (GET_ESID(stack) == GET_ESID(unmapped_base))) {
-+              adeos_hw_local_irq_restore(flags);
-               return;
-+      }
- 
--      if (unmapped_base >= KERNELBASE)
-+      if (unmapped_base >= KERNELBASE) {
-+              adeos_hw_local_irq_restore(flags);
-               return;
-+      }
-       slb_allocate(unmapped_base);
-+      adeos_hw_local_irq_restore(flags);
- }
- 
- void slb_initialize(void)
-diff -Nru linux-2.6.10/arch/ppc64/mm/slb.c.orig linux-2.6.10-adeos-ppc64-r2/arch/ppc64/mm/slb.c.orig
---- linux-2.6.10/arch/ppc64/mm/slb.c.orig      1970-01-01 02:00:00.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/arch/ppc64/mm/slb.c.orig       2005-10-05 10:59:55.000000000 +0300
-@@ -0,0 +1,163 @@
-+/*
-+ * PowerPC64 SLB support.
-+ *
-+ * Copyright (C) 2004 David Gibson <[EMAIL PROTECTED]>, IBM
-+ * Based on earlier code writteh by:
-+ * Dave Engebretsen and Mike Corrigan {engebret|[EMAIL PROTECTED]
-+ *    Copyright (c) 2001 Dave Engebretsen
-+ * Copyright (C) 2002 Anton Blanchard <[EMAIL PROTECTED]>, IBM
-+ *
-+ *
-+ *      This program is free software; you can redistribute it and/or
-+ *      modify it under the terms of the GNU General Public License
-+ *      as published by the Free Software Foundation; either version
-+ *      2 of the License, or (at your option) any later version.
-+ */
-+
-+#include <linux/config.h>
-+#include <asm/pgtable.h>
-+#include <asm/mmu.h>
-+#include <asm/mmu_context.h>
-+#include <asm/paca.h>
-+#include <asm/naca.h>
-+#include <asm/cputable.h>
-+
-+extern void slb_allocate(unsigned long ea);
-+
-+static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
-+{
-+      return (ea & ESID_MASK) | SLB_ESID_V | slot;
-+}
-+
-+static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
-+{
-+      return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
-+}
-+
-+static inline void create_slbe(unsigned long ea, unsigned long vsid,
-+                             unsigned long flags, unsigned long entry)
-+{
-+      asm volatile("slbmte  %0,%1" :
-+                   : "r" (mk_vsid_data(ea, flags)),
-+                     "r" (mk_esid_data(ea, entry))
-+                   : "memory" );
-+}
-+
-+static void slb_flush_and_rebolt(void)
-+{
-+      /* If you change this make sure you change SLB_NUM_BOLTED
-+       * appropriately too. */
-+      unsigned long ksp_flags = SLB_VSID_KERNEL;
-+      unsigned long ksp_esid_data;
-+
-+      WARN_ON(!irqs_disabled());
-+
-+      if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
-+              ksp_flags |= SLB_VSID_L;
-+
-+      ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
-+      if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
-+              ksp_esid_data &= ~SLB_ESID_V;
-+
-+      /* We need to do this all in asm, so we're sure we don't touch
-+       * the stack between the slbia and rebolting it. */
-+      asm volatile("isync\n"
-+                   "slbia\n"
-+                   /* Slot 1 - first VMALLOC segment */
-+                   "slbmte    %0,%1\n"
-+                   /* Slot 2 - kernel stack */
-+                   "slbmte    %2,%3\n"
-+                   "isync"
-+                   :: "r"(mk_vsid_data(VMALLOCBASE, SLB_VSID_KERNEL)),
-+                      "r"(mk_esid_data(VMALLOCBASE, 1)),
-+                      "r"(mk_vsid_data(ksp_esid_data, ksp_flags)),
-+                      "r"(ksp_esid_data)
-+                   : "memory");
-+}
-+
-+/* Flush all user entries from the segment table of the current processor. */
-+void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
-+{
-+      unsigned long offset = get_paca()->slb_cache_ptr;
-+      unsigned long esid_data;
-+      unsigned long pc = KSTK_EIP(tsk);
-+      unsigned long stack = KSTK_ESP(tsk);
-+      unsigned long unmapped_base;
-+
-+      if (offset <= SLB_CACHE_ENTRIES) {
-+              int i;
-+              asm volatile("isync" : : : "memory");
-+              for (i = 0; i < offset; i++) {
-+                      esid_data = (unsigned long)get_paca()->slb_cache[i]
-+                              << SID_SHIFT;
-+                      asm volatile("slbie %0" : : "r" (esid_data));
-+              }
-+              asm volatile("isync" : : : "memory");
-+      } else {
-+              slb_flush_and_rebolt();
-+      }
-+
-+      /* Workaround POWER5 < DD2.1 issue */
-+      if (offset == 1 || offset > SLB_CACHE_ENTRIES) {
-+              /* flush segment in EEH region, we shouldn't ever
-+               * access addresses in this region. */
-+              asm volatile("slbie %0" : : "r"(EEHREGIONBASE));
-+      }
-+
-+      get_paca()->slb_cache_ptr = 0;
-+      get_paca()->context = mm->context;
-+
-+      /*
-+       * preload some userspace segments into the SLB.
-+       */
-+      if (test_tsk_thread_flag(tsk, TIF_32BIT))
-+              unmapped_base = TASK_UNMAPPED_BASE_USER32;
-+      else
-+              unmapped_base = TASK_UNMAPPED_BASE_USER64;
-+
-+      if (pc >= KERNELBASE)
-+              return;
-+      slb_allocate(pc);
-+
-+      if (GET_ESID(pc) == GET_ESID(stack))
-+              return;
-+
-+      if (stack >= KERNELBASE)
-+              return;
-+      slb_allocate(stack);
-+
-+      if ((GET_ESID(pc) == GET_ESID(unmapped_base))
-+          || (GET_ESID(stack) == GET_ESID(unmapped_base)))
-+              return;
-+
-+      if (unmapped_base >= KERNELBASE)
-+              return;
-+      slb_allocate(unmapped_base);
-+}
-+
-+void slb_initialize(void)
-+{
-+      /* On iSeries the bolted entries have already been set up by
-+       * the hypervisor from the lparMap data in head.S */
-+#ifndef CONFIG_PPC_ISERIES
-+      unsigned long flags = SLB_VSID_KERNEL;
-+
-+      /* Invalidate the entire SLB (even slot 0) & all the ERATS */
-+      if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
-+              flags |= SLB_VSID_L;
-+
-+      asm volatile("isync":::"memory");
-+      asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
-+      asm volatile("isync; slbia; isync":::"memory");
-+      create_slbe(KERNELBASE, get_kernel_vsid(KERNELBASE), flags, 0);
-+      create_slbe(VMALLOCBASE, get_kernel_vsid(KERNELBASE),
-+                  SLB_VSID_KERNEL, 1);
-+      /* We don't bolt the stack for the time being - we're in boot,
-+       * so the stack is in the bolted segment.  By the time it goes
-+       * elsewhere, we'll call _switch() which will bolt in the new
-+       * one. */
-+      asm volatile("isync":::"memory");
-+#endif
-+
-+      get_paca()->stab_rr = SLB_NUM_BOLTED;
-+}
-diff -Nru linux-2.6.10/arch/ppc64/mm/tlb.c linux-2.6.10-adeos-ppc64-r2/arch/ppc64/mm/tlb.c
---- linux-2.6.10/arch/ppc64/mm/tlb.c   2004-12-24 23:34:45.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/arch/ppc64/mm/tlb.c    2005-10-05 10:34:53.000000000 +0300
-@@ -122,7 +122,11 @@
-       cpumask_t tmp;
-       int local = 0;
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      BUG_ON(adp_current==adp_root && in_interrupt());
-+#else /* !CONFIG_ADEOS_CORE */
-       BUG_ON(in_interrupt());
-+#endif /* CONFIG_ADEOS_CORE */
- 
-       cpu = get_cpu();
-       i = batch->index;
-diff -Nru linux-2.6.10/Documentation/adeos.txt linux-2.6.10-adeos-ppc64-r2/Documentation/adeos.txt
---- linux-2.6.10/Documentation/adeos.txt       1970-01-01 02:00:00.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/Documentation/adeos.txt        2005-10-05 10:34:53.000000000 +0300
-@@ -0,0 +1,176 @@
-+
-+The Adeos nanokernel is based on research and publications made in the
-+early '90s on the subject of nanokernels. Our basic method was to
-+reverse the approach described in most of the papers on the subject.
-+Instead of first building the nanokernel and then building the client
-+OSes, we started from a live and known-to-be-functional OS, Linux, and
-+inserted a nanokernel beneath it. Starting from Adeos, other client
-+OSes can now be put side-by-side with the Linux kernel.
-+
-+To this end, Adeos enables multiple domains to exist simultaneously on
-+the same hardware. None of these domains see each other, but all of
-+them see Adeos. A domain is most probably a complete OS, but there is
-+no assumption being made regarding the sophistication of what's in
-+a domain.
-+
-+To share the hardware among the different OSes, Adeos implements an
-+interrupt pipeline (ipipe). Every OS domain has an entry in the ipipe.
-+Each interrupt that comes in the ipipe is passed on to every domain
-+in the ipipe. Instead of disabling/enabling interrupts, each domain
-+in the pipeline only needs to stall/unstall its pipeline stage. If
-+an ipipe stage is stalled, then the interrupts do not progress in the
-+ipipe until that stage has been unstalled. Each stage of the ipipe
-+can, of course, decide to do a number of things with an interrupt.
-+Among other things, it can decide that it's the last recipient of the
-+interrupt. In that case, the ipipe does not propagate the interrupt
-+to the rest of the domains in the ipipe.
-+
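Concretely, a domain protects a short section from interrupt delivery at its own stage with the stall/unstall calls rather than with cli/sti. A minimal sketch: adeos_stall_pipeline_from()/adeos_unstall_pipeline_from() and adp_current are the nanokernel's own symbols, the surrounding code is hypothetical:

	/* Keep IRQs from being dispatched to this stage; the hardware
	   interrupt mask is left untouched. */
	adeos_stall_pipeline_from(adp_current);

	/* ... short critical section for this domain ... */

	/* Interrupts logged meanwhile are replayed when we unstall. */
	adeos_unstall_pipeline_from(adp_current);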
-+Regardless of the operations being done in the ipipe, the Adeos code
-+does __not__ play with the interrupt masks. The only case where the
-+hardware masks are altered is during the addition/removal of a domain
-+from the ipipe. This also means that no OS is allowed to use the real
-+hardware cli/sti. But this is OK, since the stall/unstall calls
-+achieve the same functionality.
-+
-+Our approach is based on the following papers (links to these
-+papers are provided at the bottom of this message):
-+[1] D. Probert, J. Bruno, and M. Karaorman. "Space: a new approach to
-+operating system abstraction." In: International Workshop on Object
-+Orientation in Operating Systems, pages 133-137, October 1991.
-+[2] D. Probert, J. Bruno. "Building fundamentally extensible application-
-+specific operating systems in Space", March 1995.
-+[3] D. Cheriton, K. Duda. "A caching model of operating system kernel
-+functionality". In: Proc. Symp. on Operating Systems Design and
-+Implementation, pages 179-194, Monterey CA (USA), 1994.
-+[4] D. Engler, M. Kaashoek, and J. O'Toole Jr. "Exokernel: an operating
-+system architecture for application-specific resource management",
-+December 1995.
-+
-+If you don't want to go fetch the complete papers, here's a summary.
-+The first 2 discuss the Space nanokernel, the 3rd discusses the cache
-+nanokernel, and the last discusses exokernel.
-+
-+The complete Adeos approach has been thoroughly documented in a whitepaper
-+published more than a year ago entitled "Adaptive Domain Environment
-+for Operating Systems" and available here: http://www.opersys.com/adeos
-+The current implementation is slightly different. Mainly, we do not
-+implement the functionality to move Linux out of ring 0. Although of
-+interest, this approach is not very portable.
-+
-+Instead, our patch taps right into Linux's main source of control
-+over the hardware, the interrupt dispatching code, and inserts an
-+interrupt pipeline which can then serve all the nanokernel's clients,
-+including Linux.
-+
-+This is not a novelty in itself. Other OSes have been modified in such
-+a way for a wide range of purposes. One of the most interesting
-+examples is described by Stodolsky, Chen, and Bershad in a paper
-+entitled "Fast Interrupt Priority Management in Operating System
-+Kernels" published in 1993 as part of the Usenix Microkernels and
-+Other Kernel Architectures Symposium. In that case, cli/sti were
-+replaced by virtual cli/sti which did not modify the real interrupt
-+mask in any way. Instead, interrupts were deferred and delivered to
-+the OS upon a call to the virtualized sti.
-+
-+Mainly, this resulted in increased performance for the OS. Although
-+we haven't done any measurements on Linux's interrupt handling
-+performance with Adeos, our nanokernel includes by definition the
-+code implementing the technique described in the abovementioned
-+Stodolsky paper, which we use to redirect the hardware interrupt flow
-+to the pipeline.
-+
-+i386 and armnommu are currently supported. Most of the
-+architecture-dependent code is easily portable to other architectures.
-+
-+Aside from adding the Adeos module (driver/adeos), we also modified some
-+files to tap into Linux interrupt and system event dispatching (all
-+the modifications are encapsulated in #ifdef CONFIG_ADEOS_*/#endif).
-+
-+We modified the idle task so it gives control back to Adeos in order for
-+the ipipe to continue propagation.
-+
-+We modified init/main.c to initialize Adeos very early in the startup.
-+
-+Of course, we also added the appropriate makefile modifications and
-+config options so that you can choose to enable/disable Adeos as
-+part of the kernel build configuration.
-+
-+Adeos' public API is fully documented here:
-+http://www.freesoftware.fsf.org/adeos/doc/api/index.html.
-+
-+In Linux's case, adeos_register_domain() is called very early during
-+system startup.
-+
-+To add your domain to the ipipe, you need to:
-+1) Register your domain with Adeos using adeos_register_domain()
-+2) Call adeos_virtualize_irq() for all the IRQs you wish to be
-+notified about in the ipipe.
-+
-+That's it. Provided you gave Adeos appropriate handlers in step
-+#2, your interrupts will be delivered via the ipipe.
-+
-+During runtime, you may change your position in the ipipe using
-+adeos_renice_domain(). You may also stall/unstall the pipeline
-+and change the ipipe's handling of the interrupts according to your
-+needs.
-+
-+Adeos supports SMP, and also supports the APIC on UP.
-+
-+Here are some of the possible uses for Adeos (this list is far
-+from complete):
-+1) Much like User-Mode Linux, it should now be possible to have 2
-+Linux kernels living side-by-side on the same hardware. In contrast
-+to UML, this would not be 2 kernels one on top of the other, but
-+really side-by-side. Since Linux can be told at boot time to use
-+only one portion of the available RAM, on a 128MB machine this
-+would mean that the first could be made to use the 0-64MB space and
-+the second would use the 64-128MB space. We realize that many
-+modifications are required. Among other things, one of the 2 kernels
-+will not need to conduct hardware initialization. Nevertheless, this
-+possibility should be studied closer.
-+
-+2) It follows from #1 that adding other kernels beside Linux should
-+be feasible. BSD is a prime candidate, but it would also be nice to
-+see what virtualizers such as VMWare and Plex86 could do with Adeos.
-+Proprietary operating systems could potentially also be accommodated.
-+
-+3) All the previous work that has been done on nanokernels should now
-+be easily ported to Linux. Mainly, we would be very interested to
-+hear about extensions to Adeos. Primarily, we have no mechanisms
-+currently enabling multiple domains to share information. The papers
-+mentioned earlier provide such mechanisms, but we'd like to see
-+actual practical examples.
-+
-+4) Kernel debuggers' main problem (tapping into the kernel's
-+interrupts) is solved, and it should then be possible to provide
-+patchless kernel debuggers. They would then become loadable kernel
-+modules.
-+
-+5) Drivers that require absolute priority and dislike other kernel
-+portions that use cli/sti can now create a domain of their own
-+and place themselves before Linux in the ipipe. This provides a
-+mechanism for the implementation of systems that can provide guaranteed
-+realtime response.
-+
-+Philippe Gerum <[EMAIL PROTECTED]>
-+Karim Yaghmour <[EMAIL PROTECTED]>
-+
-+----------------------------------------------------------------------
-+Links to papers:
-+1-
-+http://citeseer.nj.nec.com/probert91space.html
-+ftp://ftp.cs.ucsb.edu/pub/papers/space/iwooos91.ps.gz (not working)
-+http://www4.informatik.uni-erlangen.de/~tsthiel/Papers/Space-iwooos91.ps.gz
-+
-+2-
-+http://www.cs.ucsb.edu/research/trcs/abstracts/1995-06.shtml
-+http://www4.informatik.uni-erlangen.de/~tsthiel/Papers/Space-trcs95-06.ps.gz
-+
-+3-
-+http://citeseer.nj.nec.com/kenneth94caching.html
-+http://guir.cs.berkeley.edu/projects/osprelims/papers/cachmodel-OSkernel.ps.gz
-+
-+4-
-+http://citeseer.nj.nec.com/engler95exokernel.html
-+ftp://ftp.cag.lcs.mit.edu/multiscale/exokernel.ps.Z
-+----------------------------------------------------------------------
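
The two registration steps described in the documentation above map onto the
interface declared in include/linux/adeos.h further down in this patch. The
following sketch is illustrative only and not part of the patch: the names
my_domain, my_entry and my_irq_handler, and the magic domid, are invented for
the example; only the adeos_*/IPIPE_* identifiers come from the headers below,
and how the entry routine is actually run depends on CONFIG_ADEOS_THREADS.

/* Illustrative sketch only -- not part of the patch.  Registers a
 * hypothetical client domain ("my_domain") and virtualizes IRQ 0 for
 * it, following the two steps listed in Documentation/adeos.txt. */
#include <linux/init.h>
#include <linux/adeos.h>

static adomain_t my_domain;

static void my_irq_handler(unsigned irq)
{
        /* Do the domain's work, then hand the IRQ down the pipeline
         * so Linux still sees it. */
        adeos_propagate_irq(irq);
}

static void my_entry(int iflag)
{
        /* Step 2: ask to be notified about IRQ 0 in the ipipe. */
        adeos_virtualize_irq(0, &my_irq_handler, NULL, IPIPE_DYNAMIC_MASK);

        for (;;)
                adeos_suspend_domain(); /* Yield until events arrive. */
}

static int __init my_domain_init(void)
{
        adattr_t attr;

        adeos_init_attr(&attr);              /* Fill in sane defaults. */
        attr.name = "example";
        attr.domid = 0x45584d50;             /* Arbitrary magic value. */
        attr.priority = ADEOS_ROOT_PRI + 1;  /* Ahead of the root domain. */
        attr.entry = &my_entry;

        return adeos_register_domain(&my_domain, &attr); /* Step 1. */
}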
-diff -Nru linux-2.6.10/include/asm-ppc64/adeos.h 
linux-2.6.10-adeos-ppc64-r2/include/asm-ppc64/adeos.h
---- linux-2.6.10/include/asm-ppc64/adeos.h     1970-01-01 02:00:00.000000000 
+0200
-+++ linux-2.6.10-adeos-ppc64-r2/include/asm-ppc64/adeos.h      2005-10-05 
17:54:03.000000000 +0300
-@@ -0,0 +1,444 @@
-+/*
-+ *   include/asm-ppc64/adeos.h
-+ *
-+ *   Adeos 64-bit PowerPC adoption
-+ *   Copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
-+ *   based on previous work:
-+ *     
-+ *   Copyright (C) 2004 Philippe Gerum.
-+ *
-+ *   Adeos/PPC port over 2.6 based on the previous 2.4 implementation by:
-+ *
-+ *   Copyright (C) 2004 Wolfgang Grandegger.
-+ *
-+ *   It follows closely the ARM and x86 ports of ADEOS.
-+ *
-+ *   Copyright (C) 2002 Philippe Gerum.
-+ *
-+ *   This program is free software; you can redistribute it and/or modify
-+ *   it under the terms of the GNU General Public License as published by
-+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
-+ *   USA; either version 2 of the License, or (at your option) any later
-+ *   version.
-+ *
-+ *   This program is distributed in the hope that it will be useful,
-+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ *   GNU General Public License for more details.
-+ *
-+ *   You should have received a copy of the GNU General Public License
-+ *   along with this program; if not, write to the Free Software
-+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, 
USA.
-+ */
-+
-+#ifndef __PPC64_ADEOS_H
-+#define __PPC64_ADEOS_H
-+
-+#include <asm/ptrace.h>
-+#include <asm/irq.h>
-+#include <asm/bitops.h>
-+#include <linux/list.h>
-+#include <linux/cpumask.h>
-+#include <linux/threads.h>
-+
-+#define ADEOS_ARCH_STRING     "r2/ppc64"
-+#define ADEOS_MAJOR_NUMBER    2
-+#define ADEOS_MINOR_NUMBER    255
-+
-+#define ADEOS_IRQ_ACKED               0x1000
-+#define ADEOS_IRQ_ACKED_MASK  (ADEOS_IRQ_ACKED - 1)
-+
-+#ifdef CONFIG_SMP
-+
-+#error "Adeos/ppc64: SMP not yet implemented"
-+
-+#define ADEOS_NR_CPUS          NR_CPUS
-+#define ADEOS_CRITICAL_IPI     0
-+
-+#define adeos_processor_id()   (__adeos_current_threadinfo()->cpu)
-+
-+#define adeos_declare_cpuid    int cpuid
-+#define adeos_load_cpuid()     do { \
-+                                  (cpuid) = adeos_processor_id();     \
-+                               } while(0)
-+#define adeos_lock_cpu(flags)  do { \
-+                                  adeos_hw_local_irq_save(flags); \
-+                                  (cpuid) = adeos_processor_id(); \
-+                               } while(0)
-+#define adeos_unlock_cpu(flags) adeos_hw_local_irq_restore(flags)
-+#define adeos_get_cpu(flags)    adeos_lock_cpu(flags)
-+#define adeos_put_cpu(flags)    adeos_unlock_cpu(flags)
-+#define adp_current             (adp_cpu_current[adeos_processor_id()])
-+
-+#else  /* !CONFIG_SMP */
-+
-+#define ADEOS_NR_CPUS          1
-+#define adeos_processor_id()   0
-+/* Array references using this index should be optimized out. */
-+#define adeos_declare_cpuid    const int cpuid = 0
-+#define adeos_load_cpuid()      /* nop */
-+#define adeos_lock_cpu(flags)   adeos_hw_local_irq_save(flags)
-+#define adeos_unlock_cpu(flags) adeos_hw_local_irq_restore(flags)
-+#define adeos_get_cpu(flags)    do { flags = flags; } while(0)
-+#define adeos_put_cpu(flags)    /* nop */
-+#define adp_current             (adp_cpu_current[0])
-+
-+#endif /* CONFIG_SMP */
-+
-+ /* PPC traps */
-+#define ADEOS_ACCESS_TRAP     0       /* Data or instruction access exception 
*/
-+#define ADEOS_ALIGNMENT_TRAP  1       /* Alignment exception */
-+#define ADEOS_ALTUNAVAIL_TRAP 2       /* Altivec unavailable */
-+#define ADEOS_PCE_TRAP        3       /* Program check exception */
-+#define ADEOS_MCE_TRAP        4       /* Machine check exception */
-+#define ADEOS_UNKNOWN_TRAP    5       /* Unknown exception */
-+#define ADEOS_IABR_TRAP       6       /* Instruction breakpoint */
-+#define ADEOS_SSTEP_TRAP      7       /* Single-step exception  */
-+#define ADEOS_NREC_TRAP       8       /* Non-recoverable exception  */
-+#define ADEOS_ALTASSIST_TRAP  9 /* Altivec assist exception */
-+#define ADEOS_SYSRESET_TRAP   10 /* System reset exception */
-+#define ADEOS_KFPUNAVAIL_TRAP 11 /* Kernel FP Unavailable exception */
-+#define ADEOS_PERFMON_TRAP    12 /* Performance Monitor exception */
-+#define ADEOS_NR_FAULTS       13
-+/* Pseudo-vectors used for kernel events */
-+#define ADEOS_FIRST_KEVENT      ADEOS_NR_FAULTS
-+#define ADEOS_SYSCALL_PROLOGUE  (ADEOS_FIRST_KEVENT)
-+#define ADEOS_SYSCALL_EPILOGUE  (ADEOS_FIRST_KEVENT + 1)
-+#define ADEOS_SCHEDULE_HEAD     (ADEOS_FIRST_KEVENT + 2)
-+#define ADEOS_SCHEDULE_TAIL     (ADEOS_FIRST_KEVENT + 3)
-+#define ADEOS_ENTER_PROCESS     (ADEOS_FIRST_KEVENT + 4)
-+#define ADEOS_EXIT_PROCESS      (ADEOS_FIRST_KEVENT + 5)
-+#define ADEOS_SIGNAL_PROCESS    (ADEOS_FIRST_KEVENT + 6)
-+#define ADEOS_KICK_PROCESS      (ADEOS_FIRST_KEVENT + 7)
-+#define ADEOS_RENICE_PROCESS    (ADEOS_FIRST_KEVENT + 8)
-+#define ADEOS_USER_EVENT        (ADEOS_FIRST_KEVENT + 9)
-+#define ADEOS_LAST_KEVENT       (ADEOS_USER_EVENT)
-+
-+#define ADEOS_NR_EVENTS         (ADEOS_LAST_KEVENT + 1)
-+
-+typedef struct adevinfo {
-+
-+    unsigned domid;
-+    unsigned event;
-+    void *evdata;
-+
-+    volatile int propagate;   /* Private */
-+
-+} adevinfo_t;
-+
-+typedef struct adsysinfo {
-+
-+    int ncpus;                        /* Number of CPUs on board */
-+
-+    u64 cpufreq;              /* CPU frequency (in Hz) */
-+
-+    /* Arch-dependent block */
-+
-+    struct {
-+      unsigned tmirq;         /* Decrementer virtual IRQ */
-+      u64 tmfreq;             /* Timebase frequency */
-+    } archdep;
-+
-+} adsysinfo_t;
-+
-+#define IPIPE_NR_XIRQS   NR_IRQS
-+/* Number of virtual IRQs */
-+#define IPIPE_NR_VIRQS   BITS_PER_LONG
-+/* First virtual IRQ # */
-+#define IPIPE_VIRQ_BASE  (((IPIPE_NR_XIRQS + BITS_PER_LONG - 1) / 
BITS_PER_LONG) * BITS_PER_LONG)
-+/* Total number of IRQ slots */
-+#define IPIPE_NR_IRQS     (IPIPE_VIRQ_BASE + IPIPE_NR_VIRQS)
-+/* Number of indirect words needed to map the whole IRQ space. */
-+#define IPIPE_IRQ_IWORDS  ((IPIPE_NR_IRQS + BITS_PER_LONG - 1) / 
BITS_PER_LONG)
-+#define IPIPE_IRQ_IMASK   (BITS_PER_LONG - 1)
-+#define IPIPE_IRQ_ISHIFT  6   /* 2^6 for 64bits arch. */
-+
-+#define IPIPE_IRQMASK_ANY   (~0L)
-+#define IPIPE_IRQMASK_VIRT  (IPIPE_IRQMASK_ANY << (IPIPE_VIRQ_BASE / 
BITS_PER_LONG))
-+
-+/* The first virtual interrupt is reserved for the timer (see
-+   __adeos_init_platform). */
-+#define ADEOS_TIMER_VIRQ    IPIPE_VIRQ_BASE
-+
-+typedef struct adomain {
-+
-+    /* -- Section: offset-based references are made on these fields
-+       from inline assembly code. Please don't move or reorder. */
-+#ifdef CONFIG_ADEOS_THREADS
-+    unsigned long esp[ADEOS_NR_CPUS]; /* Domain stack pointers */
-+#endif /* CONFIG_ADEOS_THREADS */
-+    void (*dswitch)(void);    /* Domain switch hook */
-+    /* -- End of section. */
-+
-+    struct list_head p_link;  /* Link in pipeline */
-+
-+    struct adcpudata {
-+      unsigned long status;
-+      unsigned long irq_pending_hi;
-+      unsigned long irq_pending_lo[IPIPE_IRQ_IWORDS];
-+      unsigned irq_hits[IPIPE_NR_IRQS];
-+#ifdef CONFIG_ADEOS_THREADS
-+      adevinfo_t event_info;
-+#endif /* CONFIG_ADEOS_THREADS */
-+    } cpudata[ADEOS_NR_CPUS];
-+
-+    struct {
-+      int (*acknowledge)(unsigned irq);
-+      void (*handler)(unsigned irq);
-+      unsigned long control;
-+    } irqs[IPIPE_NR_IRQS];
-+
-+    struct {
-+      void (*handler)(adevinfo_t *evinfo);
-+    } events[ADEOS_NR_EVENTS];
-+
-+    struct adomain *m_link;   /* Link in mutex sleep queue */
-+
-+    unsigned long flags;
-+
-+    unsigned domid;
-+
-+    const char *name;
-+
-+    int priority;
-+
-+    int ptd_keymax;
-+    int ptd_keycount;
-+    unsigned long ptd_keymap;
-+    void (*ptd_setfun)(int, void *);
-+    void *(*ptd_getfun)(int);
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+    unsigned long estackbase[ADEOS_NR_CPUS];
-+#endif /* CONFIG_ADEOS_THREADS */
-+
-+} adomain_t;
-+
-+/* The following macros must be used hw interrupts off. */
-+
-+#define __adeos_set_irq_bit(adp,cpuid,irq) \
-+do { \
-+    if (!test_bit(IPIPE_LOCK_FLAG,&(adp)->irqs[irq].control)) { \
-+        __set_bit(irq & 
IPIPE_IRQ_IMASK,&(adp)->cpudata[cpuid].irq_pending_lo[irq >> 
IPIPE_IRQ_ISHIFT]); \
-+        __set_bit(irq >> 
IPIPE_IRQ_ISHIFT,&(adp)->cpudata[cpuid].irq_pending_hi); \
-+       } \
-+} while(0)
-+
-+#define __adeos_clear_pend(adp,cpuid,irq) \
-+do { \
-+    __clear_bit(irq & 
IPIPE_IRQ_IMASK,&(adp)->cpudata[cpuid].irq_pending_lo[irq >> 
IPIPE_IRQ_ISHIFT]); \
-+    if ((adp)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT] == 0) \
-+        __clear_bit(irq >> 
IPIPE_IRQ_ISHIFT,&(adp)->cpudata[cpuid].irq_pending_hi); \
-+} while(0)
-+
-+#define __adeos_lock_irq(adp,cpuid,irq) \
-+do { \
-+    if (!test_and_set_bit(IPIPE_LOCK_FLAG,&(adp)->irqs[irq].control)) \
-+      __adeos_clear_pend(adp,cpuid,irq); \
-+} while(0)
-+
-+#define __adeos_unlock_irq(adp,irq) \
-+do { \
-+    if (test_and_clear_bit(IPIPE_LOCK_FLAG,&(adp)->irqs[irq].control)) { \
-+        int __cpuid, __nr_cpus = num_online_cpus();         \
-+      for (__cpuid = 0; __cpuid < __nr_cpus; __cpuid++)      \
-+         if ((adp)->cpudata[__cpuid].irq_hits[irq] > 0) { /* We need atomic 
ops next. */ \
-+           set_bit(irq & 
IPIPE_IRQ_IMASK,&(adp)->cpudata[__cpuid].irq_pending_lo[irq >> 
IPIPE_IRQ_ISHIFT]); \
-+           set_bit(irq >> 
IPIPE_IRQ_ISHIFT,&(adp)->cpudata[__cpuid].irq_pending_hi); \
-+         } \
-+    } \
-+} while(0)
-+
-+#define __adeos_clear_irq(adp,irq) \
-+do { \
-+    int __cpuid, __nr_cpus = num_online_cpus(); \
-+    clear_bit(IPIPE_LOCK_FLAG,&(adp)->irqs[irq].control); \
-+    for (__cpuid = 0; __cpuid < __nr_cpus; __cpuid++) {       \
-+       (adp)->cpudata[__cpuid].irq_hits[irq] = 0; \
-+       __adeos_clear_pend(adp,__cpuid,irq); \
-+    } \
-+} while(0)
-+
-+#define adeos_virtual_irq_p(irq) ((irq) >= IPIPE_VIRQ_BASE && \
-+                                (irq) < IPIPE_NR_IRQS)
-+
-+static inline void adeos_hw_local_irq_save_ptr(unsigned long *flags)
-+{
-+    unsigned long msr;
-+    msr = mfmsr();
-+    *flags = msr;
-+    __mtmsrd(msr & ~MSR_EE, 1);
-+    __asm__ __volatile__("": : :"memory");
-+}
-+
-+#define adeos_hw_local_irq_save_flags(flags) 
adeos_hw_local_irq_save_ptr(&(flags))
-+#define adeos_hw_local_irq_restore(flags)    do { \
-+      __asm__ __volatile__("": : :"memory"); \
-+      __mtmsrd((flags), 1); \
-+} while(0)
-+
-+static inline void adeos_hw_local_irq_disable(void)
-+{
-+    unsigned long msr;
-+    msr = mfmsr();
-+    __mtmsrd(msr & ~MSR_EE, 1);
-+    __asm__ __volatile__("": : :"memory");
-+}
-+
-+static inline void adeos_hw_local_irq_enable(void)
-+{
-+    unsigned long msr;
-+    __asm__ __volatile__("": : :"memory");
-+    msr = mfmsr();
-+    __mtmsrd(msr | MSR_EE, 1);
-+}
-+
-+#define adeos_hw_local_irq_save(flags) 
({adeos_hw_local_irq_save_flags(flags);adeos_hw_local_irq_disable();})
-+#define adeos_hw_save_flags_and_sti(flags) 
({adeos_hw_local_irq_save_flags(flags);adeos_hw_local_irq_enable();})
-+
-+#define adeos_hw_cli() adeos_hw_local_irq_disable()
-+#define adeos_hw_sti() adeos_hw_local_irq_enable()
-+
-+#define adeos_hw_local_irq_flags(flags)       ((flags) = mfmsr())
-+#define adeos_hw_test_iflag(x)                ((x) & MSR_EE)
-+#define adeos_hw_irqs_disabled()      \
-+({                                    \
-+      unsigned long flags;            \
-+      adeos_hw_local_irq_flags(flags);\
-+      !adeos_hw_test_iflag(flags);    \
-+})
-+
-+#define adeos_hw_tsc(t) (t = mftb())
-+
-+extern unsigned long tb_ticks_per_jiffy;
-+
-+#define adeos_cpu_freq() (HZ * tb_ticks_per_jiffy)
-+
-+#define adeos_spin_lock(x)     _spin_lock(x)
-+#define adeos_spin_unlock(x)   _spin_unlock(x)
-+#define adeos_spin_trylock(x)  _spin_trylock(x)
-+#define adeos_write_lock(x)    _write_lock(x)
-+#define adeos_write_unlock(x)  _write_unlock(x)
-+#define adeos_write_trylock(x) _write_trylock(x)
-+#define adeos_read_lock(x)     _read_lock(x)
-+#define adeos_read_unlock(x)   _read_unlock(x)
-+#define raw_spinlock_t         spinlock_t
-+#define RAW_SPIN_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED
-+#define raw_rwlock_t           rwlock_t
-+#define RAW_RW_LOCK_UNLOCKED   RW_LOCK_UNLOCKED
-+
-+#define spin_lock_irqsave_hw(lock,flags)      adeos_spin_lock_irqsave(lock, 
flags)
-+#define spin_unlock_irqrestore_hw(lock,flags) 
adeos_spin_unlock_irqrestore(lock, flags)
-+
-+#define adeos_spin_lock_irqsave(x,flags)  \
-+do { \
-+   adeos_hw_local_irq_save(flags); \
-+   adeos_spin_lock(x); \
-+} while (0)
-+
-+#define adeos_spin_unlock_irqrestore(x,flags)  \
-+do { \
-+   adeos_spin_unlock(x); \
-+   adeos_hw_local_irq_restore(flags); \
-+} while (0)
-+
-+#define adeos_spin_lock_disable(x)  \
-+do { \
-+   adeos_hw_cli(); \
-+   adeos_spin_lock(x); \
-+} while (0)
-+
-+#define adeos_spin_unlock_enable(x)  \
-+do { \
-+   adeos_spin_unlock(x); \
-+   adeos_hw_sti(); \
-+} while (0)
-+
-+#define adeos_read_lock_irqsave(lock, flags) \
-+do { \
-+   adeos_hw_local_irq_save(flags); \
-+   adeos_read_lock(lock); \
-+} while (0)
-+
-+#define adeos_read_unlock_irqrestore(lock, flags) \
-+do { \
-+   adeos_read_unlock(lock); \
-+   adeos_hw_local_irq_restore(flags); \
-+} while (0)
-+
-+#define adeos_write_lock_irqsave(lock, flags) \
-+do { \
-+   adeos_hw_local_irq_save(flags); \
-+   adeos_write_lock(lock); \
-+} while (0)
-+
-+#define adeos_write_unlock_irqrestore(lock, flags) \
-+do { \
-+   adeos_write_unlock(lock); \
-+   adeos_hw_local_irq_restore(flags); \
-+} while (0)
-+
-+/* Private interface -- Internal use only */
-+
-+struct adattr;
-+
-+void __adeos_init(void);
-+
-+void __adeos_init_domain(adomain_t *adp,
-+                       struct adattr *attr);
-+
-+void __adeos_cleanup_domain(adomain_t *adp);
-+
-+#define __adeos_check_platform() do { } while(0)
-+
-+#define __adeos_read_timebase() ({ unsigned long t; adeos_hw_tsc(t); t; })
-+
-+void __adeos_init_platform(void);
-+
-+void __adeos_enable_pipeline(void);
-+
-+void __adeos_disable_pipeline(void);
-+
-+void __adeos_init_stage(adomain_t *adp);
-+
-+void __adeos_sync_stage(unsigned long syncmask);
-+
-+int __adeos_ack_irq(unsigned irq);
-+
-+void __adeos_do_IRQ(int irq,
-+                  struct pt_regs *regs);
-+
-+void __adeos_do_timer(int irq,
-+                    struct pt_regs *regs);
-+
-+struct thread_info *__adeos_current_threadinfo(void);
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+
-+int __adeos_switch_domain(adomain_t *adp,
-+                        adomain_t **currentp);
-+
-+/* Called with hw interrupts off. */
-+static inline void __adeos_switch_to (adomain_t *out,
-+                                    adomain_t *in,
-+                                    int cpuid)
-+{
-+    extern adomain_t *adp_cpu_current[];
-+
-+    __adeos_switch_domain(in,&adp_cpu_current[cpuid]);
-+
-+    if (out->dswitch != NULL)
-+      out->dswitch();
-+}
-+
-+#endif /* CONFIG_ADEOS_THREADS */
-+
-+extern struct pt_regs __adeos_irq_regs;
-+
-+extern unsigned long __adeos_virtual_irq_map;
-+
-+extern unsigned long __adeos_decr_ticks;
-+
-+extern unsigned long __adeos_decr_next[];
-+
-+#endif /* !__PPC64_ADEOS_H */
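
The asm-ppc64 header above distinguishes the virtualized interrupt state from
the real MSR[EE]-based helpers, which is what the mmu_context.h hunk below
relies on. A brief sketch of the hardware-level helpers follows; it is not
taken from the patch, and critical_update is an invented name.

/* Illustrative sketch only -- not part of the patch.  Once pipelining
 * is engaged, local_irq_save() merely stalls the root stage, whereas
 * the adeos_hw_* helpers above really toggle MSR[EE]. */
static void critical_update(void)
{
        unsigned long flags;

        adeos_hw_local_irq_save(flags);      /* Clears MSR[EE]. */
        /* ... section that no domain may preempt ... */
        adeos_hw_local_irq_restore(flags);
}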
-diff -Nru linux-2.6.10/include/asm-ppc64/hw_irq.h 
linux-2.6.10-adeos-ppc64-r2/include/asm-ppc64/hw_irq.h
---- linux-2.6.10/include/asm-ppc64/hw_irq.h    2004-12-24 23:35:40.000000000 
+0200
-+++ linux-2.6.10-adeos-ppc64-r2/include/asm-ppc64/hw_irq.h     2005-10-05 
10:34:53.000000000 +0300
-@@ -19,6 +19,37 @@
- int timer_interrupt(struct pt_regs *);
- extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq);
- 
-+#ifdef CONFIG_ADEOS_CORE
-+
-+void __adeos_stall_root(void);
-+void __adeos_unstall_root(void);
-+unsigned long __adeos_test_root(void);
-+unsigned long __adeos_test_and_stall_root(void);
-+void __adeos_restore_root(unsigned long flags);
-+
-+#define irqs_disabled()  __adeos_test_root()
-+
-+static inline void local_irq_disable(void) {
-+    __adeos_stall_root();
-+}
-+
-+static inline void local_irq_enable(void) {
-+    __adeos_unstall_root();
-+}
-+
-+static inline void local_irq_save_ptr(unsigned long *flags) {
-+    *flags = __adeos_test_and_stall_root();
-+}
-+
-+static inline void local_irq_restore(unsigned long flags) {
-+    __adeos_restore_root(flags);
-+}
-+
-+#define local_save_flags(flags)               ((flags) = __adeos_test_root())
-+#define local_irq_save(flags)         local_irq_save_ptr(&flags)
-+
-+#else /* !CONFIG_ADEOS_CORE */
-+
- #ifdef CONFIG_PPC_ISERIES
- 
- extern unsigned long local_get_flags(void);
-@@ -75,6 +106,8 @@
- 
- #endif /* CONFIG_PPC_ISERIES */
- 
-+#endif /* CONFIG_ADEOS_CORE */
-+
- #define mask_irq(irq)                                         \
-       ({                                                      \
-               irq_desc_t *desc = get_irq_desc(irq);           \
-diff -Nru linux-2.6.10/include/asm-ppc64/mmu_context.h 
linux-2.6.10-adeos-ppc64-r2/include/asm-ppc64/mmu_context.h
---- linux-2.6.10/include/asm-ppc64/mmu_context.h       2004-12-24 
23:34:31.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/include/asm-ppc64/mmu_context.h        
2005-10-05 10:34:53.000000000 +0300
-@@ -82,9 +82,17 @@
- {
-       unsigned long flags;
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      adeos_hw_local_irq_save(flags);
-+#else /* !CONFIG_ADEOS_CORE */
-       local_irq_save(flags);
-+#endif /* CONFIG_ADEOS_CORE */
-       switch_mm(prev, next, current);
-+#ifdef CONFIG_ADEOS_CORE
-+      adeos_hw_local_irq_restore(flags);
-+#else /* !CONFIG_ADEOS_CORE */
-       local_irq_restore(flags);
-+#endif /* CONFIG_ADEOS_CORE */
- }
- 
- /* VSID allocation
-diff -Nru linux-2.6.10/include/asm-ppc64/smp.h 
linux-2.6.10-adeos-ppc64-r2/include/asm-ppc64/smp.h
---- linux-2.6.10/include/asm-ppc64/smp.h       2004-12-24 23:33:47.000000000 
+0200
-+++ linux-2.6.10-adeos-ppc64-r2/include/asm-ppc64/smp.h        2005-10-05 
10:34:53.000000000 +0300
-@@ -37,8 +37,12 @@
- struct pt_regs;
- extern void smp_message_recv(int, struct pt_regs *);
- 
--
-+#ifdef CONFIG_ADEOS_CORE
-+#include <asm/adeos.h>
-+#define smp_processor_id() adeos_processor_id()
-+#else /* !CONFIG_ADEOS_CORE */
- #define smp_processor_id() (get_paca()->paca_index)
-+#endif /* CONFIG_ADEOS_CORE */
- #define hard_smp_processor_id() (get_paca()->hw_cpu_id)
- 
- extern cpumask_t cpu_sibling_map[NR_CPUS];
-diff -Nru linux-2.6.10/include/asm-ppc64/time.h 
linux-2.6.10-adeos-ppc64-r2/include/asm-ppc64/time.h
---- linux-2.6.10/include/asm-ppc64/time.h      2004-12-24 23:34:44.000000000 
+0200
-+++ linux-2.6.10-adeos-ppc64-r2/include/asm-ppc64/time.h       2005-10-05 
10:34:53.000000000 +0300
-@@ -23,6 +23,9 @@
- #include <asm/iSeries/HvCall.h>
- 
- /* time.c */
-+#ifdef CONFIG_ADEOS_CORE
-+extern unsigned long disarm_decr[NR_CPUS];
-+#endif /* CONFIG_ADEOS_CORE */
- extern unsigned long tb_ticks_per_jiffy;
- extern unsigned long tb_ticks_per_usec;
- extern unsigned long tb_ticks_per_sec;
-diff -Nru linux-2.6.10/include/linux/adeos.h 
linux-2.6.10-adeos-ppc64-r2/include/linux/adeos.h
---- linux-2.6.10/include/linux/adeos.h 1970-01-01 02:00:00.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/include/linux/adeos.h  2005-10-05 
10:34:53.000000000 +0300
-@@ -0,0 +1,553 @@
-+/*
-+ *   include/linux/adeos.h
-+ *
-+ *   Copyright (C) 2002,2003,2004 Philippe Gerum.
-+ *
-+ *   This program is free software; you can redistribute it and/or modify
-+ *   it under the terms of the GNU General Public License as published by
-+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
-+ *   USA; either version 2 of the License, or (at your option) any later
-+ *   version.
-+ *
-+ *   This program is distributed in the hope that it will be useful,
-+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ *   GNU General Public License for more details.
-+ *
-+ *   You should have received a copy of the GNU General Public License
-+ *   along with this program; if not, write to the Free Software
-+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, 
USA.
-+ */
-+
-+#ifndef __LINUX_ADEOS_H
-+#define __LINUX_ADEOS_H
-+
-+#include <linux/config.h>
-+
-+#ifdef CONFIG_ADEOS_CORE
-+
-+#include <linux/spinlock.h>
-+#include <asm/adeos.h>
-+
-+#define ADEOS_VERSION_PREFIX  "2.6"
-+#define ADEOS_VERSION_STRING  (ADEOS_VERSION_PREFIX ADEOS_ARCH_STRING)
-+#define ADEOS_RELEASE_NUMBER  
(0x02060000|((ADEOS_MAJOR_NUMBER&0xff)<<8)|(ADEOS_MINOR_NUMBER&0xff))
-+
-+#define ADEOS_ROOT_PRI       100
-+#define ADEOS_ROOT_ID        0
-+#define ADEOS_ROOT_NPTDKEYS  4        /* Must be <= 32 */
-+
-+#define ADEOS_RESET_TIMER  0x1
-+#define ADEOS_SAME_HANDLER ((void (*)(unsigned))(-1))
-+
-+/* Global domain flags */
-+#define ADEOS_SPRINTK_FLAG 0  /* Synchronous printk() allowed */
-+#define ADEOS_PPRINTK_FLAG 1  /* Asynchronous printk() request pending */
-+
-+/* Per-cpu pipeline flags.
-+   WARNING: some implementation might refer to those flags
-+   non-symbolically in assembly portions (e.g. x86). */
-+#define IPIPE_STALL_FLAG   0  /* Stalls a pipeline stage */
-+#define IPIPE_XPEND_FLAG   1  /* Exception notification is pending */
-+#define IPIPE_SLEEP_FLAG   2  /* Domain has self-suspended */
-+#define IPIPE_SYNC_FLAG    3  /* The interrupt syncer is running for the 
domain */
-+
-+#define IPIPE_HANDLE_FLAG    0
-+#define IPIPE_PASS_FLAG      1
-+#define IPIPE_ENABLE_FLAG    2
-+#define IPIPE_DYNAMIC_FLAG   IPIPE_HANDLE_FLAG
-+#define IPIPE_EXCLUSIVE_FLAG 3
-+#define IPIPE_STICKY_FLAG    4
-+#define IPIPE_SYSTEM_FLAG    5
-+#define IPIPE_LOCK_FLAG      6
-+#define IPIPE_SHARED_FLAG    7
-+#define IPIPE_CALLASM_FLAG   8        /* Arch-dependent -- might be unused. */
-+
-+#define IPIPE_HANDLE_MASK    (1 << IPIPE_HANDLE_FLAG)
-+#define IPIPE_PASS_MASK      (1 << IPIPE_PASS_FLAG)
-+#define IPIPE_ENABLE_MASK    (1 << IPIPE_ENABLE_FLAG)
-+#define IPIPE_DYNAMIC_MASK   IPIPE_HANDLE_MASK
-+#define IPIPE_EXCLUSIVE_MASK (1 << IPIPE_EXCLUSIVE_FLAG)
-+#define IPIPE_STICKY_MASK    (1 << IPIPE_STICKY_FLAG)
-+#define IPIPE_SYSTEM_MASK    (1 << IPIPE_SYSTEM_FLAG)
-+#define IPIPE_LOCK_MASK      (1 << IPIPE_LOCK_FLAG)
-+#define IPIPE_SHARED_MASK    (1 << IPIPE_SHARED_FLAG)
-+#define IPIPE_SYNC_MASK      (1 << IPIPE_SYNC_FLAG)
-+#define IPIPE_CALLASM_MASK   (1 << IPIPE_CALLASM_FLAG)
-+
-+#define IPIPE_DEFAULT_MASK  (IPIPE_HANDLE_MASK|IPIPE_PASS_MASK)
-+
-+typedef struct adattr {
-+
-+    unsigned domid;           /* Domain identifier -- Magic value set by 
caller */
-+    const char *name;         /* Domain name -- Warning: won't be dup'ed! */
-+    int priority;             /* Priority in interrupt pipeline */
-+    void (*entry)(int);               /* Domain entry point */
-+    int estacksz;             /* Stack size for entry context -- 0 means 
unspec */
-+    void (*dswitch)(void);    /* Handler called each time the domain is 
switched in */
-+    int nptdkeys;             /* Max. number of per-thread data keys */
-+    void (*ptdset)(int,void *);       /* Routine to set pt values */
-+    void *(*ptdget)(int);     /* Routine to get pt values */
-+
-+} adattr_t;
-+
-+typedef struct admutex {
-+
-+    raw_spinlock_t lock;
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+    adomain_t *sleepq, /* Pending domain queue */
-+            *owner;   /* Domain owning the mutex */
-+#ifdef CONFIG_SMP
-+    volatile int owncpu;
-+#define ADEOS_MUTEX_UNLOCKED { RAW_SPIN_LOCK_UNLOCKED, NULL, NULL, -1 }
-+#else  /* !CONFIG_SMP */
-+#define ADEOS_MUTEX_UNLOCKED { RAW_SPIN_LOCK_UNLOCKED, NULL, NULL }
-+#endif /* CONFIG_SMP */
-+#else /* !CONFIG_ADEOS_THREADS */
-+#define ADEOS_MUTEX_UNLOCKED { RAW_SPIN_LOCK_UNLOCKED }
-+#endif /* CONFIG_ADEOS_THREADS */
-+
-+} admutex_t;
-+
-+typedef void (*adevhand_t)(adevinfo_t *);
-+
-+extern int adp_pipelined;
-+
-+extern adomain_t *adp_cpu_current[],
-+                 *adp_root;
-+
-+extern int __adeos_event_monitors[];
-+
-+extern unsigned __adeos_printk_virq;
-+
-+extern unsigned long __adeos_virtual_irq_map;
-+
-+extern struct list_head __adeos_pipeline;
-+
-+extern raw_spinlock_t __adeos_pipelock;
-+
-+#ifdef CONFIG_ADEOS_PROFILING
-+
-+typedef struct adprofdata {
-+
-+    struct {
-+      unsigned long long t_handled;
-+      unsigned long long t_synced;
-+      unsigned long n_handled;
-+      unsigned long n_synced;
-+    } irqs[IPIPE_NR_IRQS];
-+
-+} adprofdata_t;
-+
-+extern adprofdata_t __adeos_profile_data[ADEOS_NR_CPUS];
-+
-+#endif /* CONFIG_ADEOS_PROFILING */
-+
-+/* Private interface */
-+
-+#ifdef CONFIG_PROC_FS
-+void __adeos_init_proc(void);
-+#endif /* CONFIG_PROC_FS */
-+
-+void __adeos_takeover(void);
-+
-+asmlinkage int __adeos_handle_event(unsigned event,
-+                                  void *evdata);
-+
-+void __adeos_flush_printk(unsigned irq);
-+
-+void __adeos_dump_state(void);
-+
-+static inline void __adeos_schedule_head(void *evdata) {
-+
-+    if (__adeos_event_monitors[ADEOS_SCHEDULE_HEAD] > 0)
-+      __adeos_handle_event(ADEOS_SCHEDULE_HEAD,evdata);
-+}
-+
-+static inline int __adeos_schedule_tail(void *evdata) {
-+
-+    if (__adeos_event_monitors[ADEOS_SCHEDULE_TAIL] > 0)
-+      return __adeos_handle_event(ADEOS_SCHEDULE_TAIL,evdata);
-+
-+    return 0;
-+}
-+
-+static inline void __adeos_enter_process(void) {
-+
-+    if (__adeos_event_monitors[ADEOS_ENTER_PROCESS] > 0)
-+      __adeos_handle_event(ADEOS_ENTER_PROCESS,NULL);
-+}
-+
-+static inline void __adeos_exit_process(void *evdata) {
-+
-+    if (__adeos_event_monitors[ADEOS_EXIT_PROCESS] > 0)
-+      __adeos_handle_event(ADEOS_EXIT_PROCESS,evdata);
-+}
-+
-+static inline int __adeos_signal_process(void *evdata) {
-+
-+    if (__adeos_event_monitors[ADEOS_SIGNAL_PROCESS] > 0)
-+      return __adeos_handle_event(ADEOS_SIGNAL_PROCESS,evdata);
-+
-+    return 0;
-+}
-+
-+static inline void __adeos_kick_process(void *evdata) {
-+
-+    if (__adeos_event_monitors[ADEOS_KICK_PROCESS] > 0)
-+      __adeos_handle_event(ADEOS_KICK_PROCESS,evdata);
-+}
-+
-+static inline int __adeos_renice_process(void *evdata) {
-+
-+    if (__adeos_event_monitors[ADEOS_RENICE_PROCESS] > 0)
-+      return __adeos_handle_event(ADEOS_RENICE_PROCESS,evdata);
-+
-+    return 0;
-+}
-+
-+void __adeos_stall_root(void);
-+
-+void __adeos_unstall_root(void);
-+
-+unsigned long __adeos_test_root(void);
-+
-+unsigned long __adeos_test_and_stall_root(void);
-+
-+void fastcall __adeos_restore_root(unsigned long flags);
-+
-+void __adeos_schedule_back_root(struct task_struct *prev);
-+
-+int __adeos_setscheduler_root(struct task_struct *p,
-+                            int policy,
-+                            int prio);
-+
-+void __adeos_reenter_root(struct task_struct *prev,
-+                        int policy,
-+                        int prio);
-+
-+int fastcall __adeos_schedule_irq(unsigned irq,
-+                                struct list_head *head);
-+
-+#define __adeos_pipeline_head_p(adp) (&(adp)->p_link == __adeos_pipeline.next)
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+
-+static inline int __adeos_domain_work_p (adomain_t *adp, int cpuid)
-+
-+{
-+    return (!test_bit(IPIPE_SLEEP_FLAG,&adp->cpudata[cpuid].status) ||
-+          (!test_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status) &&
-+           adp->cpudata[cpuid].irq_pending_hi != 0) ||
-+          test_bit(IPIPE_XPEND_FLAG,&adp->cpudata[cpuid].status));
-+}
-+
-+#else /* !CONFIG_ADEOS_THREADS */
-+
-+static inline int __adeos_domain_work_p (adomain_t *adp, int cpuid)
-+
-+{
-+    return (!test_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status) &&
-+          adp->cpudata[cpuid].irq_pending_hi != 0);
-+}
-+
-+static inline void __adeos_switch_to (adomain_t *out, adomain_t *in, int 
cpuid)
-+
-+{
-+    void adeos_suspend_domain(void);
-+
-+    /* "in" is guaranteed to be closer than "out" from the head of the
-+       pipeline (and obviously different). */
-+
-+    adp_cpu_current[cpuid] = in;
-+
-+    if (in->dswitch)
-+      in->dswitch();
-+
-+    adeos_suspend_domain(); /* Sync stage and propagate interrupts. */
-+    adeos_load_cpuid(); /* Processor might have changed. */
-+
-+    if (adp_cpu_current[cpuid] == in)
-+      /* Otherwise, something has changed the current domain under
-+         our feet recycling the register set; do not override. */
-+      adp_cpu_current[cpuid] = out;
-+}
-+
-+#endif /* CONFIG_ADEOS_THREADS */
-+
-+/* Public interface */
-+
-+int adeos_register_domain(adomain_t *adp,
-+                        adattr_t *attr);
-+
-+int adeos_unregister_domain(adomain_t *adp);
-+
-+void adeos_suspend_domain(void);
-+
-+int adeos_virtualize_irq_from(adomain_t *adp,
-+                            unsigned irq,
-+                            void (*handler)(unsigned irq),
-+                            int (*acknowledge)(unsigned irq),
-+                            unsigned modemask);
-+
-+static inline int adeos_virtualize_irq(unsigned irq,
-+                                     void (*handler)(unsigned irq),
-+                                     int (*acknowledge)(unsigned irq),
-+                                     unsigned modemask) {
-+
-+    return adeos_virtualize_irq_from(adp_current,
-+                                   irq,
-+                                   handler,
-+                                   acknowledge,
-+                                   modemask);
-+}
-+
-+int adeos_control_irq(unsigned irq,
-+                    unsigned clrmask,
-+                    unsigned setmask);
-+
-+cpumask_t adeos_set_irq_affinity(unsigned irq,
-+                               cpumask_t cpumask);
-+
-+static inline int adeos_share_irq (unsigned irq, int (*acknowledge)(unsigned 
irq)) {
-+
-+    return adeos_virtualize_irq(irq,
-+                              ADEOS_SAME_HANDLER,
-+                              acknowledge,
-+                              
IPIPE_SHARED_MASK|IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
-+}
-+
-+unsigned adeos_alloc_irq(void);
-+
-+int adeos_free_irq(unsigned irq);
-+
-+int fastcall adeos_trigger_irq(unsigned irq);
-+
-+static inline int adeos_propagate_irq(unsigned irq) {
-+
-+    return __adeos_schedule_irq(irq,adp_current->p_link.next);
-+}
-+
-+static inline int adeos_schedule_irq(unsigned irq) {
-+
-+    return __adeos_schedule_irq(irq,&adp_current->p_link);
-+}
-+
-+int fastcall adeos_send_ipi(unsigned ipi,
-+                          cpumask_t cpumask);
-+
-+static inline void adeos_stall_pipeline_from (adomain_t *adp)
-+
-+{
-+    adeos_declare_cpuid;
-+#ifdef CONFIG_SMP
-+    unsigned long flags;
-+
-+    adeos_lock_cpu(flags);
-+
-+    __set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+
-+    if (!__adeos_pipeline_head_p(adp))
-+      adeos_unlock_cpu(flags);
-+#else /* CONFIG_SMP */
-+    set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+
-+    if (__adeos_pipeline_head_p(adp))
-+      adeos_hw_cli();
-+#endif /* CONFIG_SMP */
-+}
-+
-+static inline unsigned long adeos_test_pipeline_from (adomain_t *adp)
-+
-+{
-+    unsigned long flags, s;
-+    adeos_declare_cpuid;
-+    
-+    adeos_get_cpu(flags);
-+    s = test_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+    adeos_put_cpu(flags);
-+
-+    return s;
-+}
-+
-+static inline unsigned long adeos_test_and_stall_pipeline_from (adomain_t 
*adp)
-+
-+{
-+    adeos_declare_cpuid;
-+    unsigned long s;
-+#ifdef CONFIG_SMP
-+    unsigned long flags;
-+
-+    adeos_lock_cpu(flags);
-+
-+    s = __test_and_set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+
-+    if (!__adeos_pipeline_head_p(adp))
-+      adeos_unlock_cpu(flags);
-+#else /* CONFIG_SMP */
-+    s = test_and_set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+
-+    if (__adeos_pipeline_head_p(adp))
-+      adeos_hw_cli();
-+#endif /* CONFIG_SMP */
-+    
-+    return s;
-+}
-+
-+void fastcall adeos_unstall_pipeline_from(adomain_t *adp);
-+
-+static inline unsigned long adeos_test_and_unstall_pipeline_from(adomain_t 
*adp)
-+
-+{
-+    unsigned long flags, s;
-+    adeos_declare_cpuid;
-+    
-+    adeos_get_cpu(flags);
-+    s = test_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+    adeos_unstall_pipeline_from(adp);
-+    adeos_put_cpu(flags);
-+
-+    return s;
-+}
-+
-+static inline void adeos_unstall_pipeline(void)
-+
-+{
-+    adeos_unstall_pipeline_from(adp_current);
-+}
-+
-+static inline unsigned long adeos_test_and_unstall_pipeline(void)
-+
-+{
-+    return adeos_test_and_unstall_pipeline_from(adp_current);
-+}
-+
-+static inline unsigned long adeos_test_pipeline (void)
-+
-+{
-+    return adeos_test_pipeline_from(adp_current);
-+}
-+
-+static inline unsigned long adeos_test_and_stall_pipeline (void)
-+
-+{
-+    return adeos_test_and_stall_pipeline_from(adp_current);
-+}
-+
-+static inline void adeos_restore_pipeline_from (adomain_t *adp, unsigned long 
flags)
-+
-+{
-+    if (flags)
-+      adeos_stall_pipeline_from(adp);
-+    else
-+      adeos_unstall_pipeline_from(adp);
-+}
-+
-+static inline void adeos_stall_pipeline (void)
-+
-+{
-+    adeos_stall_pipeline_from(adp_current);
-+}
-+
-+static inline void adeos_restore_pipeline (unsigned long flags)
-+
-+{
-+    adeos_restore_pipeline_from(adp_current,flags);
-+}
-+
-+static inline void adeos_restore_pipeline_nosync (adomain_t *adp, unsigned 
long flags, int cpuid)
-+
-+{
-+    /* If cpuid is current, then it must be held on entry
-+       (adeos_get_cpu/adeos_hw_local_irq_save/adeos_hw_cli). */
-+
-+    if (flags)
-+      __set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+    else
-+      __clear_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+}
-+
-+adevhand_t adeos_catch_event_from(adomain_t *adp,
-+                                unsigned event,
-+                                adevhand_t handler);
-+
-+static inline adevhand_t adeos_catch_event (unsigned event, adevhand_t 
handler)
-+
-+{
-+    return adeos_catch_event_from(adp_current,event,handler);
-+}
-+
-+static inline void adeos_propagate_event(adevinfo_t *evinfo)
-+
-+{
-+    evinfo->propagate = 1;
-+}
-+
-+void adeos_init_attr(adattr_t *attr);
-+
-+int adeos_get_sysinfo(adsysinfo_t *sysinfo);
-+
-+int adeos_tune_timer(unsigned long ns,
-+                   int flags);
-+
-+int adeos_alloc_ptdkey(void);
-+
-+int adeos_free_ptdkey(int key);
-+
-+int adeos_set_ptd(int key,
-+                void *value);
-+
-+void *adeos_get_ptd(int key);
-+
-+unsigned long adeos_critical_enter(void (*syncfn)(void));
-+
-+void adeos_critical_exit(unsigned long flags);
-+
-+int adeos_init_mutex(admutex_t *mutex);
-+
-+int adeos_destroy_mutex(admutex_t *mutex);
-+
-+unsigned long fastcall adeos_lock_mutex(admutex_t *mutex);
-+
-+void fastcall adeos_unlock_mutex(admutex_t *mutex,
-+                               unsigned long flags);
-+
-+static inline void adeos_set_printk_sync (adomain_t *adp) {
-+    set_bit(ADEOS_SPRINTK_FLAG,&adp->flags);
-+}
-+
-+static inline void adeos_set_printk_async (adomain_t *adp) {
-+    clear_bit(ADEOS_SPRINTK_FLAG,&adp->flags);
-+}
-+
-+#define spin_lock_irqsave_hw_cond(lock,flags)      
spin_lock_irqsave_hw(lock,flags)
-+#define spin_unlock_irqrestore_hw_cond(lock,flags) 
spin_unlock_irqrestore_hw(lock,flags)
-+
-+#define pic_irq_lock(irq)     \
-+      do {            \
-+              adeos_declare_cpuid; \
-+              adeos_load_cpuid();             \
-+              __adeos_lock_irq(adp_cpu_current[cpuid], cpuid, irq); \
-+      } while(0)
-+
-+#define pic_irq_unlock(irq)   \
-+      do {            \
-+              adeos_declare_cpuid; \
-+              adeos_load_cpuid();          \
-+              __adeos_unlock_irq(adp_cpu_current[cpuid], irq); \
-+      } while(0)
-+
-+#else /* !CONFIG_ADEOS_CORE */
-+
-+#define spin_lock_irqsave_hw(lock,flags)      spin_lock_irqsave(lock, flags)
-+#define spin_unlock_irqrestore_hw(lock,flags) spin_unlock_irqrestore(lock, 
flags)
-+#define spin_lock_irqsave_hw_cond(lock,flags)      do { flags = 0; 
spin_lock(lock); } while(0)
-+#define spin_unlock_irqrestore_hw_cond(lock,flags) spin_unlock(lock)
-+
-+#define pic_irq_lock(irq)     do { } while(0)
-+#define pic_irq_unlock(irq)   do { } while(0)
-+
-+#endif        /* CONFIG_ADEOS_CORE */
-+
-+#endif /* !__LINUX_ADEOS_H */
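
The generic header above also exposes the event-catching interface
(adeos_catch_event(), adevinfo_t, adeos_propagate_event()) that the
__adeos_*_process() helpers feed. Below is a hedged sketch of a domain hooking
the process-exit event; it is not part of the patch, and my_exit_hook and
my_install_hooks are invented names.

/* Illustrative sketch only -- not part of the patch.  Hooks the
 * ADEOS_EXIT_PROCESS kernel event using the interface declared above. */
static void my_exit_hook(adevinfo_t *evinfo)
{
        /* evinfo->evdata is whatever __adeos_exit_process() was given;
         * perform any per-domain cleanup for the exiting task here. */
        adeos_propagate_event(evinfo);  /* Let lower-priority domains see it too. */
}

static void my_install_hooks(void)
{
        adeos_catch_event(ADEOS_EXIT_PROCESS, &my_exit_hook);
}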
-diff -Nru linux-2.6.10/include/linux/preempt.h 
linux-2.6.10-adeos-ppc64-r2/include/linux/preempt.h
---- linux-2.6.10/include/linux/preempt.h       2004-12-24 23:34:26.000000000 
+0200
-+++ linux-2.6.10-adeos-ppc64-r2/include/linux/preempt.h        2005-10-05 
10:34:53.000000000 +0300
-@@ -25,6 +25,47 @@
- 
- asmlinkage void preempt_schedule(void);
- 
-+#ifdef CONFIG_ADEOS_CORE
-+
-+#include <asm/adeos.h>
-+
-+extern adomain_t *adp_cpu_current[],
-+                 *adp_root;
-+
-+#define preempt_disable() \
-+do { \
-+      if (adp_current == adp_root) { \
-+          inc_preempt_count();       \
-+          barrier(); \
-+        } \
-+} while (0)
-+
-+#define preempt_enable_no_resched() \
-+do { \
-+        if (adp_current == adp_root) { \
-+          barrier(); \
-+          dec_preempt_count(); \
-+        } \
-+} while (0)
-+
-+#define preempt_check_resched() \
-+do { \
-+        if (adp_current == adp_root) { \
-+          if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
-+              preempt_schedule(); \
-+        } \
-+} while (0)
-+
-+#define preempt_enable() \
-+do { \
-+      if (adp_current == adp_root) { \
-+          preempt_enable_no_resched(); \
-+          preempt_check_resched(); \
-+        } \
-+} while (0)
-+
-+#else /* !CONFIG_ADEOS_CORE */
-+
- #define preempt_disable() \
- do { \
-       inc_preempt_count(); \
-@@ -49,6 +90,8 @@
-       preempt_check_resched(); \
- } while (0)
- 
-+#endif /* CONFIG_ADEOS_CORE */
-+
- #else
- 
- #define preempt_disable()             do { } while (0)
-diff -Nru linux-2.6.10/include/linux/sched.h 
linux-2.6.10-adeos-ppc64-r2/include/linux/sched.h
---- linux-2.6.10/include/linux/sched.h 2004-12-24 23:33:59.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/include/linux/sched.h  2005-10-05 
10:34:53.000000000 +0300
-@@ -4,6 +4,9 @@
- #include <asm/param.h>        /* for HZ */
- 
- #include <linux/config.h>
-+#ifdef CONFIG_ADEOS_CORE
-+#include <linux/adeos.h>
-+#endif /* CONFIG_ADEOS_CORE */
- #include <linux/capability.h>
- #include <linux/threads.h>
- #include <linux/kernel.h>
-@@ -664,6 +667,10 @@
-       struct mempolicy *mempolicy;
-       short il_next;          /* could be shared with used_math */
- #endif
-+
-+#ifdef CONFIG_ADEOS_CORE
-+        void *ptd[ADEOS_ROOT_NPTDKEYS];
-+#endif /* CONFIG_ADEOS_CORE */
- };
- 
- static inline pid_t process_group(struct task_struct *tsk)
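
The ptd[] array just added to task_struct backs the root domain's per-thread
data keys (see __adeos_set_root_ptd()/__adeos_get_root_ptd() in kernel/adeos.c
further down). The sketch below shows the key interface from
include/linux/adeos.h; it is not part of the patch, and my_key, my_ptd_init,
my_attach_cookie and my_cookie are invented names.

/* Illustrative sketch only -- not part of the patch.  Attaches an
 * opaque per-thread cookie through the ptd key interface; for the
 * root domain this is stored in the current->ptd[] array added above. */
static int my_key;

static void my_ptd_init(void)
{
        my_key = adeos_alloc_ptdkey();
}

static void my_attach_cookie(void *cookie)
{
        adeos_set_ptd(my_key, cookie);   /* current->ptd[my_key] for root. */
}

static void *my_cookie(void)
{
        return adeos_get_ptd(my_key);
}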
-diff -Nru linux-2.6.10/init/main.c linux-2.6.10-adeos-ppc64-r2/init/main.c
---- linux-2.6.10/init/main.c   2004-12-24 23:34:01.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/init/main.c    2005-10-05 10:34:53.000000000 
+0300
-@@ -526,6 +526,11 @@
-       init_timers();
-       softirq_init();
-       time_init();
-+#ifdef CONFIG_ADEOS_CORE
-+      /* On PPC, we need calibrated values for the decrementer to
-+         initialize, so run time_init() first. */
-+      __adeos_init();
-+#endif /* CONFIG_ADEOS_CORE */
- 
-       /*
-        * HACK ALERT! This is early. We're enabling the console before
-@@ -652,6 +657,11 @@
-       sock_init();
- 
-       do_initcalls();
-+
-+#ifdef CONFIG_ADEOS
-+      /* i.e. Permanent pipelining from boot onwards. */
-+      __adeos_takeover();
-+#endif /* CONFIG_ADEOS */
- }
- 
- static void do_pre_smp_initcalls(void)
-diff -Nru linux-2.6.10/kernel/adeos.c 
linux-2.6.10-adeos-ppc64-r2/kernel/adeos.c
---- linux-2.6.10/kernel/adeos.c        1970-01-01 02:00:00.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/kernel/adeos.c 2005-10-05 10:34:53.000000000 
+0300
-@@ -0,0 +1,826 @@
-+/*
-+ *   linux/kernel/adeos.c
-+ *
-+ *   Copyright (C) 2002,2003,2004 Philippe Gerum.
-+ *
-+ *   This program is free software; you can redistribute it and/or modify
-+ *   it under the terms of the GNU General Public License as published by
-+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
-+ *   USA; either version 2 of the License, or (at your option) any later
-+ *   version.
-+ *
-+ *   This program is distributed in the hope that it will be useful,
-+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ *   GNU General Public License for more details.
-+ *
-+ *   You should have received a copy of the GNU General Public License
-+ *   along with this program; if not, write to the Free Software
-+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, 
USA.
-+ *
-+ *   Architecture-independent ADEOS core support.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/module.h>
-+#ifdef CONFIG_PROC_FS
-+#include <linux/proc_fs.h>
-+#endif /* CONFIG_PROC_FS */
-+
-+/* The pre-defined domain slot for the root domain. */
-+static adomain_t adeos_root_domain;
-+
-+/* A constant pointer to the root domain. */
-+adomain_t *adp_root = &adeos_root_domain;
-+
-+/* A pointer to the current domain. */
-+adomain_t *adp_cpu_current[ADEOS_NR_CPUS] = { [ 0 ... ADEOS_NR_CPUS - 1] = 
&adeos_root_domain };
-+
-+/* The spinlock protecting from races while modifying the pipeline. */
-+raw_spinlock_t __adeos_pipelock = RAW_SPIN_LOCK_UNLOCKED;
-+
-+/* The pipeline data structure. Enqueues adomain_t objects by priority. */
-+struct list_head __adeos_pipeline;
-+
-+/* A global flag telling whether Adeos pipelining is engaged. */
-+int adp_pipelined;
-+
-+/* An array of global counters tracking domains monitoring events. */
-+int __adeos_event_monitors[ADEOS_NR_EVENTS] = { [ 0 ... ADEOS_NR_EVENTS - 1] 
= 0 };
-+
-+/* The allocated VIRQ map. */
-+unsigned long __adeos_virtual_irq_map = 0;
-+
-+/* A VIRQ to kick printk() output out when the root domain is in control. */
-+unsigned __adeos_printk_virq;
-+
-+#ifdef CONFIG_ADEOS_PROFILING
-+adprofdata_t __adeos_profile_data[ADEOS_NR_CPUS];
-+#endif /* CONFIG_ADEOS_PROFILING */
-+
-+static void __adeos_set_root_ptd (int key, void *value) {
-+
-+    current->ptd[key] = value;
-+}
-+
-+static void *__adeos_get_root_ptd (int key) {
-+
-+    return current->ptd[key];
-+}
-+
-+/* adeos_init() -- Initialization routine of the ADEOS layer. Called
-+   by the host kernel early during the boot procedure. */
-+
-+void __adeos_init (void)
-+
-+{
-+    adomain_t *adp = &adeos_root_domain;
-+
-+    __adeos_check_platform(); /* Do platform dependent checks first. */
-+
-+    /*
-+      A lightweight registration code for the root domain. Current
-+      assumptions are:
-+      - We are running on the boot CPU, and secondary CPUs are still
-+      lost in space.
-+      - adeos_root_domain has been zero'ed.
-+    */
-+
-+    INIT_LIST_HEAD(&__adeos_pipeline);
-+
-+    adp->name = "Linux";
-+    adp->domid = ADEOS_ROOT_ID;
-+    adp->priority = ADEOS_ROOT_PRI;
-+    adp->ptd_setfun = &__adeos_set_root_ptd;
-+    adp->ptd_getfun = &__adeos_get_root_ptd;
-+    adp->ptd_keymax = ADEOS_ROOT_NPTDKEYS;
-+
-+    __adeos_init_stage(adp);
-+
-+    INIT_LIST_HEAD(&adp->p_link);
-+    list_add_tail(&adp->p_link,&__adeos_pipeline);
-+
-+    __adeos_init_platform();
-+
-+    __adeos_printk_virq = adeos_alloc_irq(); /* Cannot fail here. */
-+    adp->irqs[__adeos_printk_virq].handler = &__adeos_flush_printk; 
-+    adp->irqs[__adeos_printk_virq].acknowledge = NULL; 
-+    adp->irqs[__adeos_printk_virq].control = IPIPE_HANDLE_MASK; 
-+
-+    printk(KERN_INFO "Adeos %s: Root domain %s registered.\n",
-+         ADEOS_VERSION_STRING,
-+         adp->name);
-+}
-+
-+/* adeos_handle_event() -- Adeos' generic event handler. This routine
-+   calls the per-domain handlers registered for a given
-+   exception/event. Each domain before the one which raised the event
-+   in the pipeline will get a chance to process the event. The latter
-+   will eventually be allowed to process its own event too if a valid
-+   handler exists for it.  Handler executions are always scheduled by
-+   the domain which raised the event for the higher priority domains
-+   wanting to be notified of such event.  Note: evdata might be
-+   NULL. */
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+
-+asmlinkage int __adeos_handle_event (unsigned event, void *evdata)
-+/* asmlinkage is there just in case CONFIG_REGPARM is enabled... */
-+{
-+    struct list_head *pos, *npos;
-+    adomain_t *this_domain;
-+    unsigned long flags;
-+    adeos_declare_cpuid;
-+    adevinfo_t evinfo;
-+    int propagate = 1;
-+
-+    adeos_lock_cpu(flags);
-+
-+    this_domain = adp_cpu_current[cpuid];
-+
-+    list_for_each_safe(pos,npos,&__adeos_pipeline) {
-+
-+      adomain_t *next_domain = list_entry(pos,adomain_t,p_link);
-+
-+      if (next_domain->events[event].handler != NULL)
-+          {
-+          if (next_domain == this_domain)
-+              {
-+              adeos_unlock_cpu(flags);
-+              evinfo.domid = this_domain->domid;
-+              evinfo.event = event;
-+              evinfo.evdata = evdata;
-+              evinfo.propagate = 0;
-+              this_domain->events[event].handler(&evinfo);
-+              propagate = evinfo.propagate;
-+              goto done;
-+              }
-+
-+          next_domain->cpudata[cpuid].event_info.domid = this_domain->domid;
-+          next_domain->cpudata[cpuid].event_info.event = event;
-+          next_domain->cpudata[cpuid].event_info.evdata = evdata;
-+          next_domain->cpudata[cpuid].event_info.propagate = 0;
-+          __set_bit(IPIPE_XPEND_FLAG,&next_domain->cpudata[cpuid].status);
-+
-+          /* Let the higher priority domain process the event. */
-+          __adeos_switch_to(this_domain,next_domain,cpuid);
-+          
-+          adeos_load_cpuid(); /* Processor might have changed. */
-+
-+          if (!next_domain->cpudata[cpuid].event_info.propagate)
-+              {
-+              propagate = 0;
-+              break;
-+              }
-+          }
-+
-+      if (next_domain->cpudata[cpuid].irq_pending_hi != 0 &&
-+          !test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status))
-+          {
-+          if (next_domain != this_domain)
-+              __adeos_switch_to(this_domain,next_domain,cpuid);
-+          else
-+              __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+
-+          adeos_load_cpuid(); /* Processor might have changed. */
-+          }
-+
-+      if (next_domain == this_domain)
-+          break;
-+    }
-+
-+    adeos_unlock_cpu(flags);
-+
-+ done:
-+
-+    return !propagate;
-+}
-+
-+#else /* !CONFIG_ADEOS_THREADS */
-+
-+asmlinkage int __adeos_handle_event (unsigned event, void *evdata)
-+/* asmlinkage is there just in case CONFIG_REGPARM is enabled... */
-+{
-+    adomain_t *start_domain, *this_domain, *next_domain;
-+    struct list_head *pos, *npos;
-+    unsigned long flags;
-+    adeos_declare_cpuid;
-+    adevinfo_t evinfo;
-+    int propagate = 1;
-+
-+    adeos_lock_cpu(flags);
-+
-+    start_domain = this_domain = adp_cpu_current[cpuid];
-+
-+    list_for_each_safe(pos,npos,&__adeos_pipeline) {
-+
-+      next_domain = list_entry(pos,adomain_t,p_link);
-+
-+      /*  Note: Domain migration may occur while running event or
-+          interrupt handlers, in which case the current register set
-+          is going to be recycled for a different domain than the
-+          initiating one. We do care for that, always tracking the
-+          current domain descriptor upon return from those
-+          handlers. */
-+
-+      if (next_domain->events[event].handler != NULL)
-+          {
-+          adp_cpu_current[cpuid] = next_domain;
-+          evinfo.domid = start_domain->domid;
-+          adeos_unlock_cpu(flags);
-+          evinfo.event = event;
-+          evinfo.evdata = evdata;
-+          evinfo.propagate = 0;
-+          next_domain->events[event].handler(&evinfo);
-+          adeos_lock_cpu(flags);
-+
-+          if (adp_cpu_current[cpuid] != next_domain)
-+              this_domain = adp_cpu_current[cpuid];
-+
-+          propagate = evinfo.propagate;
-+          }
-+
-+      if (next_domain->cpudata[cpuid].irq_pending_hi != 0 &&
-+          !test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status))
-+          {
-+          adp_cpu_current[cpuid] = next_domain;
-+          __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+          adeos_load_cpuid();
-+
-+          if (adp_cpu_current[cpuid] != next_domain)
-+              this_domain = adp_cpu_current[cpuid];
-+          }
-+
-+      adp_cpu_current[cpuid] = this_domain;
-+
-+      if (next_domain == this_domain || !propagate)
-+          break;
-+    }
-+
-+    adeos_unlock_cpu(flags);
-+
-+    return !propagate;
-+}
-+
-+#endif /* CONFIG_ADEOS_THREADS */
-+
-+void __adeos_stall_root (void)
-+
-+{
-+    if (adp_pipelined)
-+      {
-+      adeos_declare_cpuid;
-+
-+#ifdef CONFIG_SMP
-+      unsigned long flags;
-+      adeos_lock_cpu(flags);
-+      __set_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
-+      adeos_unlock_cpu(flags);
-+#else /* !CONFIG_SMP */
-+      set_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
-+#endif /* CONFIG_SMP */
-+      }
-+    else
-+      adeos_hw_cli();
-+}
-+
-+void __adeos_unstall_root (void)
-+
-+{
-+    if (adp_pipelined)
-+      {
-+      adeos_declare_cpuid;
-+
-+      adeos_hw_cli();
-+
-+      adeos_load_cpuid();
-+
-+      __clear_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
-+
-+      if (adp_root->cpudata[cpuid].irq_pending_hi != 0)
-+          __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+      }
-+
-+    adeos_hw_sti();   /* Needed in both cases. */
-+}
-+
-+unsigned long __adeos_test_root (void)
-+
-+{
-+    if (adp_pipelined)
-+      {
-+      adeos_declare_cpuid;
-+      unsigned long s;
-+
-+#ifdef CONFIG_SMP
-+      unsigned long flags;
-+      adeos_lock_cpu(flags);
-+      s = test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
-+      adeos_unlock_cpu(flags);
-+#else /* !CONFIG_SMP */
-+      s = test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
-+#endif /* CONFIG_SMP */
-+
-+      return s;
-+      }
-+
-+    return adeos_hw_irqs_disabled();
-+}
-+
-+unsigned long __adeos_test_and_stall_root (void)
-+
-+{
-+    unsigned long flags;
-+
-+    if (adp_pipelined)
-+      {
-+      adeos_declare_cpuid;
-+      unsigned long s;
-+
-+#ifdef CONFIG_SMP
-+      adeos_lock_cpu(flags);
-+      s = __test_and_set_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
-+      adeos_unlock_cpu(flags);
-+#else /* !CONFIG_SMP */
-+      s = test_and_set_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
-+#endif /* CONFIG_SMP */
-+
-+      return s;
-+      }
-+
-+    adeos_hw_local_irq_save(flags);
-+
-+    return !adeos_hw_test_iflag(flags);
-+}
-+
-+void fastcall __adeos_restore_root (unsigned long flags)
-+
-+{
-+    if (flags)
-+      __adeos_stall_root();
-+    else
-+      __adeos_unstall_root();
-+}
-+
-+/* adeos_unstall_pipeline_from() -- Unstall the interrupt pipeline and
-+   synchronize pending events from a given domain. */
-+
-+void fastcall adeos_unstall_pipeline_from (adomain_t *adp)
-+
-+{
-+    adomain_t *this_domain;
-+    struct list_head *pos;
-+    unsigned long flags;
-+    adeos_declare_cpuid;
-+
-+    adeos_lock_cpu(flags);
-+
-+    __clear_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
-+
-+    this_domain = adp_cpu_current[cpuid];
-+
-+    if (adp == this_domain)
-+      {
-+      if (adp->cpudata[cpuid].irq_pending_hi != 0)
-+          __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+
-+      goto release_cpu_and_exit;
-+      }
-+
-+    /* Attempt to flush all events that might be pending at the
-+       unstalled domain level. This code is roughly lifted from
-+       __adeos_walk_pipeline(). */
-+
-+    list_for_each(pos,&__adeos_pipeline) {
-+
-+      adomain_t *next_domain = list_entry(pos,adomain_t,p_link);
-+
-+      if (test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status))
-+          break; /* Stalled stage -- do not go further. */
-+
-+      if (next_domain->cpudata[cpuid].irq_pending_hi != 0)
-+          {
-+          /* Since the critical IPI might be triggered by the
-+             following actions, the current domain might not be
-+             linked to the pipeline anymore after its handler
-+             returns on SMP boxen, even if the domain remains valid
-+             (see adeos_unregister_domain()), so don't make any
-+             hazardous assumptions here. */
-+
-+          if (next_domain == this_domain)
-+              __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+          else
-+              {
-+              __adeos_switch_to(this_domain,next_domain,cpuid);
-+
-+              adeos_load_cpuid(); /* Processor might have changed. */
-+
-+              if (this_domain->cpudata[cpuid].irq_pending_hi != 0 &&
-+                  !test_bit(IPIPE_STALL_FLAG,&this_domain->cpudata[cpuid].status))
-+                  __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+              }
-+          
-+          break;
-+          }
-+      else if (next_domain == this_domain)
-+          break;
-+    }
-+
-+release_cpu_and_exit:
-+
-+    if (__adeos_pipeline_head_p(adp))
-+      adeos_hw_sti();
-+    else
-+      adeos_unlock_cpu(flags);
-+}
-+
-+/* adeos_suspend_domain() -- tell the ADEOS layer that the current
-+   domain is now dormant. The calling domain is switched out, while
-+   the next domain with work in progress or pending in the pipeline is
-+   switched in. */
-+
-+#ifdef CONFIG_ADEOS_THREADS
-+
-+#define __flush_pipeline_stage() \
-+do { \
-+    if (!test_bit(IPIPE_STALL_FLAG,&cpudata->status) && \
-+      cpudata->irq_pending_hi != 0) \
-+      { \
-+      __adeos_sync_stage(IPIPE_IRQMASK_ANY); \
-+      adeos_load_cpuid(); \
-+      cpudata = &this_domain->cpudata[cpuid]; \
-+      } \
-+} while(0)
-+
-+void adeos_suspend_domain (void)
-+
-+{
-+    adomain_t *this_domain, *next_domain;
-+    struct adcpudata *cpudata;
-+    struct list_head *ln;
-+    unsigned long flags;
-+    adeos_declare_cpuid;
-+
-+    adeos_lock_cpu(flags);
-+
-+    this_domain = next_domain = adp_cpu_current[cpuid];
-+    cpudata = &this_domain->cpudata[cpuid];
-+
-+    /* A suspending domain implicitely unstalls the pipeline. */
-+    __clear_bit(IPIPE_STALL_FLAG,&cpudata->status);
-+
-+    /* Make sure that no event remains stuck in the pipeline. This
-+       could happen with emerging SMP instances, or domains which
-+       forget to unstall their stage before calling us. */
-+    __flush_pipeline_stage();
-+
-+    for (;;)
-+      {
-+      ln = next_domain->p_link.next;
-+
-+      if (ln == &__adeos_pipeline)    /* End of pipeline reached? */
-+          /* Caller should loop on its idle task on return. */
-+          goto release_cpu_and_exit;
-+
-+      next_domain = list_entry(ln,adomain_t,p_link);
-+
-+      /* Make sure the domain was preempted (i.e. not sleeping) or
-+         has some event to process before switching to it. */
-+
-+      if (__adeos_domain_work_p(next_domain,cpuid))
-+          break;
-+      }
-+
-+    /* Mark the outgoing domain as aslept (i.e. not preempted). */
-+    __set_bit(IPIPE_SLEEP_FLAG,&cpudata->status);
-+
-+    /* Suspend the calling domain, switching to the next one. */
-+    __adeos_switch_to(this_domain,next_domain,cpuid);
-+
-+#ifdef CONFIG_SMP
-+    adeos_load_cpuid();       /* Processor might have changed. */
-+    cpudata = &this_domain->cpudata[cpuid];
-+#endif /* CONFIG_SMP */
-+
-+    /* Clear the sleep bit for the incoming domain. */
-+    __clear_bit(IPIPE_SLEEP_FLAG,&cpudata->status);
-+
-+    /* Now, we are back into the calling domain. Flush the interrupt
-+       log and fire the event interposition handler if needed.  CPU
-+       migration is allowed in SMP-mode on behalf of an event handler
-+       provided that the current domain raised it. Otherwise, it's
-+       not. */
-+
-+    __flush_pipeline_stage();
-+
-+    if (__test_and_clear_bit(IPIPE_XPEND_FLAG,&cpudata->status))
-+      {
-+      adeos_unlock_cpu(flags);
-+      this_domain->events[cpudata->event_info.event].handler(&cpudata->event_info);
-+      return;
-+      }
-+
-+release_cpu_and_exit:
-+
-+    adeos_unlock_cpu(flags);
-+
-+    /* Return to the point of suspension in the calling domain. */
-+}
-+
-+#else /* !CONFIG_ADEOS_THREADS */
-+
-+void adeos_suspend_domain (void)
-+
-+{
-+    adomain_t *this_domain, *next_domain;
-+    struct list_head *ln;
-+    unsigned long flags;
-+    adeos_declare_cpuid;
-+
-+    adeos_lock_cpu(flags);
-+
-+    this_domain = next_domain = adp_cpu_current[cpuid];
-+
-+    __clear_bit(IPIPE_STALL_FLAG,&this_domain->cpudata[cpuid].status);
-+
-+    if (this_domain->cpudata[cpuid].irq_pending_hi != 0)
-+      goto sync_stage;
-+
-+    for (;;)
-+      {
-+      ln = next_domain->p_link.next;
-+
-+      if (ln == &__adeos_pipeline)
-+          break;
-+
-+      next_domain = list_entry(ln,adomain_t,p_link);
-+
-+      if (test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status))
-+          break;
-+
-+      if (next_domain->cpudata[cpuid].irq_pending_hi == 0)
-+          continue;
-+
-+      adp_cpu_current[cpuid] = next_domain;
-+
-+      if (next_domain->dswitch)
-+          next_domain->dswitch();
-+
-+ sync_stage:
-+
-+      __adeos_sync_stage(IPIPE_IRQMASK_ANY);
-+
-+      adeos_load_cpuid();     /* Processor might have changed. */
-+
-+      if (adp_cpu_current[cpuid] != next_domain)
-+          /* Something has changed the current domain under our feet
-+             recycling the register set; take note. */
-+          this_domain = adp_cpu_current[cpuid];
-+      }
-+
-+    adp_cpu_current[cpuid] = this_domain;
-+
-+    adeos_unlock_cpu(flags);
-+}
-+
-+#endif /* CONFIG_ADEOS_THREADS */
-+
-+/* adeos_alloc_irq() -- Allocate a virtual/soft pipelined interrupt.
-+   Virtual interrupts are handled in exactly the same way than their
-+   hw-generated counterparts. This is a very basic, one-way only,
-+   inter-domain communication system (see adeos_trigger_irq()).  Note:
-+   it is not necessary for a domain to allocate a virtual interrupt to
-+   trap it using adeos_virtualize_irq(). The newly allocated VIRQ
-+   number which can be passed to other IRQ-related services is
-+   returned on success, zero otherwise (i.e. no more virtual interrupt
-+   channel is available). We need this service as part of the Adeos
-+   bootstrap code, hence it must reside in a built-in area. */
-+
-+unsigned adeos_alloc_irq (void)
-+
-+{
-+    unsigned long flags, irq = 0;
-+    int ipos;
-+
-+    spin_lock_irqsave_hw(&__adeos_pipelock,flags);
-+
-+    if (__adeos_virtual_irq_map != ~0)
-+      {
-+      ipos = ffz(__adeos_virtual_irq_map);
-+      set_bit(ipos,&__adeos_virtual_irq_map);
-+      irq = ipos + IPIPE_VIRQ_BASE;
-+      }
-+
-+    spin_unlock_irqrestore_hw(&__adeos_pipelock,flags);
-+
-+    return irq;
-+}
-+
-+#ifdef CONFIG_PROC_FS
-+
-+#include <linux/proc_fs.h>
-+
-+static struct proc_dir_entry *adeos_proc_entry;
-+
-+static int __adeos_read_proc (char *page,
-+                            char **start,
-+                            off_t off,
-+                            int count,
-+                            int *eof,
-+                            void *data)
-+{
-+    unsigned long ctlbits;
-+    struct list_head *pos;
-+    unsigned irq, _irq;
-+    char *p = page;
-+    int len;
-+
-+#ifdef CONFIG_ADEOS_MODULE
-+    p += sprintf(p,"Adeos %s -- Pipelining: %s",ADEOS_VERSION_STRING,adp_pipelined ? "active" : "stopped");
-+#else /* !CONFIG_ADEOS_MODULE */
-+    p += sprintf(p,"Adeos %s -- Pipelining: permanent",ADEOS_VERSION_STRING);
-+#endif /* CONFIG_ADEOS_MODULE */
-+#ifdef CONFIG_ADEOS_THREADS
-+    p += sprintf(p, " (threaded)\n\n");
-+#else                         /* CONFIG_ADEOS_THREADS */
-+    p += sprintf(p, "\n\n");
-+#endif                                /* CONFIG_ADEOS_THREADS */
-+
-+    spin_lock(&__adeos_pipelock);
-+
-+    list_for_each(pos,&__adeos_pipeline) {
-+
-+      adomain_t *adp = list_entry(pos,adomain_t,p_link);
-+
-+      p += sprintf(p,"%8s: priority=%d, id=0x%.8x, ptdkeys=%d/%d\n",
-+                   adp->name,
-+                   adp->priority,
-+                   adp->domid,
-+                   adp->ptd_keycount,
-+                   adp->ptd_keymax);
-+      irq = 0;
-+
-+      while (irq < IPIPE_NR_IRQS)
-+          {
-+          ctlbits = (adp->irqs[irq].control & (IPIPE_HANDLE_MASK|IPIPE_PASS_MASK|IPIPE_STICKY_MASK));
-+
-+          if (irq >= IPIPE_NR_XIRQS && !adeos_virtual_irq_p(irq))
-+              {
-+              /* There might be a hole between the last external IRQ
-+                 and the first virtual one; skip it. */
-+              irq++;
-+              continue;
-+              }
-+
-+          if (adeos_virtual_irq_p(irq) && !test_bit(irq - IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map))
-+              {
-+              /* Non-allocated virtual IRQ; skip it. */
-+              irq++;
-+              continue;
-+              }
-+
-+          /* Attempt to group consecutive IRQ numbers having the
-+             same virtualization settings in a single line. */
-+
-+          _irq = irq;
-+
-+          while (++_irq < IPIPE_NR_IRQS)
-+              {
-+              if (adeos_virtual_irq_p(_irq) != adeos_virtual_irq_p(irq) ||
-+                  (adeos_virtual_irq_p(_irq) &&
-+                   !test_bit(_irq - IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map)) ||
-+                  ctlbits != (adp->irqs[_irq].control & (IPIPE_HANDLE_MASK|IPIPE_PASS_MASK|IPIPE_STICKY_MASK)))
-+                  break;
-+              }
-+
-+          if (_irq == irq + 1)
-+              p += sprintf(p,"\tirq%u: ",irq);
-+          else
-+              p += sprintf(p,"\tirq%u-%u: ",irq,_irq - 1);
-+
-+          /* Statuses are as follows:
-+             o "accepted" means handled _and_ passed down the
-+             pipeline.
-+             o "grabbed" means handled, but the interrupt might be
-+             terminated _or_ passed down the pipeline depending on
-+             what the domain handler asks for to Adeos.
-+             o "passed" means unhandled by the domain but passed
-+             down the pipeline.
-+             o "discarded" means unhandled and _not_ passed down the
-+             pipeline. The interrupt merely disappears from the
-+             current domain down to the end of the pipeline. */
-+
-+          if (ctlbits & IPIPE_HANDLE_MASK)
-+              {
-+              if (ctlbits & IPIPE_PASS_MASK)
-+                  p += sprintf(p,"accepted");
-+              else
-+                  p += sprintf(p,"grabbed");
-+              }
-+          else if (ctlbits & IPIPE_PASS_MASK)
-+              p += sprintf(p,"passed");
-+          else
-+              p += sprintf(p,"discarded");
-+
-+          if (ctlbits & IPIPE_STICKY_MASK)
-+              p += sprintf(p,", sticky");
-+
-+          if (adeos_virtual_irq_p(irq))
-+              p += sprintf(p,", virtual");
-+
-+          p += sprintf(p,"\n");
-+
-+          irq = _irq;
-+          }
-+    }
-+
-+    spin_unlock(&__adeos_pipelock);
-+
-+    len = p - page;
-+
-+    if (len <= off + count)
-+      *eof = 1;
-+
-+    *start = page + off;
-+
-+    len -= off;
-+
-+    if (len > count)
-+      len = count;
-+
-+    if (len < 0)
-+      len = 0;
-+
-+    return len;
-+}
-+
-+void __adeos_init_proc (void) {
-+
-+    adeos_proc_entry = create_proc_read_entry("adeos",
-+                                            0444,
-+                                            NULL,
-+                                            &__adeos_read_proc,
-+                                            NULL);
-+}
-+
-+#endif /* CONFIG_PROC_FS */
-+
-+void __adeos_dump_state (void)
-+
-+{
-+    int _cpuid, nr_cpus = num_online_cpus();
-+    struct list_head *pos;
-+    unsigned long flags;
-+    adeos_declare_cpuid;
-+
-+    adeos_lock_cpu(flags);
-+
-+    printk(KERN_WARNING "Adeos: Current domain=%s on CPU #%d [stackbase=%p]\n",
-+         adp_current->name,
-+         cpuid,
-+#ifdef CONFIG_ADEOS_THREADS
-+         (void *)adp_current->estackbase[cpuid]
-+#else /* !CONFIG_ADEOS_THREADS */
-+         current
-+#endif /* CONFIG_ADEOS_THREADS */
-+         );
-+
-+    list_for_each(pos,&__adeos_pipeline) {
-+
-+        adomain_t *adp = list_entry(pos,adomain_t,p_link);
-+
-+        for (_cpuid = 0; _cpuid < nr_cpus; _cpuid++)
-+            printk(KERN_WARNING "%8s[cpuid=%d]: priority=%d, status=0x%lx, pending_hi=0x%lx\n",
-+                   adp->name,
-+                   _cpuid,
-+                   adp->priority,
-+                   adp->cpudata[_cpuid].status,
-+                   adp->cpudata[_cpuid].irq_pending_hi);
-+    }
-+
-+    adeos_unlock_cpu(flags);
-+}
-+
-+EXPORT_SYMBOL(adeos_suspend_domain);
-+EXPORT_SYMBOL(adeos_alloc_irq);
-+EXPORT_SYMBOL(adp_cpu_current);
-+EXPORT_SYMBOL(adp_root);
-+EXPORT_SYMBOL(adp_pipelined);
-+EXPORT_SYMBOL(__adeos_handle_event);
-+EXPORT_SYMBOL(__adeos_unstall_root);
-+EXPORT_SYMBOL(__adeos_stall_root);
-+EXPORT_SYMBOL(__adeos_restore_root);
-+EXPORT_SYMBOL(__adeos_test_and_stall_root);
-+EXPORT_SYMBOL(__adeos_test_root);
-+EXPORT_SYMBOL(__adeos_dump_state);
-+EXPORT_SYMBOL(__adeos_pipeline);
-+EXPORT_SYMBOL(__adeos_pipelock);
-+EXPORT_SYMBOL(__adeos_virtual_irq_map);
-+EXPORT_SYMBOL(__adeos_event_monitors);
-+EXPORT_SYMBOL(adeos_unstall_pipeline_from);
-+#ifdef CONFIG_ADEOS_PROFILING
-+EXPORT_SYMBOL(__adeos_profile_data);
-+#endif /* CONFIG_ADEOS_PROFILING */
-+/* The following are convenience exports which are needed by some
-+   Adeos domains loaded as kernel modules. */
-+EXPORT_SYMBOL(do_exit);
-diff -Nru linux-2.6.10/kernel/exit.c linux-2.6.10-adeos-ppc64-r2/kernel/exit.c
---- linux-2.6.10/kernel/exit.c 2004-12-24 23:35:27.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/kernel/exit.c  2005-10-05 10:34:53.000000000 +0300
-@@ -809,6 +809,9 @@
-       group_dead = atomic_dec_and_test(&tsk->signal->live);
-       if (group_dead)
-               acct_process(code);
-+#ifdef CONFIG_ADEOS_CORE
-+      __adeos_exit_process(tsk);
-+#endif /* CONFIG_ADEOS_CORE */
-       __exit_mm(tsk);
- 
-       exit_sem(tsk);
-diff -Nru linux-2.6.10/kernel/fork.c linux-2.6.10-adeos-ppc64-r2/kernel/fork.c
---- linux-2.6.10/kernel/fork.c 2004-12-24 23:33:59.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/kernel/fork.c  2005-10-05 10:34:53.000000000 +0300
-@@ -1021,6 +1021,14 @@
- 
-       nr_threads++;
-       write_unlock_irq(&tasklist_lock);
-+#ifdef CONFIG_ADEOS_CORE
-+      {
-+      int k;
-+
-+      for (k = 0; k < ADEOS_ROOT_NPTDKEYS; k++)
-+          p->ptd[k] = NULL;
-+      }
-+#endif /* CONFIG_ADEOS_CORE */
-       retval = 0;
- 
- fork_out:
-diff -Nru linux-2.6.10/kernel/Makefile linux-2.6.10-adeos-ppc64-r2/kernel/Makefile
---- linux-2.6.10/kernel/Makefile       2004-12-24 23:34:26.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/kernel/Makefile        2005-10-05 10:34:53.000000000 +0300
-@@ -9,6 +9,7 @@
-           rcupdate.o intermodule.o extable.o params.o posix-timers.o \
-           kthread.o wait.o kfifo.o sys_ni.o
- 
-+obj-$(CONFIG_ADEOS_CORE) += adeos.o
- obj-$(CONFIG_FUTEX) += futex.o
- obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
- obj-$(CONFIG_SMP) += cpu.o spinlock.o
-diff -Nru linux-2.6.10/kernel/panic.c linux-2.6.10-adeos-ppc64-r2/kernel/panic.c
---- linux-2.6.10/kernel/panic.c        2004-12-24 23:35:29.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/kernel/panic.c 2005-10-05 10:34:53.000000000 +0300
-@@ -70,6 +70,9 @@
-       va_end(args);
-       printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
-       bust_spinlocks(0);
-+#ifdef CONFIG_ADEOS_CORE
-+      __adeos_dump_state();
-+#endif /* CONFIG_ADEOS_CORE */
- 
- #ifdef CONFIG_SMP
-       smp_send_stop();
-diff -Nru linux-2.6.10/kernel/printk.c linux-2.6.10-adeos-ppc64-r2/kernel/printk.c
---- linux-2.6.10/kernel/printk.c       2004-12-24 23:35:40.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/kernel/printk.c        2005-10-05 10:34:53.000000000 +0300
-@@ -509,6 +509,66 @@
-  * then changes console_loglevel may break. This is because console_loglevel
-  * is inspected when the actual printing occurs.
-  */
-+#ifdef CONFIG_ADEOS_CORE
-+
-+static raw_spinlock_t __adeos_printk_lock = RAW_SPIN_LOCK_UNLOCKED;
-+
-+static int __adeos_printk_fill;
-+
-+static char __adeos_printk_buf[__LOG_BUF_LEN];
-+
-+void __adeos_flush_printk (unsigned virq)
-+{
-+      char *p = __adeos_printk_buf;
-+      int out = 0, len;
-+
-+      clear_bit(ADEOS_PPRINTK_FLAG,&adp_root->flags);
-+
-+      while (out < __adeos_printk_fill) {
-+              len = strlen(p) + 1;
-+              printk("%s",p);
-+              p += len;
-+              out += len;
-+      }
-+      __adeos_printk_fill = 0;
-+}
-+
-+asmlinkage int printk(const char *fmt, ...)
-+{
-+      unsigned long flags;
-+      int r, fbytes;
-+      va_list args;
-+
-+      va_start(args, fmt);
-+
-+      if (adp_current == adp_root ||
-+          test_bit(ADEOS_SPRINTK_FLAG,&adp_current->flags) ||
-+          oops_in_progress) {
-+              r = vprintk(fmt, args);
-+              goto out;
-+      }
-+
-+      adeos_spin_lock_irqsave(&__adeos_printk_lock,flags);
-+
-+      fbytes = __LOG_BUF_LEN - __adeos_printk_fill;
-+
-+      if (fbytes > 1) {
-+              r = vscnprintf(__adeos_printk_buf + __adeos_printk_fill,
-+                             fbytes, fmt, args) + 1; /* account for the null byte */
-+              __adeos_printk_fill += r;
-+      } else
-+              r = 0;
-+      
-+      adeos_spin_unlock_irqrestore(&__adeos_printk_lock,flags);
-+
-+      if (!test_and_set_bit(ADEOS_PPRINTK_FLAG,&adp_root->flags))
-+              adeos_trigger_irq(__adeos_printk_virq);
-+out: 
-+      va_end(args);
-+
-+      return r;
-+}
-+#else /* !CONFIG_ADEOS_CORE */
- asmlinkage int printk(const char *fmt, ...)
- {
-       va_list args;
-@@ -520,6 +580,7 @@
- 
-       return r;
- }
-+#endif /* CONFIG_ADEOS_CORE */
- 
- asmlinkage int vprintk(const char *fmt, va_list args)
- {
-diff -Nru linux-2.6.10/kernel/sched.c linux-2.6.10-adeos-ppc64-r2/kernel/sched.c
---- linux-2.6.10/kernel/sched.c        2004-12-24 23:35:24.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/kernel/sched.c 2005-10-05 10:34:53.000000000 +0300
-@@ -302,7 +302,16 @@
-  * Default context-switch locking:
-  */
- #ifndef prepare_arch_switch
-+#ifdef CONFIG_ADEOS_CORE
-+#define prepare_arch_switch(rq,prev,next) \
-+do { \
-+    struct { struct task_struct *prev, *next; } arg = { (prev), (next) }; \
-+    __adeos_schedule_head(&arg); \
-+    adeos_hw_cli(); \
-+} while(0)
-+#else /* !CONFIG_ADEOS_CORE */
- # define prepare_arch_switch(rq, next)        do { } while (0)
-+#endif /* CONFIG_ADEOS_CORE */
- # define finish_arch_switch(rq, next) spin_unlock_irq(&(rq)->lock)
- # define task_running(rq, p)          ((rq)->curr == (p))
- #endif
-@@ -1367,6 +1376,9 @@
- 
-       if (current->set_child_tid)
-               put_user(current->pid, current->set_child_tid);
-+#ifdef CONFIG_ADEOS_CORE
-+      __adeos_enter_process();
-+#endif /* CONFIG_ADEOS_CORE */
- }
- 
- /*
-@@ -2535,6 +2547,11 @@
-       unsigned long run_time;
-       int cpu, idx;
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      if (adp_current != adp_root) /* Let's be helpful and conservative. */
-+          return;
-+#endif /* CONFIG_ADEOS_CORE */
-+
-       /*
-        * Test if we are atomic.  Since do_exit() needs to call into
-        * schedule() atomically, we ignore that path for now.
-@@ -2684,9 +2701,28 @@
-               rq->curr = next;
-               ++*switch_count;
- 
--              prepare_arch_switch(rq, next);
-+#ifdef CONFIG_ADEOS_CORE
-+              prepare_arch_switch(rq, prev, next);
-+#else /* !CONFIG_ADEOS_CORE */
-+              prepare_arch_switch(rq, next);
-+#endif /* CONFIG_ADEOS_CORE */
-               prev = context_switch(rq, prev, next);
-               barrier();
-+#ifdef CONFIG_ADEOS_CORE
-+              if (adp_pipelined)
-+                  {
-+                  __clear_bit(IPIPE_SYNC_FLAG,&adp_root->cpudata[task_cpu(current)].status);
-+                  adeos_hw_sti();
-+                  }
-+
-+              if (__adeos_schedule_tail(prev) > 0 || adp_current != adp_root)
-+                  /* Someone has just recycled the register set of
-+                     prev for running over a non-root domain, or
-+                     some event handler in the pipeline asked for a
-+                     truncated scheduling tail. Don't perform the
-+                     Linux housekeeping chores, at least not now. */
-+                  return;
-+#endif /* CONFIG_ADEOS_CORE */
- 
-               finish_task_switch(prev);
-       } else
-@@ -3148,6 +3184,16 @@
-       retval = security_task_setscheduler(p, policy, &lp);
-       if (retval)
-               goto out_unlock;
-+#ifdef CONFIG_ADEOS_CORE
-+      {
-+      struct { struct task_struct *task; int policy; struct sched_param *param; } evdata = { p, policy, &lp };
-+      if (__adeos_renice_process(&evdata))
-+          {
-+          retval = 0;
-+          goto out_unlock;
-+          }
-+      }
-+#endif /* CONFIG_ADEOS_CORE */
-       /*
-        * To be able to change p->policy safely, the apropriate
-        * runqueue lock must be held.
-@@ -4676,3 +4722,62 @@
- }
- 
- #endif /* CONFIG_MAGIC_SYSRQ */
-+
-+#ifdef CONFIG_ADEOS_CORE
-+
-+int __adeos_setscheduler_root (struct task_struct *p, int policy, int prio)
-+{
-+      prio_array_t *array;
-+      unsigned long flags;
-+      runqueue_t *rq;
-+      int oldprio;
-+
-+      if (prio < 1 || prio > MAX_RT_PRIO-1)
-+          return -EINVAL;
-+
-+      read_lock_irq(&tasklist_lock);
-+      rq = task_rq_lock(p, &flags);
-+      array = p->array;
-+      if (array)
-+              deactivate_task(p, rq);
-+      oldprio = p->prio;
-+      __setscheduler(p, policy, prio);
-+      if (array) {
-+              __activate_task(p, rq);
-+              if (task_running(rq, p)) {
-+                      if (p->prio > oldprio)
-+                              resched_task(rq->curr);
-+              } else if (TASK_PREEMPTS_CURR(p, rq))
-+                      resched_task(rq->curr);
-+      }
-+      task_rq_unlock(rq, &flags);
-+      read_unlock_irq(&tasklist_lock);
-+
-+      return 0;
-+}
-+
-+EXPORT_SYMBOL(__adeos_setscheduler_root);
-+
-+void __adeos_reenter_root (struct task_struct *prev,
-+                         int policy,
-+                         int prio)
-+{
-+      finish_task_switch(prev);
-+      if (reacquire_kernel_lock(current) < 0)
-+          ;
-+      preempt_enable_no_resched();
-+
-+      if (current->policy != policy || current->rt_priority != prio)
-+          __adeos_setscheduler_root(current,policy,prio);
-+}
-+
-+EXPORT_SYMBOL(__adeos_reenter_root);
-+
-+void __adeos_schedule_back_root (struct task_struct *prev)
-+{
-+    __adeos_reenter_root(prev,current->policy,current->rt_priority);
-+}
-+
-+EXPORT_SYMBOL(__adeos_schedule_back_root);
-+
-+#endif /* CONFIG_ADEOS_CORE */
-diff -Nru linux-2.6.10/kernel/signal.c linux-2.6.10-adeos-ppc64-r2/kernel/signal.c
---- linux-2.6.10/kernel/signal.c       2004-12-24 23:34:32.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/kernel/signal.c        2005-10-05 10:34:53.000000000 +0300
-@@ -576,6 +576,13 @@
- 
-       set_tsk_thread_flag(t, TIF_SIGPENDING);
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      {
-+      struct { struct task_struct *t; } evdata = { t };
-+      __adeos_kick_process(&evdata);
-+      }
-+#endif /* CONFIG_ADEOS_CORE */
-+
-       /*
-        * If resume is set, we want to wake it up in the TASK_STOPPED case.
-        * We don't check for TASK_STOPPED because there is a race with it
-@@ -823,6 +830,17 @@
-               BUG();
- #endif
- 
-+#ifdef CONFIG_ADEOS_CORE
-+      /* If some domain handler in the pipeline doesn't ask for
-+         propagation, return success pretending that 'sig' was
-+         delivered. */
-+      {
-+      struct { struct task_struct *task; int sig; } evdata = { t, sig };
-+      if (__adeos_signal_process(&evdata))
-+          goto out;
-+      }
-+#endif /* CONFIG_ADEOS_CORE */
-+
-       if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
-               /*
-                * Set up a return to indicate that we dropped the signal.
-diff -Nru linux-2.6.10/kernel/sysctl.c linux-2.6.10-adeos-ppc64-r2/kernel/sysctl.c
---- linux-2.6.10/kernel/sysctl.c       2004-12-24 23:33:59.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/kernel/sysctl.c        2005-10-05 10:34:53.000000000 +0300
-@@ -946,6 +946,9 @@
- #ifdef CONFIG_PROC_FS
-       register_proc_table(root_table, proc_sys_root);
-       init_irq_proc();
-+#ifdef CONFIG_ADEOS_CORE
-+      __adeos_init_proc();
-+#endif /* CONFIG_ADEOS_CORE */
- #endif
- }
- 
-diff -Nru linux-2.6.10/Makefile linux-2.6.10-adeos-ppc64-r2/Makefile
---- linux-2.6.10/Makefile      2004-12-24 23:35:01.000000000 +0200
-+++ linux-2.6.10-adeos-ppc64-r2/Makefile       2005-10-05 10:34:53.000000000 +0300
-@@ -558,6 +558,8 @@
- ifeq ($(KBUILD_EXTMOD),)
- core-y                += kernel/ mm/ fs/ ipc/ security/ crypto/
- 
-+core-$(CONFIG_ADEOS) += adeos/
-+
- vmlinux-dirs  := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
-                    $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
-                    $(net-y) $(net-m) $(libs-y) $(libs-m)))
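For anyone skimming the removed Adeos core above rather than applying it: the comment over adeos_alloc_irq() describes virtual IRQs as a one-way, inter-domain notification channel, and the deferred printk() hunk in kernel/printk.c is the in-patch user of that pattern (buffer the message, then adeos_trigger_irq() a virq whose handler, __adeos_flush_printk(), drains the buffer from the root domain). A minimal, purely illustrative sketch of the same pattern follows; the my_* names and the header path are assumptions, and handler registration through adeos_virtualize_irq() is left out because its signature does not appear in this excerpt.

/* Hypothetical sketch of the virtual-IRQ notification pattern used by the
 * deferred printk() code above.  Everything prefixed with "my_" is
 * illustrative only and not part of the patch. */
#include <linux/adeos.h>        /* assumed header for the Adeos services */

static unsigned my_notify_virq; /* 0 means "no channel allocated" */

static int my_channel_init(void)
{
        /* Reserve a free virtual interrupt; adeos_alloc_irq() returns 0
         * when no virtual channel is left, as its comment states. */
        my_notify_virq = adeos_alloc_irq();
        if (my_notify_virq == 0)
                return -1;

        /* The receiving domain would now attach a handler to
         * my_notify_virq via adeos_virtualize_irq() (not shown here). */
        return 0;
}

static void my_channel_notify(void)
{
        /* Post the virtual IRQ; the receiver's handler runs once its
         * pipeline stage is synchronized, as with __adeos_printk_virq. */
        adeos_trigger_irq(my_notify_virq);
}
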
diff -Nru --exclude=.svn xenomai-orig/arch/ppc64/patches/README xenomai-devel/arch/ppc64/patches/README
--- xenomai-orig/arch/ppc64/patches/README      2005-10-11 10:32:31.000000000 +0300
+++ xenomai-devel/arch/ppc64/patches/README     1970-01-01 02:00:00.000000000 +0200
@@ -1,17 +0,0 @@
--- arch/ppc64/patches
-
-Xenomai needs special kernel support to deliver fast and
-deterministic response time to external interrupts, and also to
-provide real-time services highly integrated with the standard Linux
-kernel.
-
-This support is provided by the Adeos real-time enabler [1], in the
-form of a kernel patch you have to apply to a vanilla kernel tree,
-before you attempt to compile the Xenomai codebase against the
-latter kernel.
-
-Apply one of the patches found in this directory to the corresponding
-kernel release. You may want to have a look at the README.*INSTALL
-guides at the top of the Xenomai tree for more information.
-
-[1] http://www.gna.org/projects/adeos/
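
One more note on the root-domain stall helpers dropped above: __adeos_test_and_stall_root() returns the previous stall state of the root stage, and __adeos_restore_root() re-applies whatever state it is handed, so the two pair up to give local_irq_save()/local_irq_restore()-style semantics for the pipelined root domain. A trivial, purely illustrative pairing (the example_* wrappers are hypothetical, not part of the patch):

/* Illustrative only: pairing the exported root-stall helpers. */
static inline unsigned long example_root_irq_save(void)
{
        /* Non-zero if the root stage was already stalled (or, when the
         * pipeline is off, if hardware interrupts were disabled). */
        return __adeos_test_and_stall_root();
}

static inline void example_root_irq_restore(unsigned long flags)
{
        /* Re-stall or unstall the root stage depending on the saved
         * state, exactly as __adeos_restore_root() does above. */
        __adeos_restore_root(flags);
}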
