A new Adeos patch for 2.6.10 on ppc64 (r3), with minor paranoia reduction.
Also available from:

http://www.cs.helsinki.fi/group/nonsto/rtaippc64.html

-- Heikki Lindholm

diff -Nru linux-2.6.10/adeos/generic.c linux-2.6.10-adeos-ppc64-r3/adeos/generic.c
--- linux-2.6.10/adeos/generic.c        1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/adeos/generic.c 2005-11-13 11:45:31.000000000 +0200
@@ -0,0 +1,640 @@
+/*
+ *   linux/adeos/generic.c
+ *
+ *   Copyright (C) 2002 Philippe Gerum.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ *   Architecture-independent ADEOS services.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+
+MODULE_DESCRIPTION("Adeos nanokernel");
+MODULE_AUTHOR("Philippe Gerum");
+MODULE_LICENSE("GPL");
+
+/* adeos_register_domain() -- Add a new domain to the system. All
+   client domains must call this routine to register themselves with
+   ADEOS before using its services. */
+
+int adeos_register_domain (adomain_t *adp, adattr_t *attr)
+
+{
+    struct list_head *pos;
+    unsigned long flags;
+    int n;
+
+    if (adp_current != adp_root)
+       {
+       printk(KERN_WARNING "Adeos: Only the root domain may register a new domain.\n");
+       return -EPERM;
+       }
+
+    flags = adeos_critical_enter(NULL);
+
+    list_for_each(pos,&__adeos_pipeline) {
+       adomain_t *_adp = list_entry(pos,adomain_t,p_link);
+       if (_adp->domid == attr->domid)
+            break;
+    }
+
+    adeos_critical_exit(flags);
+
+    if (pos != &__adeos_pipeline)
+       /* A domain with the given id already exists -- fail. */
+       return -EBUSY;
+
+    for (n = 0; n < ADEOS_NR_CPUS; n++)
+       {
+       /* Each domain starts in sleeping state on every CPU. */
+       adp->cpudata[n].status = (1 << IPIPE_SLEEP_FLAG);
+#ifdef CONFIG_ADEOS_THREADS
+       adp->estackbase[n] = 0;
+#endif /* CONFIG_ADEOS_THREADS */
+       }
+
+    adp->name = attr->name;
+    adp->priority = attr->priority;
+    adp->domid = attr->domid;
+    adp->dswitch = attr->dswitch;
+    adp->flags = 0;
+    adp->ptd_setfun = attr->ptdset;
+    adp->ptd_getfun = attr->ptdget;
+    adp->ptd_keymap = 0;
+    adp->ptd_keycount = 0;
+    adp->ptd_keymax = attr->nptdkeys;
+
+    for (n = 0; n < ADEOS_NR_EVENTS; n++)
+       /* Event handlers must be cleared before the i-pipe stage is
+          inserted since an exception may occur on behalf of the new
+          emerging domain. */
+       adp->events[n].handler = NULL;
+
+    if (attr->entry != NULL)
+       __adeos_init_domain(adp,attr);
+
+    /* Insert the domain in the interrupt pipeline last, so it won't
+       be resumed for processing interrupts until it has a valid stack
+       context. */
+
+    __adeos_init_stage(adp);
+
+    INIT_LIST_HEAD(&adp->p_link);
+
+    flags = adeos_critical_enter(NULL);
+
+    list_for_each(pos,&__adeos_pipeline) {
+       adomain_t *_adp = list_entry(pos,adomain_t,p_link);
+       if (adp->priority > _adp->priority)
+            break;
+    }
+
+    list_add_tail(&adp->p_link,pos);
+
+    adeos_critical_exit(flags);
+
+    printk(KERN_WARNING "Adeos: Domain %s registered.\n",adp->name);
+
+    /* Finally, allow the new domain to perform its initialization
+       chores. */
+
+    if (attr->entry != NULL)
+       {
+       adeos_declare_cpuid;
+
+       adeos_lock_cpu(flags);
+
+#ifdef CONFIG_ADEOS_THREADS
+       __adeos_switch_to(adp_root,adp,cpuid);
+#else /* !CONFIG_ADEOS_THREADS */
+       adp_cpu_current[cpuid] = adp;
+       attr->entry(1);
+       adp_cpu_current[cpuid] = adp_root;
+#endif /* CONFIG_ADEOS_THREADS */
+
+       adeos_load_cpuid();     /* Processor might have changed. */
+
+       if (adp_root->cpudata[cpuid].irq_pending_hi != 0 &&
+           !test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status))
+           __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+
+       adeos_unlock_cpu(flags);
+       }
+
+    return 0;
+}
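+
+/* A minimal registration sketch (illustrative only -- the entry
+   routine, domain id and priority below are made-up values, not part
+   of this patch):
+
+   static adomain_t my_domain;
+
+   static void my_entry (int iscpu)
+   {
+       for (;;)
+          adeos_suspend_domain();
+   }
+
+   adattr_t attr;
+   adeos_init_attr(&attr);
+   attr.name = "MyDomain";
+   attr.domid = 0x4d594f44;
+   attr.priority = ADEOS_ROOT_PRI + 100;
+   attr.entry = &my_entry;
+   adeos_register_domain(&my_domain,&attr);
+*/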
+
+/* adeos_unregister_domain() -- Remove a domain from the system. All
+   client domains must call this routine to unregister themselves from
+   the ADEOS layer. */
+
+int adeos_unregister_domain (adomain_t *adp)
+
+{
+    unsigned long flags;
+    unsigned event;
+
+    if (adp_current != adp_root)
+       {
+       printk(KERN_WARNING "Adeos: Only the root domain may unregister a domain.\n");
+       return -EPERM;
+       }
+
+    if (adp == adp_root)
+       {
+       printk(KERN_WARNING "Adeos: Cannot unregister the root domain.\n");
+       return -EPERM;
+       }
+
+    for (event = 0; event < ADEOS_NR_EVENTS; event++)
+       /* Need this to update the monitor count. */
+       adeos_catch_event_from(adp,event,NULL);
+
+#ifdef CONFIG_SMP
+    {
+    int nr_cpus = num_online_cpus(), _cpuid;
+    unsigned irq;
+
+    /* In the SMP case, wait for the logged events to drain on other
+       processors before eventually removing the domain from the
+       pipeline. */
+
+    adeos_unstall_pipeline_from(adp);
+
+    flags = adeos_critical_enter(NULL);
+
+    for (irq = 0; irq < IPIPE_NR_IRQS; irq++)
+       {
+       clear_bit(IPIPE_HANDLE_FLAG,&adp->irqs[irq].control);
+       clear_bit(IPIPE_STICKY_FLAG,&adp->irqs[irq].control);
+       set_bit(IPIPE_PASS_FLAG,&adp->irqs[irq].control);
+       }
+
+    adeos_critical_exit(flags);
+
+    for (_cpuid = 0; _cpuid < nr_cpus; _cpuid++)
+       {
+       for (irq = 0; irq < IPIPE_NR_IRQS; irq++)
+           while (adp->cpudata[_cpuid].irq_hits[irq] > 0)
+               cpu_relax();
+
+       while (test_bit(IPIPE_XPEND_FLAG,&adp->cpudata[_cpuid].status))
+           cpu_relax();
+
+       while (!test_bit(IPIPE_SLEEP_FLAG,&adp->cpudata[_cpuid].status))
+            cpu_relax();
+       }
+    }
+#endif /* CONFIG_SMP */
+
+    /* Simply remove the domain from the pipeline and we are almost
+       done. */
+
+    flags = adeos_critical_enter(NULL);
+    list_del_init(&adp->p_link);
+    adeos_critical_exit(flags);
+
+    __adeos_cleanup_domain(adp);
+
+    printk(KERN_WARNING "Adeos: Domain %s unregistered.\n",adp->name);
+
+    return 0;
+}
+
+/* adeos_propagate_irq() -- Force a given IRQ propagation on behalf of
+   a running interrupt handler to the next domain down the pipeline.
+   Returns non-zero if a domain has received the interrupt
+   notification, zero otherwise.
+   This call is useful for handling shared interrupts among domains.
+   e.g. pipeline = [domain-A]---[domain-B]...
+   Both domains share IRQ #X.
+   - domain-A handles IRQ #X but does not pass it down (i.e. Terminate
+   or Dynamic interrupt control mode)
+   - domain-B handles IRQ #X (i.e. Terminate or Accept interrupt
+   control modes).
+   When IRQ #X is raised, domain-A's handler determines whether it
+   should process the interrupt by identifying its source. If not,
+   adeos_propagate_irq() is called so that the next domain down the
+   pipeline which handles IRQ #X is given a chance to process it. This
+   process can be repeated until the end of the pipeline is
+   reached. */
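+
+/* For instance, domain-A's handler for the shared line could read
+   (sketch; my_device_raised_irq() is a hypothetical status check):
+
+   static void domain_a_irq_handler (unsigned irq)
+   {
+       if (!my_device_raised_irq())
+          {
+          adeos_propagate_irq(irq);
+          return;
+          }
+       ... service the device here ...
+   }
+*/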
+
+/* adeos_schedule_irq() -- Almost the same as adeos_propagate_irq(),
+   but attempts to pend the interrupt for the current domain first. */
+
+int fastcall __adeos_schedule_irq (unsigned irq, struct list_head *head)
+
+{
+    struct list_head *ln;
+    unsigned long flags;
+    adeos_declare_cpuid;
+
+    if (irq >= IPIPE_NR_IRQS ||
+       (adeos_virtual_irq_p(irq) && !test_bit(irq - IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map)))
+       return -EINVAL;
+
+    adeos_lock_cpu(flags);
+
+    ln = head;
+
+    while (ln != &__adeos_pipeline)
+       {
+       adomain_t *adp = list_entry(ln,adomain_t,p_link);
+
+       if (test_bit(IPIPE_HANDLE_FLAG,&adp->irqs[irq].control))
+           {
+           adp->cpudata[cpuid].irq_hits[irq]++;
+           __adeos_set_irq_bit(adp,cpuid,irq);
+           adeos_unlock_cpu(flags);
+           return 1;
+           }
+
+       ln = adp->p_link.next;
+       }
+
+    adeos_unlock_cpu(flags);
+
+    return 0;
+}
+
+/* adeos_free_irq() -- Return a previously allocated virtual/soft
+   pipelined interrupt to the pool of allocatable interrupts. */
+
+int adeos_free_irq (unsigned irq)
+
+{
+    if (irq >= IPIPE_NR_IRQS || !adeos_virtual_irq_p(irq))
+       return -EINVAL;
+
+    clear_bit(irq - IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map);
+
+    return 0;
+}
+
+cpumask_t adeos_set_irq_affinity (unsigned irq, cpumask_t cpumask)
+
+{
+#ifdef CONFIG_SMP
+     if (irq >= IPIPE_NR_XIRQS)
+        /* Allow changing affinity of external IRQs only. */
+        return CPU_MASK_NONE;
+
+     if (num_online_cpus() > 1)
+        return __adeos_set_irq_affinity(irq,cpumask);
+#endif /* CONFIG_SMP */
+
+    return CPU_MASK_NONE;
+}
+
+/* adeos_catch_event_from() -- Interpose an event handler starting
+   from a given domain. */
+
+adevhand_t adeos_catch_event_from (adomain_t *adp, unsigned event, adevhand_t handler)
+
+{
+    adevhand_t oldhandler;
+
+    if (event >= ADEOS_NR_EVENTS)
+       return NULL;
+
+    if ((oldhandler = (adevhand_t)xchg(&adp->events[event].handler,handler)) == NULL)
+       {
+       if (handler)
+           __adeos_event_monitors[event]++;
+       }
+    else if (!handler)
+       __adeos_event_monitors[event]--;
+
+    return oldhandler;
+}
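+
+/* E.g. interposing on the syscall prologue event from the current
+   domain (sketch; my_hook must match the adevhand_t signature):
+
+   adevhand_t old = adeos_catch_event_from(adp_current,
+                                           ADEOS_SYSCALL_PROLOGUE,
+                                           &my_hook);
+*/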
+
+void adeos_init_attr (adattr_t *attr)
+
+{
+    attr->name = "Anonymous";
+    attr->domid = 1;
+    attr->entry = NULL;
+    attr->estacksz = 0;        /* Let ADEOS choose a reasonable stack size */
+    attr->priority = ADEOS_ROOT_PRI;
+    attr->dswitch = NULL;
+    attr->nptdkeys = 0;
+    attr->ptdset = NULL;
+    attr->ptdget = NULL;
+}
+
+int adeos_alloc_ptdkey (void)
+
+{
+    unsigned long flags;
+    int key = -1;
+
+    spin_lock_irqsave_hw(&__adeos_pipelock,flags);
+
+    if (adp_current->ptd_keycount < adp_current->ptd_keymax)
+       {
+       key = ffz(adp_current->ptd_keymap);
+       set_bit(key,&adp_current->ptd_keymap);
+       adp_current->ptd_keycount++;
+       }
+
+    spin_unlock_irqrestore_hw(&__adeos_pipelock,flags);
+
+    return key;
+}
+
+int adeos_free_ptdkey (int key)
+
+{
+    unsigned long flags; 
+
+    if (key < 0 || key >= adp_current->ptd_keymax)
+       return -EINVAL;
+
+    spin_lock_irqsave_hw(&__adeos_pipelock,flags);
+
+    if (test_and_clear_bit(key,&adp_current->ptd_keymap))
+       adp_current->ptd_keycount--;
+
+    spin_unlock_irqrestore_hw(&__adeos_pipelock,flags);
+
+    return 0;
+}
+
+int adeos_set_ptd (int key, void *value)
+
+{
+    if (key < 0 || key >= adp_current->ptd_keymax)
+       return -EINVAL;
+
+    if (!adp_current->ptd_setfun)
+       {
+       printk(KERN_WARNING "Adeos: No ptdset hook for %s\n",adp_current->name);
+       return -EINVAL;
+       }
+
+    adp_current->ptd_setfun(key,value);
+
+    return 0;
+}
+
+void *adeos_get_ptd (int key)
+
+{
+    if (key < 0 || key >= adp_current->ptd_keymax)
+       return NULL;
+
+    if (!adp_current->ptd_getfun)
+       {
+       printk(KERN_WARNING "Adeos: No ptdget hook for %s\n",adp_current->name);
+       return NULL;
+       }
+
+    return adp_current->ptd_getfun(key);
+}
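+
+/* The per-thread data services combine as follows (sketch; assumes
+   the current domain registered ptdset/ptdget hooks and a non-zero
+   nptdkeys at registration time):
+
+   int key = adeos_alloc_ptdkey();
+
+   if (key >= 0)
+       {
+       adeos_set_ptd(key,some_pointer);
+       ... adeos_get_ptd(key) now returns some_pointer ...
+       adeos_free_ptdkey(key);
+       }
+*/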
+
+int adeos_init_mutex (admutex_t *mutex)
+
+{
+    admutex_t initm = ADEOS_MUTEX_UNLOCKED;
+    *mutex = initm;
+    return 0;
+}
+
+#ifdef CONFIG_ADEOS_THREADS
+
+int adeos_destroy_mutex (admutex_t *mutex)
+
+{
+    if (!adeos_spin_trylock(&mutex->lock) &&
+       adp_current != adp_root &&
+       mutex->owner != adp_current)
+       return -EBUSY;
+
+    return 0;
+}
+
+static inline void __adeos_sleepon_mutex (admutex_t *mutex, adomain_t *sleeper, int cpuid)
+
+{
+    adomain_t *owner = mutex->owner;
+
+    /* Make the current domain (== sleeper) wait for the mutex to be
+       released. Adeos' pipelined scheme guarantees that the new
+       sleeper _is_ of higher priority than any sleeping domain since we
+       have stalled each sleeper's stage. Must be called with local hw
+       interrupts off. */
+
+    sleeper->m_link = mutex->sleepq;
+    mutex->sleepq = sleeper;
+    __adeos_switch_to(adp_cpu_current[cpuid],owner,cpuid);
+    mutex->owner = sleeper;
+    adeos_spin_unlock(&mutex->lock);
+}
+
+unsigned long fastcall adeos_lock_mutex (admutex_t *mutex)
+
+{
+    unsigned long flags, hwflags;
+    adeos_declare_cpuid;
+    adomain_t *adp;
+
+    if (!adp_pipelined)
+       {
+       adeos_hw_local_irq_save(hwflags);
+       flags = !adeos_hw_test_iflag(hwflags);
+       adeos_spin_lock(&mutex->lock);
+       return flags;
+       }
+
+    adeos_lock_cpu(hwflags);
+
+    adp = adp_cpu_current[cpuid];
+
+    flags = __test_and_set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+
+    /* Two cases to handle here on SMP systems, only one for UP: 1) in
+       case of a conflicting access from a higher priority domain
+       running on the same cpu, make this domain sleep on the mutex,
+       and resume the current owner so it can release the lock asap.
+       2) in case of a conflicting access from any domain on a
+       different cpu than the current owner's, simply enter a spinning
+       loop. Note that testing mutex->owncpu is safe since it is only
+       changed by the current owner, and set to -1 when the mutex is
+       unlocked. */
+
+#ifdef CONFIG_SMP
+    while (!adeos_spin_trylock(&mutex->lock))
+       {
+       if (mutex->owncpu == cpuid)
+           {
+           __adeos_sleepon_mutex(mutex,adp,cpuid);
+           adeos_load_cpuid();
+           }
+       }
+
+    mutex->owncpu = cpuid;
+#else  /* !CONFIG_SMP */
+    while (mutex->owner != NULL && mutex->owner != adp)
+       __adeos_sleepon_mutex(mutex,adp,cpuid);
+#endif /* CONFIG_SMP */
+
+    mutex->owner = adp;
+
+    adeos_unlock_cpu(hwflags);
+
+    return flags;
+}
+
+void fastcall adeos_unlock_mutex (admutex_t *mutex, unsigned long flags)
+
+{
+    unsigned long hwflags;
+    adeos_declare_cpuid;
+    adomain_t *adp;
+
+    if (!adp_pipelined)
+       {
+       adeos_spin_unlock(&mutex->lock);
+
+       if (flags)
+           adeos_hw_cli();
+       else
+           adeos_hw_sti();
+
+       return;
+       }
+
+#ifdef CONFIG_SMP
+    mutex->owncpu = -1;
+#endif /* CONFIG_SMP */
+
+    if (!flags)
+       adeos_hw_sti(); /* Absolutely needed. */
+       
+    adeos_lock_cpu(hwflags);
+
+    if (mutex->sleepq != NULL)
+       {
+       adomain_t *sleeper = mutex->sleepq;
+       /* Wake up the highest priority sleeper. */
+       mutex->sleepq = sleeper->m_link;
+       __adeos_switch_to(adp_cpu_current[cpuid],sleeper,cpuid);
+       adeos_load_cpuid();
+       }
+    else
+       {
+       mutex->owner = NULL;
+       adeos_spin_unlock(&mutex->lock);
+       }
+
+    adp = adp_cpu_current[cpuid];
+
+    if (flags)
+       __set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+    else
+       {
+       __clear_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+       
+       if (adp->cpudata[cpuid].irq_pending_hi != 0)
+           __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+       }
+
+    adeos_unlock_cpu(hwflags);
+}
+
+#else /* !CONFIG_ADEOS_THREADS */
+
+int adeos_destroy_mutex (admutex_t *mutex)
+
+{
+    if (!adeos_spin_trylock(&mutex->lock) &&
+       adp_current != adp_root)
+       return -EBUSY;
+
+    return 0;
+}
+
+unsigned long fastcall adeos_lock_mutex (admutex_t *mutex)
+
+{
+    unsigned long flags; /* FIXME: won't work on SPARC */
+    spin_lock_irqsave_hw(&mutex->lock,flags);
+    return flags;
+}
+
+void fastcall adeos_unlock_mutex (admutex_t *mutex, unsigned long flags)
+
+{
+    spin_unlock_irqrestore_hw(&mutex->lock,flags);
+}
+
+#endif /* CONFIG_ADEOS_THREADS */
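+
+/* Whatever the threading mode, the mutex services pair up as follows
+   (sketch):
+
+   static admutex_t my_mutex = ADEOS_MUTEX_UNLOCKED;
+   unsigned long flags;
+
+   flags = adeos_lock_mutex(&my_mutex);
+   ... critical section ...
+   adeos_unlock_mutex(&my_mutex,flags);
+*/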
+
+void __adeos_takeover (void)
+
+{
+    __adeos_enable_pipeline();
+    printk(KERN_WARNING "Adeos: Pipelining started.\n");
+}
+
+#ifdef MODULE
+
+static int __init adeos_init_module (void)
+
+{
+    __adeos_takeover();
+    return 0;
+}
+
+static void __exit adeos_exit_module (void)
+
+{
+    __adeos_disable_pipeline();
+    printk(KERN_WARNING "Adeos: Pipelining stopped.\n");
+}
+
+module_init(adeos_init_module);
+module_exit(adeos_exit_module);
+
+#endif /* MODULE */
+
+EXPORT_SYMBOL(adeos_register_domain);
+EXPORT_SYMBOL(adeos_unregister_domain);
+EXPORT_SYMBOL(adeos_virtualize_irq_from);
+EXPORT_SYMBOL(adeos_control_irq);
+EXPORT_SYMBOL(__adeos_schedule_irq);
+EXPORT_SYMBOL(adeos_free_irq);
+EXPORT_SYMBOL(adeos_send_ipi);
+EXPORT_SYMBOL(adeos_catch_event_from);
+EXPORT_SYMBOL(adeos_init_attr);
+EXPORT_SYMBOL(adeos_get_sysinfo);
+EXPORT_SYMBOL(adeos_tune_timer);
+EXPORT_SYMBOL(adeos_alloc_ptdkey);
+EXPORT_SYMBOL(adeos_free_ptdkey);
+EXPORT_SYMBOL(adeos_set_ptd);
+EXPORT_SYMBOL(adeos_get_ptd);
+EXPORT_SYMBOL(adeos_set_irq_affinity);
+EXPORT_SYMBOL(adeos_init_mutex);
+EXPORT_SYMBOL(adeos_destroy_mutex);
+EXPORT_SYMBOL(adeos_lock_mutex);
+EXPORT_SYMBOL(adeos_unlock_mutex);
diff -Nru linux-2.6.10/adeos/Kconfig linux-2.6.10-adeos-ppc64-r3/adeos/Kconfig
--- linux-2.6.10/adeos/Kconfig  1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/adeos/Kconfig   2005-11-13 11:45:31.000000000 +0200
@@ -0,0 +1,40 @@
+menu "Adeos support"
+
+config ADEOS
+       tristate "Adeos support"
+       default y
+       ---help---
+         Activate this option if you want the Adeos nanokernel to be
+         compiled in.
+
+config ADEOS_CORE
+       def_bool ADEOS
+
+config ADEOS_THREADS
+       bool "Threaded domains"
+       depends on ADEOS && !PPC64
+       default y
+       ---help---
+         This option causes the domains to run as lightweight
+         threads, which is useful for having separate stacks
+         for domains. Enabling this option is the safest setting for
+         now; disabling it causes an experimental mode to be used
+         where interrupts/events are directly processed on behalf of
+         the preempted context. Say Y if unsure.
+
+config ADEOS_NOTHREADS
+       def_bool !ADEOS_THREADS
+
+config ADEOS_PROFILING
+       bool "Pipeline profiling"
+       depends on ADEOS
+       default n
+       ---help---
+         This option activates the profiling code which collects the
+         timestamps needed to measure the propagation time of
+         interrupts through the pipeline. Say N if unsure.
+
+config ADEOS_PREEMPT_RT
+       def_bool PREEMPT_NONE || PREEMPT_VOLUNTARY || PREEMPT_DESKTOP || PREEMPT_RT
+
+endmenu
diff -Nru linux-2.6.10/adeos/Makefile linux-2.6.10-adeos-ppc64-r3/adeos/Makefile
--- linux-2.6.10/adeos/Makefile 1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/adeos/Makefile  2005-11-13 11:45:31.000000000 +0200
@@ -0,0 +1,15 @@
+#
+# Makefile for the Adeos layer.
+#
+
+obj-$(CONFIG_ADEOS)    += adeos.o
+
+adeos-objs             := generic.o
+
+adeos-$(CONFIG_X86)    += x86.o
+
+adeos-$(CONFIG_IA64)   += ia64.o
+
+adeos-$(CONFIG_PPC32)  += ppc.o
+
+adeos-$(CONFIG_PPC64)  += ppc64.o
diff -Nru linux-2.6.10/adeos/ppc64.c linux-2.6.10-adeos-ppc64-r3/adeos/ppc64.c
--- linux-2.6.10/adeos/ppc64.c  1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/adeos/ppc64.c   2005-11-13 11:45:31.000000000 +0200
@@ -0,0 +1,527 @@
+/*
+ *   linux/adeos/ppc64.c
+ *  
+ *   Adeos 64-bit PowerPC adaptation
+ *   Copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
+ *   based on previous work:
+ * 
+ *   Copyright (C) 2004 Philippe Gerum.
+ * 
+ *   Adeos/PPC port over 2.6 based on the previous 2.4 implementation by:
+ *
+ *   Copyright (C) 2004 Wolfgang Grandegger.
+ *
+ *   It follows closely the ARM and x86 ports of ADEOS.
+ *
+ *   Copyright (C) 2003 Philippe Gerum.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ *   Architecture-dependent ADEOS support for PowerPC.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/errno.h>
+#include <asm/system.h>
+#include <asm/hw_irq.h>
+#include <asm/irq.h>
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <asm/time.h>
+#include <asm/cputable.h> /* cur_cpu_spec & CPU_FTR* */
+#include <asm/mmu_context.h> /* get_kernel_vsid */
+
+extern spinlock_t __adeos_pipelock;
+
+extern unsigned long __adeos_virtual_irq_map;
+
+extern struct list_head __adeos_pipeline;
+
+extern irq_desc_t irq_desc[];
+
+static struct hw_interrupt_type __adeos_std_irq_dtype[NR_IRQS];
+
+/*
+ * Check NULLs when calling dtype[].X ?
+ *  (.end)
+ */
+
+static void __adeos_override_irq_enable (unsigned irq)
+
+{
+    unsigned long adflags, hwflags;
+    adeos_declare_cpuid;
+
+    adeos_hw_local_irq_save(hwflags);
+    adflags = adeos_test_and_stall_pipeline();
+    preempt_disable();
+    __adeos_unlock_irq(adp_cpu_current[cpuid],irq);
+    __adeos_std_irq_dtype[irq].enable(irq);
+    preempt_enable_no_resched();
+    adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
+    adeos_hw_local_irq_restore(hwflags);
+}
+
+static void __adeos_override_irq_disable (unsigned irq)
+
+{
+    unsigned long adflags, hwflags;
+    adeos_declare_cpuid;
+
+    adeos_hw_local_irq_save(hwflags);
+    adflags = adeos_test_and_stall_pipeline();
+    preempt_disable();
+    __adeos_std_irq_dtype[irq].disable(irq);
+    __adeos_lock_irq(adp_cpu_current[cpuid],cpuid,irq);
+    preempt_enable_no_resched();
+    adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
+    adeos_hw_local_irq_restore(hwflags);
+}
+
+static void __adeos_override_irq_end (unsigned irq)
+
+{
+    unsigned long adflags, hwflags;
+    adeos_declare_cpuid;
+
+    adeos_hw_local_irq_save(hwflags);
+    adflags = adeos_test_and_stall_pipeline();
+    preempt_disable();
+
+    if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+       __adeos_unlock_irq(adp_cpu_current[cpuid],irq);
+
+    __adeos_std_irq_dtype[irq].end(irq);
+
+    preempt_enable_no_resched();
+    adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
+    adeos_hw_local_irq_restore(hwflags);
+}
+
+static void __adeos_override_irq_affinity (unsigned irq, cpumask_t mask)
+
+{
+    unsigned long adflags, hwflags;
+    adeos_declare_cpuid;
+
+    adeos_hw_local_irq_save(hwflags);
+    adflags = adeos_test_and_stall_pipeline();
+    preempt_disable();
+    __adeos_std_irq_dtype[irq].set_affinity(irq,mask);
+    preempt_enable_no_resched();
+    adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
+    adeos_hw_local_irq_restore(hwflags);
+}
+
+static void  __adeos_enable_sync (void)
+
+{
+    __adeos_decr_next[adeos_processor_id()] = __adeos_read_timebase() + get_dec();
+}
+
+/* __adeos_enable_pipeline() -- Take over the interrupt control from
+   the root domain (i.e. Linux). After this routine has returned, all
+   interrupts go through the pipeline. */
+
+void __adeos_enable_pipeline (void)
+
+{
+    unsigned long flags;
+    unsigned irq;
+
+    flags = adeos_critical_enter(&__adeos_enable_sync);
+
+    /* First, virtualize all interrupts from the root domain. */
+
+    for (irq = 0; irq < NR_IRQS; irq++)
+       adeos_virtualize_irq(irq,
+                            (void (*)(unsigned))&__adeos_do_IRQ,
+                            &__adeos_ack_irq,
+                            IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
+
+    /* We use a virtual IRQ to handle the timer irq (decrementer trap)
+       which has been allocated early in __adeos_init_platform(). */
+
+    adeos_virtualize_irq(ADEOS_TIMER_VIRQ,
+                        (void (*)(unsigned))&__adeos_do_timer,
+                        NULL,
+                        IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
+  
+
+    /* Interpose on the IRQ control routines so we can make them
+       atomic using hw masking and prevent the interrupt log from
+       being untimely flushed. */
+
+    for (irq = 0; irq < NR_IRQS; irq++) 
+       {
+       if (irq_desc[irq].handler != NULL)
+           __adeos_std_irq_dtype[irq] = *irq_desc[irq].handler;
+       }
+
+    /* The original controller structs are often shared, so we first
+       save them all before changing any of them. Notice that we don't
+       override the ack() handler since we will enforce the necessary
+       setup in __adeos_ack_irq(). */
+
+    for (irq = 0; irq < NR_IRQS; irq++)
+       {
+       struct hw_interrupt_type *handler = irq_desc[irq].handler;
+
+       if (handler == NULL)
+           continue;
+
+       if (handler->enable != NULL)
+           handler->enable = &__adeos_override_irq_enable;
+
+       if (handler->disable != NULL)
+           handler->disable = &__adeos_override_irq_disable;
+
+       if (handler->end != NULL)
+           handler->end = &__adeos_override_irq_end;
+
+       if (handler->set_affinity != NULL)
+           handler->set_affinity = &__adeos_override_irq_affinity;
+       }
+
+    __adeos_decr_next[adeos_processor_id()] = __adeos_read_timebase() + get_dec();
+
+    adp_pipelined = 1;
+
+    adeos_critical_exit(flags);
+}
+
+/* __adeos_disable_pipeline() -- Disengage the pipeline. */
+
+void __adeos_disable_pipeline (void)
+
+{
+    unsigned long flags;
+    unsigned irq;
+
+    flags = adeos_critical_enter(NULL);
+
+    /* Restore interrupt controllers. */
+
+    for (irq = 0; irq < NR_IRQS; irq++)
+       {
+       if (irq_desc[irq].handler != NULL)
+           *irq_desc[irq].handler = __adeos_std_irq_dtype[irq];
+       }
+
+    adp_pipelined = 0;
+
+    adeos_critical_exit(flags);
+}
+
+/* adeos_virtualize_irq_from() -- Attach a handler (and optionally a
+   hw acknowledge routine) to an interrupt for the given domain. */
+
+int adeos_virtualize_irq_from (adomain_t *adp,
+                              unsigned irq,
+                              void (*handler)(unsigned irq),
+                              int (*acknowledge)(unsigned irq),
+                              unsigned modemask)
+{
+    unsigned long flags;
+    int err;
+
+    if (irq >= IPIPE_NR_IRQS)
+       return -EINVAL;
+
+    if (adp->irqs[irq].control & IPIPE_SYSTEM_MASK)
+       return -EPERM;
+       
+    adeos_spin_lock_irqsave(&__adeos_pipelock,flags);
+
+    if (handler != NULL)
+       {
+       /* A bit of hack here: if we are re-virtualizing an IRQ just
+          to change the acknowledge routine by passing the special
+          ADEOS_SAME_HANDLER value, then allow to recycle the current
+          handler for the IRQ. This allows Linux device drivers
+          managing shared IRQ lines to call adeos_virtualize_irq() in
+          addition to request_irq() just for the purpose of
+          interposing their own shared acknowledge routine. */
+
+       if (handler == ADEOS_SAME_HANDLER)
+           {
+           handler = adp->irqs[irq].handler;
+
+           if (handler == NULL)
+               {
+               err = -EINVAL;
+               goto unlock_and_exit;
+               }
+           }
+       else if ((modemask & IPIPE_EXCLUSIVE_MASK) != 0 &&
+                adp->irqs[irq].handler != NULL)
+           {
+           err = -EBUSY;
+           goto unlock_and_exit;
+           }
+       
+       if ((modemask & (IPIPE_SHARED_MASK|IPIPE_PASS_MASK)) == IPIPE_SHARED_MASK)
+           {
+           err = -EINVAL;
+           goto unlock_and_exit;
+           }
+
+       if ((modemask & IPIPE_STICKY_MASK) != 0)
+           modemask |= IPIPE_HANDLE_MASK;
+       }
+    else
+       modemask &= ~(IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK|IPIPE_SHARED_MASK);
+
+    if (acknowledge == NULL)
+       {
+       if ((modemask & IPIPE_SHARED_MASK) == 0)
+           /* Acknowledge handler unspecified -- this is ok in
+              non-shared management mode, but we will force the use
+              of the Linux-defined handler instead. */
+           acknowledge = adp_root->irqs[irq].acknowledge;
+       else
+           {
+           /* A valid acknowledge handler to be called in shared mode
+              is required when declaring a shared IRQ. */
+           err = -EINVAL;
+           goto unlock_and_exit;
+           }
+       }
+
+    adp->irqs[irq].handler = handler;
+    adp->irqs[irq].acknowledge = acknowledge;
+    adp->irqs[irq].control = modemask;
+
+    if (irq < NR_IRQS &&
+       handler != NULL &&
+       !adeos_virtual_irq_p(irq) &&
+       (modemask & IPIPE_ENABLE_MASK) != 0)
+       {
+       if (adp != adp_current)
+           {
+           /* IRQ enable/disable state is domain-sensitive, so we may
+              not change it for another domain. What is allowed
+              however is forcing some domain to handle an interrupt
+              source, by passing the proper 'adp' descriptor which
+              thus may be different from adp_current. */
+           err = -EPERM;
+           goto unlock_and_exit;
+           }
+
+       enable_irq(irq);
+       }
+
+    err = 0;
+
+unlock_and_exit:
+
+    adeos_spin_unlock_irqrestore(&__adeos_pipelock,flags);
+
+    return err;
+}
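+
+/* A client domain typically virtualizes the interrupts it wants to
+   see, e.g. (sketch; my_irq_handler is hypothetical):
+
+   adeos_virtualize_irq_from(adp_current,
+                             irq,
+                             &my_irq_handler,
+                             NULL,
+                             IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
+*/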
+
+/* adeos_control_irq() -- Change an interrupt mode. This affects the
+   way a given interrupt is handled by ADEOS for the current
+   domain. setmask is a bitmask telling whether:
+   - the interrupt should be passed to the domain (IPIPE_HANDLE_MASK),
+     and/or
+   - the interrupt should be passed down to the lower priority domain(s)
+     in the pipeline (IPIPE_PASS_MASK).
+   This leads to four possibilities:
+   - PASS only => Ignore the interrupt
+   - HANDLE only => Terminate the interrupt (process but don't pass down)
+   - PASS + HANDLE => Accept the interrupt (process and pass down)
+   - <none> => Discard the interrupt
+   - DYNAMIC is currently an alias of HANDLE since it marks an interrupt
+   which is processed by the current domain but not implicitly passed
+   down to the pipeline, letting the domain's handler choose on a case-
+   by-case basis whether the interrupt propagation should be forced
+   using adeos_propagate_irq().
+   clrmask clears the corresponding bits from the control field before
+   setmask is applied.
+*/
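+
+/* E.g. switching an IRQ to "terminate" mode for the current domain
+   -- process it locally, stop passing it down (sketch):
+
+   adeos_control_irq(irq,IPIPE_PASS_MASK,IPIPE_HANDLE_MASK);
+*/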
+
+int adeos_control_irq (unsigned irq,
+                      unsigned clrmask,
+                      unsigned setmask)
+{
+    irq_desc_t *desc;
+    unsigned long flags;
+
+    if (irq >= IPIPE_NR_IRQS)
+       return -EINVAL;
+
+    if (adp_current->irqs[irq].control & IPIPE_SYSTEM_MASK)
+       return -EPERM;
+       
+    if (((setmask|clrmask) & IPIPE_SHARED_MASK) != 0)
+       return -EINVAL;
+       
+    desc = irq_desc + irq;
+
+    if (adp_current->irqs[irq].handler == NULL)
+       setmask &= ~(IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK);
+
+    if ((setmask & IPIPE_STICKY_MASK) != 0)
+       setmask |= IPIPE_HANDLE_MASK;
+
+    if ((clrmask & (IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK)) != 0) /* If one goes, both go. */
+       clrmask |= (IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK);
+
+    adeos_spin_lock_irqsave(&__adeos_pipelock,flags);
+
+    adp_current->irqs[irq].control &= ~clrmask;
+    adp_current->irqs[irq].control |= setmask;
+
+    if ((setmask & IPIPE_ENABLE_MASK) != 0)
+       enable_irq(irq);
+    else if ((clrmask & IPIPE_ENABLE_MASK) != 0)
+       disable_irq(irq);
+
+    adeos_spin_unlock_irqrestore(&__adeos_pipelock,flags);
+
+    return 0;
+}
+
+#ifdef CONFIG_ADEOS_THREADS
+
+void __adeos_init_domain (adomain_t *adp, adattr_t *attr)
+
+{
+    int estacksz = attr->estacksz > 0 ? attr->estacksz : 16384, _cpuid;
+    unsigned long flags, *ksp;
+    adeos_declare_cpuid;
+
+    adeos_hw_local_irq_flags(flags);
+
+    for (_cpuid = 0; _cpuid < num_online_cpus(); _cpuid++)
+       {
+       adp->estackbase[_cpuid] = (unsigned long)kmalloc(estacksz,GFP_KERNEL);
+    
+       if (adp->estackbase[_cpuid] == 0)
+           panic("Adeos: No memory for domain stack on CPU #%d",_cpuid);
+       
+       adp->esp[_cpuid] = adp->estackbase[_cpuid];
+       ksp = (unsigned long *)((adp->esp[_cpuid] + estacksz - 16) & ~0xf);
+       *ksp = 0L; /* first stack frame back-chain */
+       ksp = ksp - STACK_FRAME_OVERHEAD; /* first stack frame (entry uses) 
+                                          * (less would do) */
+       *ksp = (unsigned long)ksp+STACK_FRAME_OVERHEAD; /* second back-chain */
+       ksp = ksp - 224; /* domain context */
+       adp->esp[_cpuid] = (unsigned long)ksp - STACK_FRAME_OVERHEAD;
+       *((unsigned long *)adp->esp[_cpuid]) = (unsigned long)ksp + 224; /* back-chain */
+       /* NOTE: these depend on _adeos_switch_domain ordering */
+       ksp[18] = (unsigned long)get_paca(); /* r13 needs to hold paca */
+       ksp[19] = (_cpuid == cpuid); /* r3 */
+       ksp[20] = ((unsigned long *)attr->entry)[1]; /* r2 = TOC base */
+       ksp[25] = ((unsigned long *)attr->entry)[0]; /* lr = entry addr. */
+       ksp[26] = flags & ~MSR_EE; /* msr */    
+       }
+}
+
+#else /* !CONFIG_ADEOS_THREADS */
+
+void __adeos_init_domain (adomain_t *adp, adattr_t *attr)
+
+{}
+
+#endif /* CONFIG_ADEOS_THREADS */
+
+void __adeos_cleanup_domain (adomain_t *adp)
+
+{
+    int nr_cpus = num_online_cpus();
+    int _cpuid;
+
+    adeos_unstall_pipeline_from(adp);
+
+    for (_cpuid = 0; _cpuid < nr_cpus; _cpuid++)
+       {
+#ifdef CONFIG_SMP
+       while (adp->cpudata[_cpuid].irq_pending_hi != 0)
+           cpu_relax();
+
+       while (test_bit(IPIPE_XPEND_FLAG,&adp->cpudata[_cpuid].status))
+           cpu_relax();
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_ADEOS_THREADS
+       if (adp->estackbase[_cpuid] != 0)
+           kfree((void *)adp->estackbase[_cpuid]);
+#endif /* CONFIG_ADEOS_THREADS */
+       }
+}
+
+int adeos_get_sysinfo (adsysinfo_t *info)
+
+{
+    info->ncpus = num_online_cpus();
+    info->cpufreq = adeos_cpu_freq();
+    info->archdep.tmirq = ADEOS_TIMER_VIRQ;
+    info->archdep.tmfreq = info->cpufreq;
+
+    return 0;
+}
+
+static void __adeos_set_decr (void)
+
+{
+    adeos_declare_cpuid;
+
+    adeos_load_cpuid();
+
+    disarm_decr[cpuid] = (__adeos_decr_ticks != tb_ticks_per_jiffy);
+    __adeos_decr_next[cpuid] = __adeos_read_timebase() + __adeos_decr_ticks;
+    set_dec(__adeos_decr_ticks);
+}
+
+int adeos_tune_timer (unsigned long ns, int flags)
+
+{
+    unsigned long x, ticks;
+
+    if (flags & ADEOS_RESET_TIMER)
+       ticks = tb_ticks_per_jiffy;
+    else
+       {
+       ticks = ns * tb_ticks_per_jiffy / (1000000000 / HZ);
+
+       if (ticks > tb_ticks_per_jiffy)
+           return -EINVAL;
+       }
+
+    x = adeos_critical_enter(&__adeos_set_decr); /* Sync with all CPUs */
+    __adeos_decr_ticks = ticks;
+    __adeos_set_decr();
+    adeos_critical_exit(x);
+
+    return 0;
+}
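+
+/* E.g. shortening the decrementer period to 100 us, then restoring
+   the stock Linux tick (sketch):
+
+   adeos_tune_timer(100000,0);
+   ...
+   adeos_tune_timer(0,ADEOS_RESET_TIMER);
+*/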
+
+/* adeos_send_ipi() -- Send a specified service IPI to a set of
+   processors. */
+
+int adeos_send_ipi (unsigned ipi, cpumask_t cpumask)
+
+{
+    printk(KERN_WARNING "Adeos: Call to unimplemented adeos_send_ipi() from 
%s\n",adp_current->name);
+    return 0;
+}
diff -Nru linux-2.6.10/arch/ppc64/Kconfig linux-2.6.10-adeos-ppc64-r3/arch/ppc64/Kconfig
--- linux-2.6.10/arch/ppc64/Kconfig     2004-12-24 23:34:58.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/arch/ppc64/Kconfig      2005-11-13 11:45:31.000000000 +0200
@@ -370,6 +370,8 @@
        depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
        default y
 
+source "adeos/Kconfig"
+
 source "arch/ppc64/oprofile/Kconfig"
 
 source "arch/ppc64/Kconfig.debug"
diff -Nru linux-2.6.10/arch/ppc64/kernel/adeos.c linux-2.6.10-adeos-ppc64-r3/arch/ppc64/kernel/adeos.c
--- linux-2.6.10/arch/ppc64/kernel/adeos.c      1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/arch/ppc64/kernel/adeos.c       2005-11-13 11:45:31.000000000 +0200
@@ -0,0 +1,700 @@
+/*
+ *   linux/arch/ppc64/kernel/adeos.c
+ *
+ *   Adeos 64-bit PowerPC adaptation
+ *   Copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
+ *   based on previous work:
+ *     
+ *   Copyright (C) 2004 Philippe Gerum.
+ *
+ *   Adeos/PPC port over 2.6 based on the previous 2.4 implementation by:
+ *
+ *   Copyright (C) 2004 Wolfgang Grandegger.
+ *
+ *   It follows closely the ARM and x86 ports of ADEOS.
+ *
+ *   Copyright (C) 2003 Philippe Gerum.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ *   Architecture-dependent ADEOS core support for PowerPC
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/hw_irq.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/time.h>
+#include <asm/machdep.h> /* ppc_md */
+
+#ifdef CONFIG_SMP
+
+static cpumask_t __adeos_cpu_sync_map;
+
+static cpumask_t __adeos_cpu_lock_map;
+
+static spinlock_t __adeos_cpu_barrier = SPIN_LOCK_UNLOCKED;
+
+static atomic_t __adeos_critical_count = ATOMIC_INIT(0);
+
+static void (*__adeos_cpu_sync)(void);
+
+#endif /* CONFIG_SMP */
+
+void do_IRQ(struct pt_regs *regs);
+
+extern struct list_head __adeos_pipeline;
+
+struct pt_regs __adeos_irq_regs;
+
+/* Current reload value for the decrementer. */
+unsigned long __adeos_decr_ticks;
+
+/* Next tick date (timebase value). */
+unsigned long __adeos_decr_next[ADEOS_NR_CPUS];
+
+static inline unsigned long ffnz (unsigned long ul) {
+
+    __asm__ __volatile__ ("cntlzd %0, %1" : "=r" (ul) : "r" (ul & (-ul)));
+    return 63 - ul;
+}
+
+#ifdef CONFIG_SMP
+
+/* Always called with hw interrupts off. */
+
+static void __adeos_do_critical_sync (unsigned irq)
+
+{
+    adeos_declare_cpuid;
+
+    adeos_load_cpuid();
+
+    cpu_set(cpuid,__adeos_cpu_sync_map);
+
+    /* Now we are in sync with the lock requestor running on another
+       CPU. Enter a spinning wait until he releases the global
+       lock. */
+    adeos_spin_lock(&__adeos_cpu_barrier);
+
+    /* Got it. Now get out. */
+
+    if (__adeos_cpu_sync)
+       /* Call the sync routine if any. */
+       __adeos_cpu_sync();
+
+    adeos_spin_unlock(&__adeos_cpu_barrier);
+
+    cpu_clear(cpuid,__adeos_cpu_sync_map);
+}
+
+#endif /* CONFIG_SMP */
+
+/* adeos_critical_enter() -- Grab the superlock for entering a global
+   critical section. On UP, this reduces to disabling hw interrupts
+   (i.e. hw cli()). */
+
+unsigned long adeos_critical_enter (void (*syncfn)(void))
+
+{
+    unsigned long flags;
+
+    adeos_hw_local_irq_save(flags);
+
+#ifdef CONFIG_SMP
+    if (num_online_cpus() > 1) /* We might be running an SMP kernel on a UP box... */
+       {
+       adeos_declare_cpuid;
+       cpumask_t lock_map;
+
+       adeos_load_cpuid();
+
+       if (!cpu_test_and_set(cpuid,__adeos_cpu_lock_map))
+           {
+           while (cpu_test_and_set(BITS_PER_LONG - 1,__adeos_cpu_lock_map))
+               {
+               /* Refer to the explanations found in
+                  linux/arch/i386/kernel/irq.c about
+                  SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND for more about
+                  this strange loop. */
+               int n = 0;
+               do { cpu_relax(); } while (++n < cpuid);
+               }
+
+           adeos_spin_lock(&__adeos_cpu_barrier);
+
+           __adeos_cpu_sync = syncfn;
+
+           /* Send the sync IPI to all processors but the current one. */
+           __adeos_send_IPI_allbutself(ADEOS_CRITICAL_VECTOR);
+
+           cpus_andnot(lock_map,cpu_online_map,__adeos_cpu_lock_map);
+
+           while (!cpus_equal(__adeos_cpu_sync_map,lock_map))
+               cpu_relax();
+           }
+
+       atomic_inc(&__adeos_critical_count);
+       }
+#endif /* CONFIG_SMP */
+
+    return flags;
+}
+
+/* adeos_critical_exit() -- Release the superlock. */
+
+void adeos_critical_exit (unsigned long flags)
+
+{
+#ifdef CONFIG_SMP
+    if (num_online_cpus() > 1) /* We might be running an SMP kernel on a UP box... */
+       {
+       adeos_declare_cpuid;
+
+       adeos_load_cpuid();
+
+       if (atomic_dec_and_test(&__adeos_critical_count))
+           {
+           adeos_spin_unlock(&__adeos_cpu_barrier);
+
+           while (!cpus_empty(__adeos_cpu_sync_map))
+               cpu_relax();
+
+           cpu_clear(cpuid,__adeos_cpu_lock_map);
+           cpu_clear(BITS_PER_LONG - 1,__adeos_cpu_lock_map);
+           }
+       }
+#endif /* CONFIG_SMP */
+
+    adeos_hw_local_irq_restore(flags);
+}
+
+void __adeos_init_platform (void)
+
+{
+    unsigned timer_virq;
+
+    /* Allocate a virtual IRQ for the decrementer trap early to get it
+       mapped to IPIPE_VIRQ_BASE */
+
+    timer_virq = adeos_alloc_irq();
+
+    if (timer_virq != ADEOS_TIMER_VIRQ)
+       panic("Adeos: cannot reserve timer virq #%d (got #%d)",
+             ADEOS_TIMER_VIRQ,
+             timer_virq);
+
+    __adeos_decr_ticks = tb_ticks_per_jiffy;
+}
+
+void __adeos_init_stage (adomain_t *adp)
+
+{
+    int cpuid, n;
+
+    for (cpuid = 0; cpuid < ADEOS_NR_CPUS; cpuid++)
+       {
+       adp->cpudata[cpuid].irq_pending_hi = 0;
+
+       for (n = 0; n < IPIPE_IRQ_IWORDS; n++)
+           adp->cpudata[cpuid].irq_pending_lo[n] = 0;
+
+       for (n = 0; n < IPIPE_NR_IRQS; n++)
+           adp->cpudata[cpuid].irq_hits[n] = 0;
+       }
+
+    for (n = 0; n < IPIPE_NR_IRQS; n++)
+       {
+       adp->irqs[n].acknowledge = NULL;
+       adp->irqs[n].handler = NULL;
+       adp->irqs[n].control = IPIPE_PASS_MASK; /* Pass but don't handle */
+       }
+
+#ifdef CONFIG_SMP
+    adp->irqs[ADEOS_CRITICAL_IPI].acknowledge = &__adeos_ack_irq;
+    adp->irqs[ADEOS_CRITICAL_IPI].handler = &__adeos_do_critical_sync;
+    /* Immediately handle in the current domain but *never* pass */
+    adp->irqs[ADEOS_CRITICAL_IPI].control = IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK|IPIPE_SYSTEM_MASK;
+#endif /* CONFIG_SMP */
+}
+
+/* __adeos_sync_stage() -- Flush the pending IRQs for the current
+   domain (and processor).  This routine flushes the interrupt log
+   (see "Optimistic interrupt protection" from D. Stodolsky et al. for
+   more on the deferred interrupt scheme). Every interrupt that
+   occurred while the pipeline was stalled gets played.  WARNING:
+   callers on SMP boxen should always check for CPU migration on
+   return of this routine. One can control the kind of interrupts
+   which are going to be sync'ed using the syncmask
+   parameter. IPIPE_IRQMASK_ANY plays them all, IPIPE_IRQMASK_VIRT
+   plays virtual interrupts only. This routine must be called with hw
+   interrupts off. */
+
+void __adeos_sync_stage (unsigned long syncmask)
+
+{
+    unsigned long mask, submask;
+    struct adcpudata *cpudata;
+    int level, rank;
+    adeos_declare_cpuid;
+    adomain_t *adp;
+    unsigned irq;
+
+    adeos_load_cpuid();
+    adp = adp_cpu_current[cpuid];
+    cpudata = &adp->cpudata[cpuid];
+
+    if (__test_and_set_bit(IPIPE_SYNC_FLAG,&cpudata->status))
+       return;
+
+    /* The policy here is to keep the dispatching code interrupt-free
+       by stalling the current stage. If the upper domain handler
+       (which we call) wants to re-enable interrupts while in a safe
+       portion of the code (e.g. SA_INTERRUPT flag unset for Linux's
+       sigaction()), it will have to unstall (then stall again before
+       returning to us!) the stage when it sees fit. */
+
+    while ((mask = (cpudata->irq_pending_hi & syncmask)) != 0)
+       {
+       /* Give a slight priority advantage to high-numbered IRQs
+          like the virtual ones. */
+       level = ffnz(mask);
+       __clear_bit(level,&cpudata->irq_pending_hi);
+
+       while ((submask = cpudata->irq_pending_lo[level]) != 0)
+           {
+           rank = ffnz(submask);
+           irq = (level << IPIPE_IRQ_ISHIFT) + rank;
+
+           if (test_bit(IPIPE_LOCK_FLAG,&adp->irqs[irq].control))
+               {
+               __clear_bit(rank,&cpudata->irq_pending_lo[level]);
+               continue;
+               }
+
+           if (--cpudata->irq_hits[irq] == 0)
+               __clear_bit(rank,&cpudata->irq_pending_lo[level]);
+
+           __set_bit(IPIPE_STALL_FLAG,&cpudata->status);
+
+#ifdef CONFIG_ADEOS_PROFILING
+           __adeos_profile_data[cpuid].irqs[irq].n_synced++;
+           adeos_hw_tsc(__adeos_profile_data[cpuid].irqs[irq].t_synced);
+#endif /* CONFIG_ADEOS_PROFILING */
+
+           if (adp == adp_root)
+               {
+               adeos_hw_sti();
+               ((void (*)(unsigned, struct pt_regs *))adp->irqs[irq].handler)(irq,&__adeos_irq_regs);
+               adeos_hw_cli();
+               }
+           else
+               {
+               __clear_bit(IPIPE_SYNC_FLAG,&cpudata->status);
+               adp->irqs[irq].handler(irq);
+               __set_bit(IPIPE_SYNC_FLAG,&cpudata->status);            
+               }
+
+#ifdef CONFIG_SMP
+           {
+           int _cpuid = adeos_processor_id();
+
+           if (_cpuid != cpuid) /* Handle CPU migration. */
+               {
+               /* We expect any domain to clear the SYNC bit each
+                  time it switches in a new task, so that preemptions
+                  and/or CPU migrations (in the SMP case) over the
+                  ISR do not lock out the log syncer for some
+                  indefinite amount of time. In the Linux case,
+                  schedule() handles this (see kernel/sched.c). For
+                  this reason, we don't bother clearing it here for
+                  the source CPU in the migration handling case,
+                  since it must have scheduled another task in by
+                  now. */
+               cpuid = _cpuid;
+               cpudata = &adp->cpudata[cpuid];
+               __set_bit(IPIPE_SYNC_FLAG,&cpudata->status);
+               }
+           }
+#endif /* CONFIG_SMP */
+
+           __clear_bit(IPIPE_STALL_FLAG,&cpudata->status);
+           }
+       }
+
+    __clear_bit(IPIPE_SYNC_FLAG,&cpudata->status);
+}
+
+int __adeos_ack_irq (unsigned irq)
+
+{
+    irq_desc_t *desc = get_irq_desc(irq);
+
+    if (desc->handler->ack != NULL)
+       {
+       unsigned long adflags;
+       adeos_declare_cpuid;
+
+       /* No need to mask IRQs at hw level: we are always called from
+          __adeos_handle_irq(), so interrupts are already off. We
+          stall the pipeline so that spin_lock_irq*() ops won't
+          unintentionally flush it, since this could cause infinite
+          recursion. */
+
+       adeos_load_cpuid();
+       adflags = adeos_test_and_stall_pipeline();
+       preempt_disable();
+       spin_lock(&desc->lock);
+       desc->handler->ack(irq);
+       spin_unlock(&desc->lock);
+       preempt_enable_no_resched();
+       adeos_restore_pipeline_nosync(adp_cpu_current[cpuid],adflags,cpuid);
+       }
+
+    return 1;
+}
+
+static inline void __adeos_walk_pipeline (struct list_head *pos, int cpuid)
+
+{
+    adomain_t *this_domain = adp_cpu_current[cpuid];
+
+    while (pos != &__adeos_pipeline)
+       {
+       adomain_t *next_domain = list_entry(pos,adomain_t,p_link);
+
+       if (test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status))
+           break; /* Stalled stage -- do not go further. */
+
+       if (next_domain->cpudata[cpuid].irq_pending_hi != 0)
+           {
+           /* Since the critical IPI might be dispatched by the
+              following actions, the current domain might not be
+              linked to the pipeline anymore after its handler
+              returns on SMP boxes, even if the domain remains valid
+              (see adeos_unregister_domain()), so don't make any
+              dangerous assumptions here. */
+
+           if (next_domain == this_domain)
+               __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+           else
+               {
+               __adeos_switch_to(this_domain,next_domain,cpuid);
+
+               adeos_load_cpuid(); /* Processor might have changed. */
+
+               if (this_domain->cpudata[cpuid].irq_pending_hi != 0 &&
+                   !test_bit(IPIPE_STALL_FLAG,&this_domain->cpudata[cpuid].status))
+                   __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+               }
+
+           break;
+           }
+       else if (next_domain == this_domain)
+           break;
+
+       pos = next_domain->p_link.next;
+       }
+}
+
+/* __adeos_handle_irq() -- ADEOS's generic IRQ handler. An optimistic
+   interrupt protection log is maintained here for each
+   domain. Interrupts are off on entry. */
+
+void __adeos_handle_irq (int irq, struct pt_regs *regs)
+
+{
+    struct list_head *head, *pos;
+    adeos_declare_cpuid;
+    int m_ack, s_ack;
+
+    m_ack = irq & ADEOS_IRQ_ACKED;
+    irq &= ADEOS_IRQ_ACKED_MASK;
+
+    if (irq >= IPIPE_NR_IRQS)
+       {
+       printk(KERN_ERR "Adeos: spurious interrupt %d\n",irq);
+       return;
+       }
+
+    adeos_load_cpuid();
+
+#ifdef CONFIG_ADEOS_PROFILING
+    __adeos_profile_data[cpuid].irqs[irq].n_handled++;
+    adeos_hw_tsc(__adeos_profile_data[cpuid].irqs[irq].t_handled);
+#endif /* CONFIG_ADEOS_PROFILING */
+
+    s_ack = m_ack;
+
+    if (test_bit(IPIPE_STICKY_FLAG,&adp_cpu_current[cpuid]->irqs[irq].control))
+       head = &adp_cpu_current[cpuid]->p_link;
+    else
+       head = __adeos_pipeline.next;
+
+    /* Ack the interrupt. */
+
+    pos = head;
+
+    while (pos != &__adeos_pipeline)
+       {
+       adomain_t *_adp = list_entry(pos,adomain_t,p_link);
+
+       /* For each domain handling the incoming IRQ, mark it as
+           pending in its log. */
+
+       if (test_bit(IPIPE_HANDLE_FLAG,&_adp->irqs[irq].control))
+           {
+           /* Domains that handle this IRQ are polled for
+              acknowledging it by decreasing priority order. The
+              interrupt must be made pending _first_ in the domain's
+              status flags before the PIC is unlocked. */
+
+           _adp->cpudata[cpuid].irq_hits[irq]++;
+           __adeos_set_irq_bit(_adp,cpuid,irq);
+
+           /* Always get the first master acknowledge available. Once
+              we've got it, allow slave acknowledge handlers to run
+              (until one of them stops us). */
+
+           if (_adp->irqs[irq].acknowledge != NULL)
+               {
+               if (!m_ack)
+                   m_ack = _adp->irqs[irq].acknowledge(irq);
+               else if (test_bit(IPIPE_SHARED_FLAG,&_adp->irqs[irq].control) && !s_ack)
+                   s_ack = _adp->irqs[irq].acknowledge(irq);
+               }
+           }
+
+       /* If the domain does not want the IRQ to be passed down the
+          interrupt pipe, exit the loop now. */
+
+       if (!test_bit(IPIPE_PASS_FLAG,&_adp->irqs[irq].control))
+           break;
+
+       pos = _adp->p_link.next;
+       }
+
+    /* Now walk the pipeline, yielding control to the highest priority
+       domain that has pending interrupt(s) or immediately to the
+       current domain if the interrupt has been marked as
+       'sticky'. This search does not go beyond the current domain in
+       the pipeline. To understand this code properly, one must keep
+       in mind that domains having a higher priority than the current
+       one are sleeping on the adeos_suspend_domain() service. In
+       addition, domains having a lower priority have been preempted
+       by an interrupt dispatched to a higher priority domain. Once
+       the first and highest priority stage has been selected here,
+       the subsequent stages will be activated in turn when each
+       visited domain calls adeos_suspend_domain() to wake up its
+       neighbour down the pipeline. */
+
+    __adeos_walk_pipeline(head,cpuid);
+}
+
+/* ADEOS's version of the interrupt trap handler. */
+
+int __adeos_grab_irq (struct pt_regs *regs)
+
+{
+    extern int ppc_spurious_interrupts;
+    adeos_declare_cpuid;
+    int irq;
+
+    if (!adp_pipelined)
+       {
+       do_IRQ(regs);
+       return 1;
+       }
+
+    irq = ppc_md.get_irq(regs);
+    if (irq >= 0)
+       {
+       __adeos_handle_irq(irq,regs);
+       }
+    else 
+       ppc_spurious_interrupts++;
+
+    adeos_load_cpuid();
+
+    return (adp_cpu_current[cpuid] == adp_root &&
+           !test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status));
+}
+
+/* ADEOS's version of irq.c:do_IRQ(). */
+
+void __adeos_do_IRQ (int irq, struct pt_regs *regs) {
+       irq_enter();
+       ppc_irq_dispatch_handler(regs, irq);
+       irq_exit();
+}
+
+/* ADEOS's version of the decrementer trap handler. */
+
+int __adeos_grab_timer (struct pt_regs *regs)
+
+{
+    adeos_declare_cpuid;
+
+    if (!adp_pipelined)
+       {
+       timer_interrupt(regs);
+       return 1;
+       }
+
+    /* On 970 CPUs DEC cannot be disabled and without setting DEC
+     * here, DEC interrupt would be triggered as soon as interrupts are
+     * enabled in __adeos_sync_stage 
+     */
+    set_dec(0x7fffffff);
+    
+    __adeos_irq_regs.msr = regs->msr; /* for do_timer() */
+
+    __adeos_handle_irq(ADEOS_TIMER_VIRQ,regs);
+
+    adeos_load_cpuid();
+
+    if (__adeos_decr_ticks != tb_ticks_per_jiffy)
+       {
+       unsigned long next_date, now;
+
+       next_date = __adeos_decr_next[cpuid];
+
+       while ((now = __adeos_read_timebase()) >= next_date)
+           next_date += __adeos_decr_ticks;
+
+       set_dec(next_date - now);
+
+       __adeos_decr_next[cpuid] = next_date;
+       }
+
+    return (adp_cpu_current[cpuid] == adp_root &&
+           !test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status));
+}
+
+void __adeos_do_timer (int irq, struct pt_regs *regs)
+
+{
+    timer_interrupt(regs);
+}
+
+asmlinkage int __adeos_check_root (struct pt_regs *regs)
+
+{
+    adeos_declare_cpuid;
+    /* This routine is called with hw interrupts off, so no migration
+       can occur while checking the identity of the current domain. */
+    adeos_load_cpuid();
+    return (adp_cpu_current[cpuid] == adp_root &&
+           !test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status));
+}
+
+/* adeos_trigger_irq() -- Push the interrupt to the pipeline entry
+   just as if it had actually been received from a hw source. This
+   works for both real and virtual interrupts. It also means that the
+   current domain might be immediately preempted by a higher priority
+   domain which happens to handle this interrupt. */
+
+int adeos_trigger_irq (unsigned irq)
+
+{
+    struct pt_regs regs;
+    unsigned long flags;
+
+    if (irq >= IPIPE_NR_IRQS ||
+       (adeos_virtual_irq_p(irq) && !test_bit(irq - IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map)))
+       return -EINVAL;
+
+    adeos_hw_local_irq_save(flags);
+
+    regs.msr = flags;
+
+    __adeos_handle_irq(irq | ADEOS_IRQ_ACKED, &regs);
+
+    adeos_hw_local_irq_restore(flags);
+
+    return 1;
+}
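+
+/* Combined with a virtual IRQ, this provides a software-triggered
+   channel, e.g. (sketch; my_virq_handler is hypothetical):
+
+   unsigned virq = adeos_alloc_irq();
+   adeos_virtualize_irq(virq,&my_virq_handler,NULL,IPIPE_HANDLE_MASK);
+   adeos_trigger_irq(virq);
+*/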
+
+int __adeos_enter_syscall (struct pt_regs *regs)
+
+{
+    adeos_declare_cpuid;
+    unsigned long flags;
+
+    /* This routine either returns:
+       0 -- if the syscall is to be passed to Linux;
+       1 -- if the syscall should not be passed to Linux, and no
+       tail work should be performed;
+       -1 -- if the syscall should not be passed to Linux but the
+       tail work has to be performed. */
+
+    if (__adeos_event_monitors[ADEOS_SYSCALL_PROLOGUE] > 0 &&
+       __adeos_handle_event(ADEOS_SYSCALL_PROLOGUE,regs) > 0)
+       {
+       if (adp_current == adp_root && !in_atomic())
+           {
+           /* Sync pending VIRQs before _TIF_NEED_RESCHED is
+            * tested. */
+
+           adeos_lock_cpu(flags);
+
+           if ((adp_root->cpudata[cpuid].irq_pending_hi & IPIPE_IRQMASK_VIRT) != 0)
+               __adeos_sync_stage(IPIPE_IRQMASK_VIRT);
+
+           adeos_unlock_cpu(flags);
+
+           return -1;
+           }
+
+       return 1;
+       }
+
+    return 0;
+}
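+
+/* For illustration only -- a client-domain hook relying on the above
+   contract. adeos_catch_event() and adeos_propagate_event() are
+   assumed here as per the documented Adeos API; is_mine() is a
+   hypothetical predicate on the syscall number held in gpr[0]:
+
+       static void my_syscall_hook (adevinfo_t *evinfo)
+       {
+           struct pt_regs *regs = (struct pt_regs *)evinfo->evdata;
+
+           if (is_mine(regs->gpr[0]))
+               return; /* absorbed: __adeos_enter_syscall() returns > 0 */
+
+           adeos_propagate_event(evinfo); /* pass down: returns 0 above */
+       }
+
+       adeos_catch_event(ADEOS_SYSCALL_PROLOGUE,&my_syscall_hook);
+*/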
+
+int __adeos_exit_syscall (void) 
+
+{
+    if (__adeos_event_monitors[ADEOS_SYSCALL_EPILOGUE] > 0)
+       return __adeos_handle_event(ADEOS_SYSCALL_EPILOGUE,NULL);
+
+    return 0;
+}
+
+EXPORT_SYMBOL(__adeos_init_stage);
+EXPORT_SYMBOL(__adeos_sync_stage);
+EXPORT_SYMBOL(__adeos_irq_regs);
+#ifdef CONFIG_ADEOS_THREADS
+EXPORT_SYMBOL(__adeos_switch_domain);
+#endif /* CONFIG_ADEOS_THREADS */
+EXPORT_SYMBOL(__adeos_do_IRQ);
+EXPORT_SYMBOL(__adeos_do_timer);
+EXPORT_SYMBOL(__adeos_decr_ticks);
+EXPORT_SYMBOL(__adeos_decr_next);
+EXPORT_SYMBOL(__adeos_current_threadinfo);
+EXPORT_SYMBOL(adeos_critical_enter);
+EXPORT_SYMBOL(adeos_critical_exit);
+EXPORT_SYMBOL(adeos_trigger_irq);
diff -Nru linux-2.6.10/arch/ppc64/kernel/entry.S linux-2.6.10-adeos-ppc64-r3/arch/ppc64/kernel/entry.S
--- linux-2.6.10/arch/ppc64/kernel/entry.S      2004-12-24 23:33:49.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/arch/ppc64/kernel/entry.S       2005-11-13 11:45:31.000000000 +0200
@@ -108,6 +108,23 @@
        ori     r11,r11,MSR_EE
        mtmsrd  r11,1
 
+#ifdef CONFIG_ADEOS_CORE
+       addi    r3,r1,GPR0
+       bl      .__adeos_enter_syscall
+       cmpdi   r3,0
+       ld      r0,GPR0(r1)
+       ld      r3,GPR3(r1)
+       ld      r4,GPR4(r1)
+       ld      r5,GPR5(r1)
+       ld      r6,GPR6(r1)
+       ld      r7,GPR7(r1)
+       ld      r8,GPR8(r1)
+       ld      r9,GPR9(r1)
+       bgt     adeos_end_syscall
+       blt     syscall_exit
+       addi    r9,r1,STACK_FRAME_OVERHEAD
+#endif /* CONFIG_ADEOS_CORE */
+
 #ifdef SHOW_SYSCALLS
        bl      .do_show_syscall
        REST_GPR(0,r1)
@@ -145,7 +162,13 @@
        ldx     r10,r11,r0      /* Fetch system call handler [ptr] */
        mtctr   r10
        bctrl                   /* Call handler */
-
+#ifdef CONFIG_ADEOS_CORE
+       std     r3,RESULT(r1)
+       bl      .__adeos_exit_syscall
+       cmpdi   r3,0
+       ld      r3,RESULT(r1)
+       bne-    syscall_exit_adeos
+#endif /* CONFIG_ADEOS_CORE */
 syscall_exit:
 #ifdef SHOW_SYSCALLS
        std     r3,GPR3(r1)
@@ -195,6 +218,39 @@
        mtspr   SRR1,r8
        rfid
        b       .       /* prevent speculative execution */
+#ifdef CONFIG_ADEOS_CORE
+syscall_exit_adeos:
+       ld      r5,_CCR(r1)
+       ld      r8,_MSR(r1)
+       ld      r7,_NIP(r1)
+       stdcx.  r0,0,r1                 /* to clear pending reservations */
+       andi.   r6,r8,MSR_PR
+       ld      r4,_LINK(r1)
+       beq-    1f                      /* only restore r13 if */
+       ld      r13,GPR13(r1)           /* returning to usermode */
+1:     ld      r2,GPR2(r1)
+       ld      r1,GPR1(r1)
+       li      r12,MSR_RI
+       mfmsr   r10                     /* should this be done here? */
+       andc    r10,r10,r12
+       mtmsrd  r10,1                   /* clear MSR.RI */
+       mtlr    r4
+       mtcr    r5
+       mtspr   SRR0,r7
+       mtspr   SRR1,r8
+       rfid
+       b       .       /* prevent speculative execution */
+#endif /* CONFIG_ADEOS_CORE */
+
+#ifdef CONFIG_ADEOS_CORE
+       .globl  adeos_end_syscall
+adeos_end_syscall:
+       mfmsr   r10
+       rldicl  r10,r10,48,1
+       rotldi  r10,r10,16
+       mtmsrd  r10,1
+       b       syscall_exit_adeos
+#endif /* CONFIG_ADEOS_CORE */
 
 syscall_enosys:
        li      r3,-ENOSYS
@@ -400,6 +456,14 @@
        beq     2f              /* if yes, don't slbie it */
        oris    r0,r6,0x0800    /* set C (class) bit */
 
+#ifdef CONFIG_ADEOS_CORE
+       /* disable interrupts so that SLB and implementation-specific
+        * address translation optimizations stay sane */
+       mfmsr   r10
+       rldicl  r9,r10,48,1     /* clear MSR_EE */
+       rotldi  r9,r9,16
+       mtmsrd  r9,1
+#endif /* CONFIG_ADEOS_CORE */
        /* Bolt in the new stack SLB entry */
        ld      r7,KSP_VSID(r4) /* Get new stack's VSID */
        oris    r6,r6,(SLB_ESID_V)@h
@@ -408,7 +472,9 @@
        slbie   r0              /* Workaround POWER5 < DD2.1 issue */
        slbmte  r7,r6
        isync
-
+#ifdef CONFIG_ADEOS_CORE
+       mtmsrd  r10,1           /* restore old interrupt state */
+#endif /* CONFIG_ADEOS_CORE */
 2:
 END_FTR_SECTION_IFSET(CPU_FTR_SLB)
        clrrdi  r7,r8,THREAD_SHIFT      /* base of new stack */
@@ -468,6 +534,13 @@
        rotldi  r9,r9,16
        mtmsrd  r9,1            /* Update machine state */
 
+#ifdef CONFIG_ADEOS_CORE
+       bl      .__adeos_check_root
+       cmpdi   r3,0
+       mfmsr   r10             /* r10 is used later; the call above may have clobbered it */
+       beq-    restore
+#endif /* CONFIG_ADEOS_CORE */
+
 #ifdef CONFIG_PREEMPT
        clrrdi  r9,r1,THREAD_SHIFT      /* current_thread_info() */
        li      r0,_TIF_NEED_RESCHED    /* bits to check */
@@ -844,3 +917,124 @@
         blr
        
 #endif /* CONFIG_PPC_MULTIPLATFORM */
+
+#ifdef CONFIG_ADEOS_CORE
+
+_GLOBAL(__adeos_ret_from_except_lite)
+       cmpdi   r3,0
+       bne+    .ret_from_except_lite
+       b       restore
+
+#ifdef CONFIG_ADEOS_THREADS
+
+/*
+ * r3 = adp_next, r4 = adp_cpu_current[adeos_processor_id()].
+ * NOTE: This code is _not_ SMP-compliant. Always called with hw
+ * interrupts off.
+ * TODO: implement (configure time) support for different ABIs?
+ */   
+_GLOBAL(__adeos_switch_domain)
+
+       /* 27*8 = 216 for registers
+        * +8 padding for quad-word alignment as required by spec
+        * = 224 */
+       /* alloc stack frame (store and update r1) */
+       stdu    r1,-224-STACK_FRAME_OVERHEAD(r1)
+
+       /* Save general purpose registers. (22) */
+       std     r31,STACK_FRAME_OVERHEAD+0*8(r1)
+       std     r30,STACK_FRAME_OVERHEAD+1*8(r1)
+       std     r29,STACK_FRAME_OVERHEAD+2*8(r1)
+       std     r28,STACK_FRAME_OVERHEAD+3*8(r1)
+       std     r27,STACK_FRAME_OVERHEAD+4*8(r1)
+       std     r26,STACK_FRAME_OVERHEAD+5*8(r1)
+       std     r25,STACK_FRAME_OVERHEAD+6*8(r1)
+       std     r24,STACK_FRAME_OVERHEAD+7*8(r1)
+       std     r23,STACK_FRAME_OVERHEAD+8*8(r1)
+       std     r22,STACK_FRAME_OVERHEAD+9*8(r1)
+       std     r21,STACK_FRAME_OVERHEAD+10*8(r1)
+       std     r20,STACK_FRAME_OVERHEAD+11*8(r1)
+       std     r19,STACK_FRAME_OVERHEAD+12*8(r1)
+       std     r18,STACK_FRAME_OVERHEAD+13*8(r1)
+       std     r17,STACK_FRAME_OVERHEAD+14*8(r1)
+       std     r16,STACK_FRAME_OVERHEAD+15*8(r1)
+       std     r15,STACK_FRAME_OVERHEAD+16*8(r1)
+       std     r14,STACK_FRAME_OVERHEAD+17*8(r1)
+       std     r13,STACK_FRAME_OVERHEAD+18*8(r1)
+       std      r3,STACK_FRAME_OVERHEAD+19*8(r1)
+       std      r2,STACK_FRAME_OVERHEAD+20*8(r1)
+       std      r0,STACK_FRAME_OVERHEAD+21*8(r1)
+
+       /* Save special registers. (5) */
+       mfctr    r2
+       std      r2,STACK_FRAME_OVERHEAD+22*8(r1)
+       mfcr     r2
+       std      r2,STACK_FRAME_OVERHEAD+23*8(r1)
+       mfxer    r2
+       std      r2,STACK_FRAME_OVERHEAD+24*8(r1)
+       mflr     r2
+       std      r2,STACK_FRAME_OVERHEAD+25*8(r1)
+       mfmsr    r2
+       std      r2,STACK_FRAME_OVERHEAD+26*8(r1)
+
+       /* Actual switch block. */
+       ld       r2,0(r4)       /* r2 = old_adp = adp_cpu_current[cpuid] */
+       std      r1,0(r2)       /* old_adp->esp[0] = sp */
+       std      r3,0(r4)       /* adp_cpu_current[cpuid] = new_adp */
+       /* CONFIG_SMP should sync here; but first, accesses to esp[]
+       would require cpuid-indexing. */
+       ld       r1,0(r3)       /* sp = new_adp->esp[0] */
+
+       /* Restore special registers. */
+       ld       r2,STACK_FRAME_OVERHEAD+26*8(r1)
+       mtmsrd   r2
+       ld       r2,STACK_FRAME_OVERHEAD+25*8(r1)
+       mtlr     r2
+       ld       r2,STACK_FRAME_OVERHEAD+24*8(r1)
+       mtxer    r2
+       ld       r2,STACK_FRAME_OVERHEAD+23*8(r1)
+       mtcr     r2
+       ld       r2,STACK_FRAME_OVERHEAD+22*8(r1)
+       mtctr    r2
+
+       /* Restore general purpose registers. */
+       ld       r0,STACK_FRAME_OVERHEAD+21*8(r1)
+       ld       r2,STACK_FRAME_OVERHEAD+20*8(r1)
+       ld       r3,STACK_FRAME_OVERHEAD+19*8(r1)
+       ld      r13,STACK_FRAME_OVERHEAD+18*8(r1)
+       ld      r14,STACK_FRAME_OVERHEAD+17*8(r1)
+       ld      r15,STACK_FRAME_OVERHEAD+16*8(r1)
+       ld      r16,STACK_FRAME_OVERHEAD+15*8(r1)
+       ld      r17,STACK_FRAME_OVERHEAD+14*8(r1)
+       ld      r18,STACK_FRAME_OVERHEAD+13*8(r1)
+       ld      r19,STACK_FRAME_OVERHEAD+12*8(r1)
+       ld      r20,STACK_FRAME_OVERHEAD+11*8(r1)
+       ld      r21,STACK_FRAME_OVERHEAD+10*8(r1)
+       ld      r22,STACK_FRAME_OVERHEAD+9*8(r1)
+       ld      r23,STACK_FRAME_OVERHEAD+8*8(r1)
+       ld      r24,STACK_FRAME_OVERHEAD+7*8(r1)
+       ld      r25,STACK_FRAME_OVERHEAD+6*8(r1)
+       ld      r26,STACK_FRAME_OVERHEAD+5*8(r1)
+       ld      r27,STACK_FRAME_OVERHEAD+4*8(r1)
+       ld      r28,STACK_FRAME_OVERHEAD+3*8(r1)
+       ld      r29,STACK_FRAME_OVERHEAD+2*8(r1)
+       ld      r30,STACK_FRAME_OVERHEAD+1*8(r1)
+       ld      r31,STACK_FRAME_OVERHEAD+0*8(r1)
+
+       addi    r1,r1,224+STACK_FRAME_OVERHEAD
+
+       blr
+
+#endif /* CONFIG_ADEOS_THREADS */
+
+/* Returns the current threadinfo pointer in a way that is
+   insensitive to the underlying stack, by reading it from the PACA,
+   which is reached through special purpose register SPRG3. */
+/* could probably just use r13 and forget loading the paca */
+_GLOBAL(__adeos_current_threadinfo)
+       mfspr   r3,SPRG3                /* get PACA */
+       ld      r3,PACACURRENT(r3)
+       blr
+       
+#endif /* CONFIG_ADEOS_CORE */
+
diff -Nru linux-2.6.10/arch/ppc64/kernel/head.S linux-2.6.10-adeos-ppc64-r3/arch/ppc64/kernel/head.S
--- linux-2.6.10/arch/ppc64/kernel/head.S       2004-12-24 23:34:48.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/arch/ppc64/kernel/head.S        2005-11-13 11:45:31.000000000 +0200
@@ -381,6 +381,18 @@
        bl      hdlr;                                   \
        b       .ret_from_except_lite
 
+#ifdef CONFIG_ADEOS_CORE
+#define ADEOS_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
+       .align  7;                                      \
+       .globl label##_common;                          \
+label##_common:                                                \
+       EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN);      \
+       DISABLE_INTS;                                   \
+       addi    r3,r1,STACK_FRAME_OVERHEAD;             \
+       bl      hdlr;                                   \
+       b       .__adeos_ret_from_except_lite
+#endif /* CONFIG_ADEOS_CORE */
+
 /*
  * Start of pSeries system interrupt routines
  */
@@ -761,7 +773,12 @@
        bl      .MachineCheckException
        b       .ret_from_except
 
+
+#ifdef CONFIG_ADEOS_CORE
+       ADEOS_EXCEPTION_COMMON_LITE(0x900, Decrementer, .__adeos_grab_timer) 
+#else /* !CONFIG_ADEOS_CORE */
        STD_EXCEPTION_COMMON_LITE(0x900, Decrementer, .timer_interrupt)
+#endif /* CONFIG_ADEOS_CORE */
        STD_EXCEPTION_COMMON(0xa00, Trap_0a, .UnknownException)
        STD_EXCEPTION_COMMON(0xb00, Trap_0b, .UnknownException)
        STD_EXCEPTION_COMMON(0xd00, SingleStep, .SingleStepException)
@@ -890,8 +907,13 @@
 HardwareInterrupt_entry:
        DISABLE_INTS
        addi    r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_ADEOS_CORE
+       bl      .__adeos_grab_irq
+       b       .__adeos_ret_from_except_lite
+#else /* !CONFIG_ADEOS_CORE */
        bl      .do_IRQ
        b       .ret_from_except_lite
+#endif /* CONFIG_ADEOS_CORE */
 
        .align  7
        .globl Alignment_common
diff -Nru linux-2.6.10/arch/ppc64/kernel/idle.c linux-2.6.10-adeos-ppc64-r3/arch/ppc64/kernel/idle.c
--- linux-2.6.10/arch/ppc64/kernel/idle.c       2004-12-24 23:35:24.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/arch/ppc64/kernel/idle.c        2005-11-13 11:45:31.000000000 +0200
@@ -131,6 +131,9 @@
 
                        while (!need_resched() && !cpu_is_offline(cpu)) {
                                barrier();
+#ifdef CONFIG_ADEOS_CORE
+                               adeos_suspend_domain();
+#endif /* CONFIG_ADEOS_CORE */
                                /*
                                 * Go into low thread priority and possibly
                                 * low power mode.
@@ -288,8 +291,15 @@
 {
        while(1) {
                /* check CPU type here */
-               if (!need_resched())
+               if (!need_resched()) 
+#ifdef CONFIG_ADEOS_CORE
+               {
+                       adeos_suspend_domain();
                        power4_idle();
+               }
+#else /* !CONFIG_ADEOS_CORE */
+                       power4_idle();
+#endif /* CONFIG_ADEOS_CORE */         
                if (need_resched())
                        schedule();
        }
diff -Nru linux-2.6.10/arch/ppc64/kernel/irq.c linux-2.6.10-adeos-ppc64-r3/arch/ppc64/kernel/irq.c
--- linux-2.6.10/arch/ppc64/kernel/irq.c        2004-12-24 23:34:32.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/arch/ppc64/kernel/irq.c 2005-11-13 11:45:31.000000000 +0200
@@ -134,14 +134,25 @@
 
        if (desc->status & IRQ_PER_CPU) {
                /* no locking required for CPU-local interrupts: */
+#ifdef CONFIG_ADEOS_CORE
+               if (!adp_pipelined)
+                       ack_irq(irq);
+#else
                ack_irq(irq);
+#endif /* CONFIG_ADEOS_CORE */
                action_ret = handle_IRQ_event(irq, regs, desc->action);
                desc->handler->end(irq);
                return;
        }
 
        spin_lock(&desc->lock);
+#ifdef CONFIG_ADEOS_CORE
+       if (!adp_pipelined)
+               ack_irq(irq);
+#else
        ack_irq(irq);   
+#endif /* CONFIG_ADEOS_CORE */
+       
        /*
           REPLAY is when Linux resends an IRQ that was dropped earlier
           WAITING is used by probe to mark irqs that are being tested
diff -Nru linux-2.6.10/arch/ppc64/kernel/Makefile linux-2.6.10-adeos-ppc64-r3/arch/ppc64/kernel/Makefile
--- linux-2.6.10/arch/ppc64/kernel/Makefile     2004-12-24 23:35:39.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/arch/ppc64/kernel/Makefile      2005-11-13 11:45:31.000000000 +0200
@@ -62,4 +62,6 @@
 
 obj-$(CONFIG_ALTIVEC)          += vecemu.o vector.o
 
+obj-$(CONFIG_ADEOS_CORE)       += adeos.o
+
 CFLAGS_ioctl32.o += -Ifs/
diff -Nru linux-2.6.10/arch/ppc64/kernel/ppc_ksyms.c linux-2.6.10-adeos-ppc64-r3/arch/ppc64/kernel/ppc_ksyms.c
--- linux-2.6.10/arch/ppc64/kernel/ppc_ksyms.c  2004-12-24 23:34:26.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/arch/ppc64/kernel/ppc_ksyms.c   2005-11-13 11:46:07.000000000 +0200
@@ -163,3 +163,30 @@
 EXPORT_SYMBOL(paca);
 EXPORT_SYMBOL(cur_cpu_spec);
 EXPORT_SYMBOL(systemcfg);
+
+#ifdef CONFIG_ADEOS_CORE
+/* The following are per-platform convenience exports which are needed
+   by some Adeos domains loaded as kernel modules. */
+extern unsigned long disarm_decr[NR_CPUS];
+EXPORT_SYMBOL(disarm_decr);
+EXPORT_SYMBOL(tb_ticks_per_jiffy);
+EXPORT_SYMBOL(__switch_to);
+void show_stack(struct task_struct *task,
+               unsigned long *esp);
+EXPORT_SYMBOL(show_stack);
+EXPORT_SYMBOL(udbg_printf);
+
+/* these two are needed by the task switching code in fusion */
+extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
+extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
+EXPORT_SYMBOL(switch_stab);
+EXPORT_SYMBOL(switch_slb);
+
+/* flush_tlb_pending() */
+EXPORT_PER_CPU_SYMBOL(ppc64_tlb_batch);
+EXPORT_SYMBOL(__flush_tlb_pending);
+
+EXPORT_SYMBOL(_switch);
+extern struct task_struct *last_task_used_math;
+EXPORT_SYMBOL(last_task_used_math);
+#endif /* CONFIG_ADEOS_CORE */
diff -Nru linux-2.6.10/arch/ppc64/kernel/time.c linux-2.6.10-adeos-ppc64-r3/arch/ppc64/kernel/time.c
--- linux-2.6.10/arch/ppc64/kernel/time.c       2004-12-24 23:35:28.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/arch/ppc64/kernel/time.c        2005-11-13 11:45:31.000000000 +0200
@@ -73,6 +73,9 @@
 
 EXPORT_SYMBOL(jiffies_64);
 
+#ifdef CONFIG_ADEOS_CORE
+unsigned long disarm_decr[NR_CPUS];
+#endif /* CONFIG_ADEOS_CORE */
 /* keep track of when we need to update the rtc */
 time_t last_rtc_update;
 extern int piranha_simulator;
@@ -293,6 +296,9 @@
        next_dec = lpaca->next_jiffy_update_tb - cur_tb;
        if (next_dec > lpaca->default_decr)
                next_dec = lpaca->default_decr;
+#ifdef CONFIG_ADEOS_CORE
+       if (!disarm_decr[smp_processor_id()])
+#endif /* CONFIG_ADEOS_CORE */ 
        set_dec(next_dec);
 
 #ifdef CONFIG_PPC_ISERIES
diff -Nru linux-2.6.10/arch/ppc64/kernel/traps.c linux-2.6.10-adeos-ppc64-r3/arch/ppc64/kernel/traps.c
--- linux-2.6.10/arch/ppc64/kernel/traps.c      2004-12-24 23:34:47.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/arch/ppc64/kernel/traps.c       2005-11-13 11:45:31.000000000 +0200
@@ -75,6 +75,11 @@
        if (debugger(regs))
                return 1;
 
+#ifdef CONFIG_ADEOS_CORE
+       /* lets us see Oopses from other domains, too */
+       if (adp_current != adp_root)
+               adeos_set_printk_sync(adp_current);
+#endif /* CONFIG_ADEOS_CORE */
        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
@@ -185,9 +190,20 @@
 }
 #endif
 
+#ifdef CONFIG_ADEOS_CORE
+static inline int __adeos_pipeline_trap(int trap, struct pt_regs *regs)
+{
+    return __adeos_event_monitors[trap] > 0 ? __adeos_handle_event(trap,regs) : 0;
+}
+#endif /* CONFIG_ADEOS_CORE */
+
 void
 SystemResetException(struct pt_regs *regs)
 {
+#ifdef CONFIG_ADEOS_CORE
+       if (__adeos_pipeline_trap(ADEOS_SYSRESET_TRAP,regs))
+               return;
+#endif /* CONFIG_ADEOS_CORE */
 #ifdef CONFIG_PPC_PSERIES
        if (fwnmi_active) {
                struct rtas_error_log *errhdr = FWNMI_get_errinfo(regs);
@@ -265,7 +281,11 @@
                        return;
        }
 #endif
-
+#ifdef CONFIG_ADEOS_CORE
+       if (__adeos_pipeline_trap(ADEOS_MCE_TRAP,regs))
+           return;
+#endif /* CONFIG_ADEOS_CORE */
+       
        if (debugger_fault_handler(regs))
                return;
        die("Machine check", regs, 0);
@@ -278,6 +298,11 @@
 void
 UnknownException(struct pt_regs *regs)
 {
+#ifdef CONFIG_ADEOS_CORE
+       if (__adeos_pipeline_trap(ADEOS_UNKNOWN_TRAP,regs))
+          return;
+#endif /* CONFIG_ADEOS_CORE */
+
        printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
               regs->nip, regs->msr, regs->trap);
 
@@ -287,6 +312,10 @@
 void
 InstructionBreakpointException(struct pt_regs *regs)
 {
+#ifdef CONFIG_ADEOS_CORE
+       if (__adeos_pipeline_trap(ADEOS_IABR_TRAP,regs))
+           return;
+#endif /* CONFIG_ADEOS_CORE */
        if (debugger_iabr_match(regs))
                return;
        _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
@@ -296,7 +325,10 @@
 SingleStepException(struct pt_regs *regs)
 {
        regs->msr &= ~MSR_SE;  /* Turn off 'trace' bit */
-
+#ifdef CONFIG_ADEOS_CORE
+       if (__adeos_pipeline_trap(ADEOS_SSTEP_TRAP,regs))
+           return;
+#endif /* CONFIG_ADEOS_CORE */
        if (debugger_sstep(regs))
                return;
 
@@ -459,6 +491,11 @@
 void
 ProgramCheckException(struct pt_regs *regs)
 {
+#ifdef CONFIG_ADEOS_CORE
+       if (__adeos_pipeline_trap(ADEOS_PCE_TRAP,regs))
+           return;
+#endif /* CONFIG_ADEOS_CORE */
+
        if (regs->msr & 0x100000) {
                /* IEEE FP exception */
                parse_fpe(regs);
@@ -500,6 +537,10 @@
 
 void KernelFPUnavailableException(struct pt_regs *regs)
 {
+#ifdef CONFIG_ADEOS_CORE
+       if (__adeos_pipeline_trap(ADEOS_KFPUNAVAIL_TRAP,regs))
+               return;
+#endif /* CONFIG_ADEOS_CORE */
        printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
                          "%lx at %lx\n", regs->trap, regs->nip);
        die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
@@ -507,6 +548,11 @@
 
 void AltivecUnavailableException(struct pt_regs *regs)
 {
+#ifdef CONFIG_ADEOS_CORE
+       if (__adeos_pipeline_trap(ADEOS_ALTUNAVAIL_TRAP,regs))
+           return;
+#endif /* CONFIG_ADEOS_CORE */
+
 #ifndef CONFIG_ALTIVEC
        if (user_mode(regs)) {
                /* A user program has executed an altivec instruction,
@@ -539,6 +585,10 @@
 void
 PerformanceMonitorException(struct pt_regs *regs)
 {
+#ifdef CONFIG_ADEOS_CORE
+       if (__adeos_pipeline_trap(ADEOS_PERFMON_TRAP,regs))
+               return;
+#endif /* CONFIG_ADEOS_CORE */
        perf_irq(regs);
 }
 
@@ -554,7 +604,12 @@
                emulate_single_step(regs);
                return;
        }
-
+#ifdef CONFIG_ADEOS_CORE
+       /* Assume that fixing alignment can always be done regardless
+          of the current domain. */
+       if (__adeos_pipeline_trap(ADEOS_ALIGNMENT_TRAP,regs))
+           return;
+#endif /* CONFIG_ADEOS_CORE */
        /* Operand address was bad */   
        if (fixed == -EFAULT) {
                if (user_mode(regs)) {
@@ -577,6 +632,11 @@
        int err;
        siginfo_t info;
 
+#ifdef CONFIG_ADEOS_CORE
+       if (__adeos_pipeline_trap(ADEOS_ALTASSIST_TRAP,regs))
+           return;
+#endif /* CONFIG_ADEOS_CORE */
+
        if (!user_mode(regs)) {
                printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
                       " at %lx\n", regs->nip);
@@ -618,6 +678,10 @@
  */
 void unrecoverable_exception(struct pt_regs *regs)
 {
+#ifdef CONFIG_ADEOS_CORE
+       if (__adeos_pipeline_trap(ADEOS_NREC_TRAP,regs))
+           return;
+#endif /* CONFIG_ADEOS_CORE */
        printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
               regs->trap, regs->nip);
        die("Unrecoverable exception", regs, SIGABRT);
diff -Nru linux-2.6.10/arch/ppc64/mm/fault.c linux-2.6.10-adeos-ppc64-r3/arch/ppc64/mm/fault.c
--- linux-2.6.10/arch/ppc64/mm/fault.c  2004-12-24 23:35:23.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/arch/ppc64/mm/fault.c   2005-11-13 11:45:31.000000000 +0200
@@ -95,6 +95,12 @@
 
        BUG_ON((trap == 0x380) || (trap == 0x480));
 
+#ifdef CONFIG_ADEOS_CORE
+       if (__adeos_event_monitors[ADEOS_ACCESS_TRAP] > 0 &&
+           __adeos_handle_event(ADEOS_ACCESS_TRAP,regs) != 0)
+           return 0;
+#endif /* CONFIG_ADEOS_CORE */
+
        if (trap == 0x300) {
                if (debugger_fault_handler(regs))
                        return 0;
diff -Nru linux-2.6.10/arch/ppc64/mm/hash_native.c linux-2.6.10-adeos-ppc64-r3/arch/ppc64/mm/hash_native.c
--- linux-2.6.10/arch/ppc64/mm/hash_native.c    2004-12-24 23:34:30.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/arch/ppc64/mm/hash_native.c     2005-11-13 11:45:31.000000000 +0200
@@ -278,7 +278,7 @@
        if (large)
                avpn &= ~0x1UL;
 
-       local_irq_save(flags);
+       adeos_hw_local_irq_save(flags);
        native_lock_hpte(hptep);
 
        dw0 = hptep->dw0.dw0;
@@ -301,7 +301,7 @@
                if (lock_tlbie)
                        spin_unlock(&native_tlbie_lock);
        }
-       local_irq_restore(flags);
+       adeos_hw_local_irq_restore(flags);
 }
 
 static void native_flush_hash_range(unsigned long context,
@@ -316,7 +316,7 @@
        /* XXX fix for large ptes */
        unsigned long large = 0;
 
-       local_irq_save(flags);
+       adeos_hw_local_irq_save(flags);
 
        j = 0;
        for (i = 0; i < number; i++) {
@@ -384,7 +384,7 @@
                        spin_unlock(&native_tlbie_lock);
        }
 
-       local_irq_restore(flags);
+       adeos_hw_local_irq_restore(flags);
 }
 
 #ifdef CONFIG_PPC_PSERIES
diff -Nru linux-2.6.10/arch/ppc64/mm/slb.c linux-2.6.10-adeos-ppc64-r3/arch/ppc64/mm/slb.c
--- linux-2.6.10/arch/ppc64/mm/slb.c    2004-12-24 23:34:57.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/arch/ppc64/mm/slb.c     2005-11-13 11:45:31.000000000 +0200
@@ -83,6 +83,9 @@
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long unmapped_base;
+       unsigned long flags;
+
+       adeos_hw_local_irq_save(flags);
 
        if (offset <= SLB_CACHE_ENTRIES) {
                int i;
@@ -115,24 +118,35 @@
        else
                unmapped_base = TASK_UNMAPPED_BASE_USER64;
 
-       if (pc >= KERNELBASE)
+       if (pc >= KERNELBASE) {
+               adeos_hw_local_irq_restore(flags);
                return;
+       }
        slb_allocate(pc);
 
-       if (GET_ESID(pc) == GET_ESID(stack))
+       if (GET_ESID(pc) == GET_ESID(stack)) {
+               adeos_hw_local_irq_restore(flags);
                return;
+       }
 
-       if (stack >= KERNELBASE)
+       if (stack >= KERNELBASE) {
+               adeos_hw_local_irq_restore(flags);
                return;
+       }
        slb_allocate(stack);
 
        if ((GET_ESID(pc) == GET_ESID(unmapped_base))
-           || (GET_ESID(stack) == GET_ESID(unmapped_base)))
+           || (GET_ESID(stack) == GET_ESID(unmapped_base))) {
+               adeos_hw_local_irq_restore(flags);
                return;
+       }
 
-       if (unmapped_base >= KERNELBASE)
+       if (unmapped_base >= KERNELBASE) {
+               adeos_hw_local_irq_restore(flags);
                return;
+       }
        slb_allocate(unmapped_base);
+       adeos_hw_local_irq_restore(flags);
 }
 
 void slb_initialize(void)
diff -Nru linux-2.6.10/arch/ppc64/mm/tlb.c linux-2.6.10-adeos-ppc64-r3/arch/ppc64/mm/tlb.c
--- linux-2.6.10/arch/ppc64/mm/tlb.c    2004-12-24 23:34:45.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/arch/ppc64/mm/tlb.c     2005-11-13 11:45:31.000000000 +0200
@@ -122,7 +122,11 @@
        cpumask_t tmp;
        int local = 0;
 
+#ifdef CONFIG_ADEOS_CORE
+       BUG_ON(adp_current==adp_root && in_interrupt());
+#else /* !CONFIG_ADEOS_CORE */
        BUG_ON(in_interrupt());
+#endif /* CONFIG_ADEOS_CORE */
 
        cpu = get_cpu();
        i = batch->index;
diff -Nru linux-2.6.10/Documentation/adeos.txt linux-2.6.10-adeos-ppc64-r3/Documentation/adeos.txt
--- linux-2.6.10/Documentation/adeos.txt        1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/Documentation/adeos.txt 2005-11-13 11:45:31.000000000 +0200
@@ -0,0 +1,176 @@
+
+The Adeos nanokernel is based on research and publications made in the
+early '90s on the subject of nanokernels. Our basic method was to
+reverse the approach described in most of the papers on the subject.
+Instead of first building the nanokernel and then building the client
+OSes, we started from a live and known-to-be-functional OS, Linux, and
+inserted a nanokernel beneath it. Starting from Adeos, other client
+OSes can now be put side-by-side with the Linux kernel.
+
+To this end, Adeos enables multiple domains to exist simultaneously on
+the same hardware. None of these domains see each other, but all of
+them see Adeos. A domain is most probably a complete OS, but there is
+no assumption being made regarding the sophistication of what's in
+a domain.
+
+To share the hardware among the different OSes, Adeos implements an
+interrupt pipeline (ipipe). Every OS domain has an entry in the ipipe.
+Each interrupt that enters the ipipe is passed on to every domain
+in the ipipe. Instead of disabling/enabling interrupts, each domain
+in the pipeline only needs to stall/unstall its pipeline stage. If
+an ipipe stage is stalled, then the interrupts do not progress in the
+ipipe until that stage has been unstalled. Each stage of the ipipe
+can, of course, decide to do a number of things with an interrupt.
+Among other things, it can decide that it's the last recipient of the
+interrupt. In that case, the ipipe does not propagate the interrupt
+to the rest of the domains in the ipipe.
+
+Regardless of the operations being done in the ipipe, the Adeos code
+does __not__ play with the interrupt masks. The only case where the
+hardware masks are altered is during the addition/removal of a domain
+from the ipipe. This also means that no OS is allowed to use the real
+hardware cli/sti. But this is OK, since the stall/unstall calls
+achieve the same functionality.
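+
+As a concrete illustration, the ppc64 patch below maps Linux's own
+interrupt-control primitives onto root-stage stall/unstall (see the
+hw_irq.h change): the hardware mask is left untouched, and interrupts
+received while the stage is stalled are logged and replayed when it
+is unstalled.
+
+    static inline void local_irq_disable(void) {
+        __adeos_stall_root();    /* stall the root stage only */
+    }
+
+    static inline void local_irq_enable(void) {
+        __adeos_unstall_root();  /* sync any interrupts logged meanwhile */
+    }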
+
+Our approach is based on the following papers (links to these
+papers are provided at the bottom of this message):
+[1] D. Probert, J. Bruno, and M. Karaorman. "Space: a new approach to
+operating system abstraction." In: International Workshop on Object
+Orientation in Operating Systems, pages 133-137, October 1991.
+[2] D. Probert, J. Bruno. "Building fundamentally extensible application-
+specific operating systems in Space", March 1995.
+[3] D. Cheriton, K. Duda. "A caching model of operating system kernel
+functionality". In: Proc. Symp. on Operating Systems Design and
+Implementation, pages 179-194, Monterey CA (USA), 1994.
+[4] D. Engler, M. Kaashoek, and J. O'Toole Jr. "Exokernel: an operating
+system architecture for application-specific resource management",
+December 1995.
+
+If you don't want to go fetch the complete papers, here's a summary:
+the first two discuss the Space nanokernel, the third discusses the
+cache kernel, and the last discusses the exokernel.
+
+The complete Adeos approach has been thoroughly documented in a whitepaper
+published more than a year ago entitled "Adaptive Domain Environment
+for Operating Systems" and available here: http://www.opersys.com/adeos
+The current implementation is slightly different. Mainly, we do not
+implement the functionality to move Linux out of ring 0. Although of
+interest, this approach is not very portable.
+
+Instead, our patch taps right into Linux's main source of control
+over the hardware, the interrupt dispatching code, and inserts an
+interrupt pipeline which can then serve all the nanokernel's clients,
+including Linux.
+
+This is not a novelty in itself. Other OSes have been modified in such
+a way for a wide range of purposes. One of the most interesting
+examples is described by Stodolsky, Chen, and Bershad in a paper
+entitled "Fast Interrupt Priority Management in Operating System
+Kernels" published in 1993 as part of the Usenix Microkernels and
+Other Kernel Architectures Symposium. In that case, cli/sti were
+replaced by virtual cli/sti which did not modify the real interrupt
+mask in any way. Instead, interrupts were deferred and delivered to
+the OS upon a call to the virtualized sti.
+
+Mainly, this resulted in increased performance for the OS. Although
+we haven't done any measurements on Linux's interrupt handling
+performance with Adeos, our nanokernel includes by definition the
+code implementing the technique described in the abovementioned
+Stodolsky paper, which we use to redirect the hardware interrupt flow
+to the pipeline.
+
+i386 and armnommu are currently supported. Most of the
+architecture-dependent code is easily portable to other architectures.
+
+Aside from adding the Adeos module (driver/adeos), we also modified some
+files to tap into Linux interrupt and system event dispatching (all
+the modifications are encapsulated in #ifdef CONFIG_ADEOS_*/#endif).
+
+We modified the idle task so it gives control back to Adeos in order for
+the ipipe to continue propagation.
+
+We modified init/main.c to initialize Adeos very early in the startup.
+
+Of course, we also added the appropriate makefile modifications and
+config options so that you can choose to enable/disable Adeos as
+part of the kernel build configuration.
+
+Adeos' public API is fully documented here:
+http://www.freesoftware.fsf.org/adeos/doc/api/index.html.
+
+In Linux's case, adeos_register_domain() is called very early during
+system startup.
+
+To add your domain to the ipipe, you need to:
+1) Register your domain with Adeos using adeos_register_domain()
+2) Call adeos_virtualize_irq() for all the IRQs you wish to be
+notified about in the ipipe.
+
+That's it. Provided you gave Adeos appropriate handlers in step
+#2, your interrupts will be delivered via the ipipe, as the sketch
+below illustrates.
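+
+For instance, a minimal domain could be set up along the following
+lines. This is only a sketch: MY_IRQ, the domid magic and the
+priority are illustrative, and adeos_propagate_irq() /
+adeos_suspend_domain() are used as per the API documentation above.
+
+    static adomain_t my_domain;
+
+    static void my_irq_handler (unsigned irq)
+    {
+        /* ... service the device ... */
+        adeos_propagate_irq(irq); /* optionally pass it down to Linux */
+    }
+
+    static void my_domain_entry (int iflag)
+    {
+        if (iflag)
+            adeos_virtualize_irq(MY_IRQ, &my_irq_handler, NULL,
+                                 IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
+        for (;;)
+            adeos_suspend_domain(); /* resumed when an IRQ is pending */
+    }
+
+    static int __init my_domain_init (void)
+    {
+        adattr_t attr;
+
+        attr.domid    = 0x52544149;         /* magic chosen by the caller */
+        attr.name     = "MyDomain";
+        attr.priority = ADEOS_ROOT_PRI + 1; /* ahead of Linux in the ipipe */
+        attr.entry    = &my_domain_entry;
+        attr.estacksz = 0;                  /* default stack size */
+        attr.dswitch  = NULL;
+        attr.nptdkeys = 0;
+        attr.ptdset   = NULL;
+        attr.ptdget   = NULL;
+
+        return adeos_register_domain(&my_domain, &attr);
+    }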
+
+During runtime, you may change your position in the ipipe using
+adeos_renice_domain(). You may also stall/unstall the pipeline
+and change the ipipe's handling of the interrupts according to your
+needs.
+
+Adeos supports SMP, and also supports the APIC on UP systems.
+
+Here are some of the possible uses for Adeos (this list is far
+from complete):
+1) Much like User-Mode Linux, it should now be possible to have two
+Linux kernels living side-by-side on the same hardware. In contrast
+to UML, this would not be two kernels one on top of the other, but
+really side-by-side. Since Linux can be told at boot time to use
+only one portion of the available RAM, on a 128MB machine this
+would mean that the first could be made to use the 0-64MB space and
+the second would use the 64-128MB space. We realize that many
+modifications are required. Among other things, one of the 2 kernels
+will not need to conduct hardware initialization. Nevertheless, this
+possibility should be studied closer.
+
+2) It follows from #1 that adding other kernels beside Linux should
+be feasible. BSD is a prime candidate, but it would also be nice to
+see what virtualizers such as VMWare and Plex86 could do with Adeos.
+Proprietary operating systems could potentially also be accommodated.
+
+3) All the previous work that has been done on nanokernels should now
+be easily ported to Linux. Mainly, we would be very interested to
+hear about extensions to Adeos. Primarily, we have no mechanisms
+currently enabling multiple domains to share information. The papers
+mentioned earlier provide such mechanisms, but we'd like to see
+actual practical examples.
+
+4) Kernel debuggers' main problem (tapping into the kernel's
+interrupts) is solved, so it should now be possible to provide
+patchless kernel debuggers. They would then become loadable kernel
+modules.
+
+5) Drivers that require absolute priority and cannot tolerate other
+kernel code using cli/sti can now create a domain of their own
+and place themselves before Linux in the ipipe. This provides a
+mechanism for implementing systems with guaranteed realtime
+response.
+
+Philippe Gerum <[EMAIL PROTECTED]>
+Karim Yaghmour <[EMAIL PROTECTED]>
+
+----------------------------------------------------------------------
+Links to papers:
+1-
+http://citeseer.nj.nec.com/probert91space.html
+ftp://ftp.cs.ucsb.edu/pub/papers/space/iwooos91.ps.gz (not working)
+http://www4.informatik.uni-erlangen.de/~tsthiel/Papers/Space-iwooos91.ps.gz
+
+2-
+http://www.cs.ucsb.edu/research/trcs/abstracts/1995-06.shtml
+http://www4.informatik.uni-erlangen.de/~tsthiel/Papers/Space-trcs95-06.ps.gz
+
+3-
+http://citeseer.nj.nec.com/kenneth94caching.html
+http://guir.cs.berkeley.edu/projects/osprelims/papers/cachmodel-OSkernel.ps.gz
+
+4-
+http://citeseer.nj.nec.com/engler95exokernel.html
+ftp://ftp.cag.lcs.mit.edu/multiscale/exokernel.ps.Z
+----------------------------------------------------------------------
diff -Nru linux-2.6.10/include/asm-ppc64/adeos.h linux-2.6.10-adeos-ppc64-r3/include/asm-ppc64/adeos.h
--- linux-2.6.10/include/asm-ppc64/adeos.h      1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/include/asm-ppc64/adeos.h       2005-11-13 11:58:38.000000000 +0200
@@ -0,0 +1,444 @@
+/*
+ *   include/asm-ppc64/adeos.h
+ *
+ *   Adeos 64-bit PowerPC adoption
+ *   Copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
+ *   based on previous work:
+ *     
+ *   Copyright (C) 2004 Philippe Gerum.
+ *
+ *   Adeos/PPC port over 2.6 based on the previous 2.4 implementation by:
+ *
+ *   Copyright (C) 2004 Wolfgang Grandegger.
+ *
+ *   It follows closely the ARM and x86 ports of ADEOS.
+ *
+ *   Copyright (C) 2002 Philippe Gerum.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __PPC64_ADEOS_H
+#define __PPC64_ADEOS_H
+
+#include <asm/ptrace.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <linux/list.h>
+#include <linux/cpumask.h>
+#include <linux/threads.h>
+
+#define ADEOS_ARCH_STRING     "r3/ppc64"
+#define ADEOS_MAJOR_NUMBER    3
+#define ADEOS_MINOR_NUMBER    255
+
+#define ADEOS_IRQ_ACKED                0x1000
+#define ADEOS_IRQ_ACKED_MASK   (ADEOS_IRQ_ACKED - 1)
+
+#ifdef CONFIG_SMP
+
+#error "Adeos/ppc64: SMP not yet implemented"
+
+#define ADEOS_NR_CPUS          NR_CPUS
+#define ADEOS_CRITICAL_IPI     0
+
+#define adeos_processor_id()   (__adeos_current_threadinfo()->cpu)
+
+#define adeos_declare_cpuid    int cpuid
+#define adeos_load_cpuid()     do { \
+                                  (cpuid) = adeos_processor_id();      \
+                               } while(0)
+#define adeos_lock_cpu(flags)  do { \
+                                  adeos_hw_local_irq_save(flags); \
+                                  (cpuid) = adeos_processor_id(); \
+                               } while(0)
+#define adeos_unlock_cpu(flags) adeos_hw_local_irq_restore(flags)
+#define adeos_get_cpu(flags)    adeos_lock_cpu(flags)
+#define adeos_put_cpu(flags)    adeos_unlock_cpu(flags)
+#define adp_current             (adp_cpu_current[adeos_processor_id()])
+
+#else  /* !CONFIG_SMP */
+
+#define ADEOS_NR_CPUS          1
+#define adeos_processor_id()   0
+/* Array references using this index should be optimized out. */
+#define adeos_declare_cpuid    const int cpuid = 0
+#define adeos_load_cpuid()      /* nop */
+#define adeos_lock_cpu(flags)   adeos_hw_local_irq_save(flags)
+#define adeos_unlock_cpu(flags) adeos_hw_local_irq_restore(flags)
+#define adeos_get_cpu(flags)    do { flags = flags; } while(0)
+#define adeos_put_cpu(flags)    /* nop */
+#define adp_current             (adp_cpu_current[0])
+
+#endif /* CONFIG_SMP */
+
+ /* PPC traps */
+#define ADEOS_ACCESS_TRAP     0        /* Data or instruction access exception */
+#define ADEOS_ALIGNMENT_TRAP  1        /* Alignment exception */
+#define ADEOS_ALTUNAVAIL_TRAP 2        /* Altivec unavailable */
+#define ADEOS_PCE_TRAP        3        /* Program check exception */
+#define ADEOS_MCE_TRAP        4        /* Machine check exception */
+#define ADEOS_UNKNOWN_TRAP    5        /* Unknown exception */
+#define ADEOS_IABR_TRAP       6        /* Instruction breakpoint */
+#define ADEOS_SSTEP_TRAP      7        /* Single-step exception  */
+#define ADEOS_NREC_TRAP       8        /* Non-recoverable exception  */
+#define ADEOS_ALTASSIST_TRAP  9 /* Altivec assist exception */
+#define ADEOS_SYSRESET_TRAP   10 /* System reset exception */
+#define ADEOS_KFPUNAVAIL_TRAP 11 /* Kernel FP Unavailable exception */
+#define ADEOS_PERFMON_TRAP    12 /* Performance Monitor exception */
+#define ADEOS_NR_FAULTS       13
+/* Pseudo-vectors used for kernel events */
+#define ADEOS_FIRST_KEVENT      ADEOS_NR_FAULTS
+#define ADEOS_SYSCALL_PROLOGUE  (ADEOS_FIRST_KEVENT)
+#define ADEOS_SYSCALL_EPILOGUE  (ADEOS_FIRST_KEVENT + 1)
+#define ADEOS_SCHEDULE_HEAD     (ADEOS_FIRST_KEVENT + 2)
+#define ADEOS_SCHEDULE_TAIL     (ADEOS_FIRST_KEVENT + 3)
+#define ADEOS_ENTER_PROCESS     (ADEOS_FIRST_KEVENT + 4)
+#define ADEOS_EXIT_PROCESS      (ADEOS_FIRST_KEVENT + 5)
+#define ADEOS_SIGNAL_PROCESS    (ADEOS_FIRST_KEVENT + 6)
+#define ADEOS_KICK_PROCESS      (ADEOS_FIRST_KEVENT + 7)
+#define ADEOS_RENICE_PROCESS    (ADEOS_FIRST_KEVENT + 8)
+#define ADEOS_USER_EVENT        (ADEOS_FIRST_KEVENT + 9)
+#define ADEOS_LAST_KEVENT       (ADEOS_USER_EVENT)
+
+#define ADEOS_NR_EVENTS         (ADEOS_LAST_KEVENT + 1)
+
+typedef struct adevinfo {
+
+    unsigned domid;
+    unsigned event;
+    void *evdata;
+
+    volatile int propagate;    /* Private */
+
+} adevinfo_t;
+
+typedef struct adsysinfo {
+
+    int ncpus;                 /* Number of CPUs on board */
+
+    u64 cpufreq;               /* CPU frequency (in Hz) */
+
+    /* Arch-dependent block */
+
+    struct {
+       unsigned tmirq;         /* Decrementer virtual IRQ */
+       u64 tmfreq;             /* Timebase frequency */
+    } archdep;
+
+} adsysinfo_t;
+
+#define IPIPE_NR_XIRQS   NR_IRQS
+/* Number of virtual IRQs */
+#define IPIPE_NR_VIRQS   BITS_PER_LONG
+/* First virtual IRQ # */
+#define IPIPE_VIRQ_BASE  (((IPIPE_NR_XIRQS + BITS_PER_LONG - 1) / BITS_PER_LONG) * BITS_PER_LONG)
+/* Total number of IRQ slots */
+#define IPIPE_NR_IRQS     (IPIPE_VIRQ_BASE + IPIPE_NR_VIRQS)
+/* Number of indirect words needed to map the whole IRQ space. */
+#define IPIPE_IRQ_IWORDS  ((IPIPE_NR_IRQS + BITS_PER_LONG - 1) / BITS_PER_LONG)
+#define IPIPE_IRQ_IMASK   (BITS_PER_LONG - 1)
+#define IPIPE_IRQ_ISHIFT  6    /* 2^6 for 64bits arch. */
+
+#define IPIPE_IRQMASK_ANY   (~0L)
+#define IPIPE_IRQMASK_VIRT  (IPIPE_IRQMASK_ANY << (IPIPE_VIRQ_BASE / BITS_PER_LONG))
+
+/* The first virtual interrupt is reserved for the timer (see
+   __adeos_init_platform). */
+#define ADEOS_TIMER_VIRQ    IPIPE_VIRQ_BASE
+
+typedef struct adomain {
+
+    /* -- Section: offset-based references are made on these fields
+       from inline assembly code. Please don't move or reorder. */
+#ifdef CONFIG_ADEOS_THREADS
+    unsigned long esp[ADEOS_NR_CPUS];  /* Domain stack pointers */
+#endif /* CONFIG_ADEOS_THREADS */
+    void (*dswitch)(void);     /* Domain switch hook */
+    /* -- End of section. */
+
+    struct list_head p_link;   /* Link in pipeline */
+
+    struct adcpudata {
+       unsigned long status;
+       unsigned long irq_pending_hi;
+       unsigned long irq_pending_lo[IPIPE_IRQ_IWORDS];
+       unsigned irq_hits[IPIPE_NR_IRQS];
+#ifdef CONFIG_ADEOS_THREADS
+       adevinfo_t event_info;
+#endif /* CONFIG_ADEOS_THREADS */
+    } cpudata[ADEOS_NR_CPUS];
+
+    struct {
+       int (*acknowledge)(unsigned irq);
+       void (*handler)(unsigned irq);
+       unsigned long control;
+    } irqs[IPIPE_NR_IRQS];
+
+    struct {
+       void (*handler)(adevinfo_t *evinfo);
+    } events[ADEOS_NR_EVENTS];
+
+    struct adomain *m_link;    /* Link in mutex sleep queue */
+
+    unsigned long flags;
+
+    unsigned domid;
+
+    const char *name;
+
+    int priority;
+
+    int ptd_keymax;
+    int ptd_keycount;
+    unsigned long ptd_keymap;
+    void (*ptd_setfun)(int, void *);
+    void *(*ptd_getfun)(int);
+
+#ifdef CONFIG_ADEOS_THREADS
+    unsigned long estackbase[ADEOS_NR_CPUS];
+#endif /* CONFIG_ADEOS_THREADS */
+
+} adomain_t;
+
+/* The following macros must be used with hw interrupts off. */
+
+#define __adeos_set_irq_bit(adp,cpuid,irq) \
+do { \
+    if (!test_bit(IPIPE_LOCK_FLAG,&(adp)->irqs[irq].control)) { \
+        __set_bit(irq & IPIPE_IRQ_IMASK,&(adp)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
+        __set_bit(irq >> IPIPE_IRQ_ISHIFT,&(adp)->cpudata[cpuid].irq_pending_hi); \
+       } \
+} while(0)
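+
+/* Example: with IPIPE_IRQ_ISHIFT == 6, marking irq 70 pending sets
+   bit (70 & 63) == 6 of irq_pending_lo[70 >> 6] == irq_pending_lo[1]
+   and bit 1 of irq_pending_hi, so the syncer can scan irq_pending_hi
+   first and only visit the 64-bit words that actually hold pending
+   bits. */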
+
+#define __adeos_clear_pend(adp,cpuid,irq) \
+do { \
+    __clear_bit(irq & IPIPE_IRQ_IMASK,&(adp)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
+    if ((adp)->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT] == 0) \
+        __clear_bit(irq >> IPIPE_IRQ_ISHIFT,&(adp)->cpudata[cpuid].irq_pending_hi); \
+} while(0)
+
+#define __adeos_lock_irq(adp,cpuid,irq) \
+do { \
+    if (!test_and_set_bit(IPIPE_LOCK_FLAG,&(adp)->irqs[irq].control)) \
+       __adeos_clear_pend(adp,cpuid,irq); \
+} while(0)
+
+#define __adeos_unlock_irq(adp,irq) \
+do { \
+    if (test_and_clear_bit(IPIPE_LOCK_FLAG,&(adp)->irqs[irq].control)) { \
+        int __cpuid, __nr_cpus = num_online_cpus();          \
+       for (__cpuid = 0; __cpuid < __nr_cpus; __cpuid++)      \
+         if ((adp)->cpudata[__cpuid].irq_hits[irq] > 0) { /* We need atomic ops next. */ \
+           set_bit(irq & IPIPE_IRQ_IMASK,&(adp)->cpudata[__cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
+           set_bit(irq >> IPIPE_IRQ_ISHIFT,&(adp)->cpudata[__cpuid].irq_pending_hi); \
+         } \
+    } \
+} while(0)
+
+#define __adeos_clear_irq(adp,irq) \
+do { \
+    int __cpuid, __nr_cpus = num_online_cpus(); \
+    clear_bit(IPIPE_LOCK_FLAG,&(adp)->irqs[irq].control); \
+    for (__cpuid = 0; __cpuid < __nr_cpus; __cpuid++) {        \
+       (adp)->cpudata[__cpuid].irq_hits[irq] = 0; \
+       __adeos_clear_pend(adp,__cpuid,irq); \
+    } \
+} while(0)
+
+#define adeos_virtual_irq_p(irq) ((irq) >= IPIPE_VIRQ_BASE && \
+                                 (irq) < IPIPE_NR_IRQS)
+
+static inline void adeos_hw_local_irq_save_ptr(unsigned long *flags)
+{
+    unsigned long msr;
+    msr = mfmsr();
+    *flags = msr;
+    __mtmsrd(msr & ~MSR_EE, 1);
+    __asm__ __volatile__("": : :"memory");
+}
+
+#define adeos_hw_local_irq_save_flags(flags) adeos_hw_local_irq_save_ptr(&(flags))
+#define adeos_hw_local_irq_restore(flags)    do { \
+       __asm__ __volatile__("": : :"memory"); \
+       __mtmsrd((flags), 1); \
+} while(0)
+
+static inline void adeos_hw_local_irq_disable(void)
+{
+    unsigned long msr;
+    msr = mfmsr();
+    __mtmsrd(msr & ~MSR_EE, 1);
+    __asm__ __volatile__("": : :"memory");
+}
+
+static inline void adeos_hw_local_irq_enable(void)
+{
+    unsigned long msr;
+    __asm__ __volatile__("": : :"memory");
+    msr = mfmsr();
+    __mtmsrd(msr | MSR_EE, 1);
+}
+
+#define adeos_hw_local_irq_save(flags) ({adeos_hw_local_irq_save_flags(flags);adeos_hw_local_irq_disable();})
+#define adeos_hw_save_flags_and_sti(flags) ({adeos_hw_local_irq_save_flags(flags);adeos_hw_local_irq_enable();})
+
+#define adeos_hw_cli() adeos_hw_local_irq_disable()
+#define adeos_hw_sti() adeos_hw_local_irq_enable()
+
+#define adeos_hw_local_irq_flags(flags)        ((flags) = mfmsr())
+#define adeos_hw_test_iflag(x)         ((x) & MSR_EE)
+#define adeos_hw_irqs_disabled()       \
+({                                     \
+       unsigned long flags;            \
+       adeos_hw_local_irq_flags(flags);\
+       !adeos_hw_test_iflag(flags);    \
+})
+
+#define adeos_hw_tsc(t) (t = mftb())
+
+extern unsigned long tb_ticks_per_jiffy;
+
+#define adeos_cpu_freq() (HZ * tb_ticks_per_jiffy)
+
+#define adeos_spin_lock(x)     _spin_lock(x)
+#define adeos_spin_unlock(x)   _spin_unlock(x)
+#define adeos_spin_trylock(x)  _spin_trylock(x)
+#define adeos_write_lock(x)    _write_lock(x)
+#define adeos_write_unlock(x)  _write_unlock(x)
+#define adeos_write_trylock(x) _write_trylock(x)
+#define adeos_read_lock(x)     _read_lock(x)
+#define adeos_read_unlock(x)   _read_unlock(x)
+#define raw_spinlock_t         spinlock_t
+#define RAW_SPIN_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED
+#define raw_rwlock_t           rwlock_t
+#define RAW_RW_LOCK_UNLOCKED   RW_LOCK_UNLOCKED
+
+#define spin_lock_irqsave_hw(lock,flags)      adeos_spin_lock_irqsave(lock, flags)
+#define spin_unlock_irqrestore_hw(lock,flags) adeos_spin_unlock_irqrestore(lock, flags)
+
+#define adeos_spin_lock_irqsave(x,flags)  \
+do { \
+   adeos_hw_local_irq_save(flags); \
+   adeos_spin_lock(x); \
+} while (0)
+
+#define adeos_spin_unlock_irqrestore(x,flags)  \
+do { \
+   adeos_spin_unlock(x); \
+   adeos_hw_local_irq_restore(flags); \
+} while (0)
+
+#define adeos_spin_lock_disable(x)  \
+do { \
+   adeos_hw_cli(); \
+   adeos_spin_lock(x); \
+} while (0)
+
+#define adeos_spin_unlock_enable(x)  \
+do { \
+   adeos_spin_unlock(x); \
+   adeos_hw_sti(); \
+} while (0)
+
+#define adeos_read_lock_irqsave(lock, flags) \
+do { \
+   adeos_hw_local_irq_save(flags); \
+   adeos_read_lock(lock); \
+} while (0)
+
+#define adeos_read_unlock_irqrestore(lock, flags) \
+do { \
+   adeos_read_unlock(lock); \
+   adeos_hw_local_irq_restore(flags); \
+} while (0)
+
+#define adeos_write_lock_irqsave(lock, flags) \
+do { \
+   adeos_hw_local_irq_save(flags); \
+   adeos_write_lock(lock); \
+} while (0)
+
+#define adeos_write_unlock_irqrestore(lock, flags) \
+do { \
+   adeos_write_unlock(lock); \
+   adeos_hw_local_irq_restore(flags); \
+} while (0)
+
+/* Private interface -- Internal use only */
+
+struct adattr;
+
+void __adeos_init(void);
+
+void __adeos_init_domain(adomain_t *adp,
+                        struct adattr *attr);
+
+void __adeos_cleanup_domain(adomain_t *adp);
+
+#define __adeos_check_platform() do { } while(0)
+
+#define __adeos_read_timebase() ({ unsigned long t; adeos_hw_tsc(t); t; })
+
+void __adeos_init_platform(void);
+
+void __adeos_enable_pipeline(void);
+
+void __adeos_disable_pipeline(void);
+
+void __adeos_init_stage(adomain_t *adp);
+
+void __adeos_sync_stage(unsigned long syncmask);
+
+int __adeos_ack_irq(unsigned irq);
+
+void __adeos_do_IRQ(int irq,
+                   struct pt_regs *regs);
+
+void __adeos_do_timer(int irq,
+                     struct pt_regs *regs);
+
+struct thread_info *__adeos_current_threadinfo(void);
+
+#ifdef CONFIG_ADEOS_THREADS
+
+int __adeos_switch_domain(adomain_t *adp,
+                         adomain_t **currentp);
+
+/* Called with hw interrupts off. */
+static inline void __adeos_switch_to (adomain_t *out,
+                                     adomain_t *in,
+                                     int cpuid)
+{
+    extern adomain_t *adp_cpu_current[];
+
+    __adeos_switch_domain(in,&adp_cpu_current[cpuid]);
+
+    if (out->dswitch != NULL)
+       out->dswitch();
+}
+
+#endif /* CONFIG_ADEOS_THREADS */
+
+extern struct pt_regs __adeos_irq_regs;
+
+extern unsigned long __adeos_virtual_irq_map;
+
+extern unsigned long __adeos_decr_ticks;
+
+extern unsigned long __adeos_decr_next[];
+
+#endif /* !__PPC64_ADEOS_H */
diff -Nru linux-2.6.10/include/asm-ppc64/hw_irq.h linux-2.6.10-adeos-ppc64-r3/include/asm-ppc64/hw_irq.h
--- linux-2.6.10/include/asm-ppc64/hw_irq.h     2004-12-24 23:35:40.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/include/asm-ppc64/hw_irq.h      2005-11-13 11:45:31.000000000 +0200
@@ -19,6 +19,37 @@
 int timer_interrupt(struct pt_regs *);
 extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq);
 
+#ifdef CONFIG_ADEOS_CORE
+
+void __adeos_stall_root(void);
+void __adeos_unstall_root(void);
+unsigned long __adeos_test_root(void);
+unsigned long __adeos_test_and_stall_root(void);
+void __adeos_restore_root(unsigned long flags);
+
+#define irqs_disabled()  __adeos_test_root()
+
+static inline void local_irq_disable(void) {
+    __adeos_stall_root();
+}
+
+static inline void local_irq_enable(void) {
+    __adeos_unstall_root();
+}
+
+static inline void local_irq_save_ptr(unsigned long *flags) {
+    *flags = __adeos_test_and_stall_root();
+}
+
+static inline void local_irq_restore(unsigned long flags) {
+    __adeos_restore_root(flags);
+}
+
+#define local_save_flags(flags)                ((flags) = __adeos_test_root())
+#define local_irq_save(flags)          local_irq_save_ptr(&flags)
+
+#else /* !CONFIG_ADEOS_CORE */
+
 #ifdef CONFIG_PPC_ISERIES
 
 extern unsigned long local_get_flags(void);
@@ -75,6 +106,8 @@
 
 #endif /* CONFIG_PPC_ISERIES */
 
+#endif /* CONFIG_ADEOS_CORE */
+
 #define mask_irq(irq)                                          \
        ({                                                      \
                irq_desc_t *desc = get_irq_desc(irq);           \
diff -Nru linux-2.6.10/include/asm-ppc64/mmu_context.h linux-2.6.10-adeos-ppc64-r3/include/asm-ppc64/mmu_context.h
--- linux-2.6.10/include/asm-ppc64/mmu_context.h        2004-12-24 23:34:31.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/include/asm-ppc64/mmu_context.h 2005-11-13 11:45:31.000000000 +0200
@@ -82,9 +82,17 @@
 {
        unsigned long flags;
 
+#ifdef CONFIG_ADEOS_CORE
+       adeos_hw_local_irq_save(flags);
+#else /* !CONFIG_ADEOS_CORE */
        local_irq_save(flags);
+#endif /* CONFIG_ADEOS_CORE */
        switch_mm(prev, next, current);
+#ifdef CONFIG_ADEOS_CORE
+       adeos_hw_local_irq_restore(flags);
+#else /* !CONFIG_ADEOS_CORE */
        local_irq_restore(flags);
+#endif /* CONFIG_ADEOS_CORE */
 }
 
 /* VSID allocation
diff -Nru linux-2.6.10/include/asm-ppc64/smp.h linux-2.6.10-adeos-ppc64-r3/include/asm-ppc64/smp.h
--- linux-2.6.10/include/asm-ppc64/smp.h        2004-12-24 23:33:47.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/include/asm-ppc64/smp.h 2005-11-13 11:45:31.000000000 +0200
@@ -37,8 +37,12 @@
 struct pt_regs;
 extern void smp_message_recv(int, struct pt_regs *);
 
-
+#ifdef CONFIG_ADEOS_CORE
+#include <asm/adeos.h>
+#define smp_processor_id() adeos_processor_id()
+#else /* !CONFIG_ADEOS_CORE */
 #define smp_processor_id() (get_paca()->paca_index)
+#endif /* CONFIG_ADEOS_CORE */
 #define hard_smp_processor_id() (get_paca()->hw_cpu_id)
 
 extern cpumask_t cpu_sibling_map[NR_CPUS];
diff -Nru linux-2.6.10/include/asm-ppc64/time.h linux-2.6.10-adeos-ppc64-r3/include/asm-ppc64/time.h
--- linux-2.6.10/include/asm-ppc64/time.h       2004-12-24 23:34:44.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/include/asm-ppc64/time.h        2005-11-13 11:45:31.000000000 +0200
@@ -23,6 +23,9 @@
 #include <asm/iSeries/HvCall.h>
 
 /* time.c */
+#ifdef CONFIG_ADEOS_CORE
+extern unsigned long disarm_decr[NR_CPUS];
+#endif /* CONFIG_ADEOS_CORE */
 extern unsigned long tb_ticks_per_jiffy;
 extern unsigned long tb_ticks_per_usec;
 extern unsigned long tb_ticks_per_sec;
diff -Nru linux-2.6.10/include/linux/adeos.h linux-2.6.10-adeos-ppc64-r3/include/linux/adeos.h
--- linux-2.6.10/include/linux/adeos.h  1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/include/linux/adeos.h   2005-11-13 11:45:31.000000000 +0200
@@ -0,0 +1,553 @@
+/*
+ *   include/linux/adeos.h
+ *
+ *   Copyright (C) 2002,2003,2004 Philippe Gerum.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LINUX_ADEOS_H
+#define __LINUX_ADEOS_H
+
+#include <linux/config.h>
+
+#ifdef CONFIG_ADEOS_CORE
+
+#include <linux/spinlock.h>
+#include <asm/adeos.h>
+
+#define ADEOS_VERSION_PREFIX  "2.6"
+#define ADEOS_VERSION_STRING  (ADEOS_VERSION_PREFIX ADEOS_ARCH_STRING)
+#define ADEOS_RELEASE_NUMBER  (0x02060000|((ADEOS_MAJOR_NUMBER&0xff)<<8)|(ADEOS_MINOR_NUMBER&0xff))
+
+#define ADEOS_ROOT_PRI       100
+#define ADEOS_ROOT_ID        0
+#define ADEOS_ROOT_NPTDKEYS  4 /* Must be <= 32 */
+
+#define ADEOS_RESET_TIMER  0x1
+#define ADEOS_SAME_HANDLER ((void (*)(unsigned))(-1))
+
+/* Global domain flags */
+#define ADEOS_SPRINTK_FLAG 0   /* Synchronous printk() allowed */
+#define ADEOS_PPRINTK_FLAG 1   /* Asynchronous printk() request pending */
+
+/* Per-cpu pipeline flags.
+   WARNING: some implementations might refer to these flags
+   non-symbolically in assembly portions (e.g. x86). */
+#define IPIPE_STALL_FLAG   0   /* Stalls a pipeline stage */
+#define IPIPE_XPEND_FLAG   1   /* Exception notification is pending */
+#define IPIPE_SLEEP_FLAG   2   /* Domain has self-suspended */
+#define IPIPE_SYNC_FLAG    3   /* The interrupt syncer is running for the domain */
+
+#define IPIPE_HANDLE_FLAG    0
+#define IPIPE_PASS_FLAG      1
+#define IPIPE_ENABLE_FLAG    2
+#define IPIPE_DYNAMIC_FLAG   IPIPE_HANDLE_FLAG
+#define IPIPE_EXCLUSIVE_FLAG 3
+#define IPIPE_STICKY_FLAG    4
+#define IPIPE_SYSTEM_FLAG    5
+#define IPIPE_LOCK_FLAG      6
+#define IPIPE_SHARED_FLAG    7
+#define IPIPE_CALLASM_FLAG   8 /* Arch-dependent -- might be unused. */
+
+#define IPIPE_HANDLE_MASK    (1 << IPIPE_HANDLE_FLAG)
+#define IPIPE_PASS_MASK      (1 << IPIPE_PASS_FLAG)
+#define IPIPE_ENABLE_MASK    (1 << IPIPE_ENABLE_FLAG)
+#define IPIPE_DYNAMIC_MASK   IPIPE_HANDLE_MASK
+#define IPIPE_EXCLUSIVE_MASK (1 << IPIPE_EXCLUSIVE_FLAG)
+#define IPIPE_STICKY_MASK    (1 << IPIPE_STICKY_FLAG)
+#define IPIPE_SYSTEM_MASK    (1 << IPIPE_SYSTEM_FLAG)
+#define IPIPE_LOCK_MASK      (1 << IPIPE_LOCK_FLAG)
+#define IPIPE_SHARED_MASK    (1 << IPIPE_SHARED_FLAG)
+#define IPIPE_SYNC_MASK      (1 << IPIPE_SYNC_FLAG)
+#define IPIPE_CALLASM_MASK   (1 << IPIPE_CALLASM_FLAG)
+
+#define IPIPE_DEFAULT_MASK  (IPIPE_HANDLE_MASK|IPIPE_PASS_MASK)
+
+typedef struct adattr {
+
+    unsigned domid;            /* Domain identifier -- Magic value set by caller */
+    const char *name;          /* Domain name -- Warning: won't be dup'ed! */
+    int priority;              /* Priority in interrupt pipeline */
+    void (*entry)(int);                /* Domain entry point */
+    int estacksz;              /* Stack size for entry context -- 0 means unspec */
+    void (*dswitch)(void);     /* Handler called each time the domain is switched in */
+    int nptdkeys;              /* Max. number of per-thread data keys */
+    void (*ptdset)(int,void *);        /* Routine to set pt values */
+    void *(*ptdget)(int);      /* Routine to get pt values */
+
+} adattr_t;
+
+typedef struct admutex {
+
+    raw_spinlock_t lock;
+
+#ifdef CONFIG_ADEOS_THREADS
+    adomain_t *sleepq, /* Pending domain queue */
+             *owner;   /* Domain owning the mutex */
+#ifdef CONFIG_SMP
+    volatile int owncpu;
+#define ADEOS_MUTEX_UNLOCKED { RAW_SPIN_LOCK_UNLOCKED, NULL, NULL, -1 }
+#else  /* !CONFIG_SMP */
+#define ADEOS_MUTEX_UNLOCKED { RAW_SPIN_LOCK_UNLOCKED, NULL, NULL }
+#endif /* CONFIG_SMP */
+#else /* !CONFIG_ADEOS_THREADS */
+#define ADEOS_MUTEX_UNLOCKED { RAW_SPIN_LOCK_UNLOCKED }
+#endif /* CONFIG_ADEOS_THREADS */
+
+} admutex_t;
+
+typedef void (*adevhand_t)(adevinfo_t *);
+
+extern int adp_pipelined;
+
+extern adomain_t *adp_cpu_current[],
+                 *adp_root;
+
+extern int __adeos_event_monitors[];
+
+extern unsigned __adeos_printk_virq;
+
+extern unsigned long __adeos_virtual_irq_map;
+
+extern struct list_head __adeos_pipeline;
+
+extern raw_spinlock_t __adeos_pipelock;
+
+#ifdef CONFIG_ADEOS_PROFILING
+
+typedef struct adprofdata {
+
+    struct {
+       unsigned long long t_handled;
+       unsigned long long t_synced;
+       unsigned long n_handled;
+       unsigned long n_synced;
+    } irqs[IPIPE_NR_IRQS];
+
+} adprofdata_t;
+
+extern adprofdata_t __adeos_profile_data[ADEOS_NR_CPUS];
+
+#endif /* CONFIG_ADEOS_PROFILING */
+
+/* Private interface */
+
+#ifdef CONFIG_PROC_FS
+void __adeos_init_proc(void);
+#endif /* CONFIG_PROC_FS */
+
+void __adeos_takeover(void);
+
+asmlinkage int __adeos_handle_event(unsigned event,
+                                   void *evdata);
+
+void __adeos_flush_printk(unsigned irq);
+
+void __adeos_dump_state(void);
+
+static inline void __adeos_schedule_head(void *evdata) {
+
+    if (__adeos_event_monitors[ADEOS_SCHEDULE_HEAD] > 0)
+       __adeos_handle_event(ADEOS_SCHEDULE_HEAD,evdata);
+}
+
+static inline int __adeos_schedule_tail(void *evdata) {
+
+    if (__adeos_event_monitors[ADEOS_SCHEDULE_TAIL] > 0)
+       return __adeos_handle_event(ADEOS_SCHEDULE_TAIL,evdata);
+
+    return 0;
+}
+
+static inline void __adeos_enter_process(void) {
+
+    if (__adeos_event_monitors[ADEOS_ENTER_PROCESS] > 0)
+       __adeos_handle_event(ADEOS_ENTER_PROCESS,NULL);
+}
+
+static inline void __adeos_exit_process(void *evdata) {
+
+    if (__adeos_event_monitors[ADEOS_EXIT_PROCESS] > 0)
+       __adeos_handle_event(ADEOS_EXIT_PROCESS,evdata);
+}
+
+static inline int __adeos_signal_process(void *evdata) {
+
+    if (__adeos_event_monitors[ADEOS_SIGNAL_PROCESS] > 0)
+       return __adeos_handle_event(ADEOS_SIGNAL_PROCESS,evdata);
+
+    return 0;
+}
+
+static inline void __adeos_kick_process(void *evdata) {
+
+    if (__adeos_event_monitors[ADEOS_KICK_PROCESS] > 0)
+       __adeos_handle_event(ADEOS_KICK_PROCESS,evdata);
+}
+
+static inline int __adeos_renice_process(void *evdata) {
+
+    if (__adeos_event_monitors[ADEOS_RENICE_PROCESS] > 0)
+       return __adeos_handle_event(ADEOS_RENICE_PROCESS,evdata);
+
+    return 0;
+}
+
+void __adeos_stall_root(void);
+
+void __adeos_unstall_root(void);
+
+unsigned long __adeos_test_root(void);
+
+unsigned long __adeos_test_and_stall_root(void);
+
+void fastcall __adeos_restore_root(unsigned long flags);
+
+void __adeos_schedule_back_root(struct task_struct *prev);
+
+int __adeos_setscheduler_root(struct task_struct *p,
+                             int policy,
+                             int prio);
+
+void __adeos_reenter_root(struct task_struct *prev,
+                         int policy,
+                         int prio);
+
+int fastcall __adeos_schedule_irq(unsigned irq,
+                                 struct list_head *head);
+
+#define __adeos_pipeline_head_p(adp) (&(adp)->p_link == __adeos_pipeline.next)
+
+#ifdef CONFIG_ADEOS_THREADS
+
+static inline int __adeos_domain_work_p (adomain_t *adp, int cpuid)
+
+{
+    return (!test_bit(IPIPE_SLEEP_FLAG,&adp->cpudata[cpuid].status) ||
+           (!test_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status) &&
+            adp->cpudata[cpuid].irq_pending_hi != 0) ||
+           test_bit(IPIPE_XPEND_FLAG,&adp->cpudata[cpuid].status));
+}
+
+#else /* !CONFIG_ADEOS_THREADS */
+
+static inline int __adeos_domain_work_p (adomain_t *adp, int cpuid)
+
+{
+    return (!test_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status) &&
+           adp->cpudata[cpuid].irq_pending_hi != 0);
+}
+
+static inline void __adeos_switch_to (adomain_t *out, adomain_t *in, int cpuid)
+
+{
+    void adeos_suspend_domain(void);
+
+    /* "in" is guaranteed to be closer than "out" from the head of the
+       pipeline (and obviously different). */
+
+    adp_cpu_current[cpuid] = in;
+
+    if (in->dswitch)
+       in->dswitch();
+
+    adeos_suspend_domain(); /* Sync stage and propagate interrupts. */
+    adeos_load_cpuid(); /* Processor might have changed. */
+
+    if (adp_cpu_current[cpuid] == in)
+       /* Otherwise, something has changed the current domain under
+          our feet recycling the register set; do not override. */
+       adp_cpu_current[cpuid] = out;
+}
+
+#endif /* CONFIG_ADEOS_THREADS */
+
+/* Public interface */
+
+int adeos_register_domain(adomain_t *adp,
+                         adattr_t *attr);
+
+int adeos_unregister_domain(adomain_t *adp);
+
+void adeos_suspend_domain(void);
+
+int adeos_virtualize_irq_from(adomain_t *adp,
+                             unsigned irq,
+                             void (*handler)(unsigned irq),
+                             int (*acknowledge)(unsigned irq),
+                             unsigned modemask);
+
+static inline int adeos_virtualize_irq(unsigned irq,
+                                      void (*handler)(unsigned irq),
+                                      int (*acknowledge)(unsigned irq),
+                                      unsigned modemask) {
+
+    return adeos_virtualize_irq_from(adp_current,
+                                    irq,
+                                    handler,
+                                    acknowledge,
+                                    modemask);
+}
+
+int adeos_control_irq(unsigned irq,
+                     unsigned clrmask,
+                     unsigned setmask);
+
+cpumask_t adeos_set_irq_affinity(unsigned irq,
+                                cpumask_t cpumask);
+
+static inline int adeos_share_irq (unsigned irq, int (*acknowledge)(unsigned irq)) {
+
+    return adeos_virtualize_irq(irq,
+                               ADEOS_SAME_HANDLER,
+                               acknowledge,
+                               IPIPE_SHARED_MASK|IPIPE_HANDLE_MASK|IPIPE_PASS_MASK);
+}
+
+unsigned adeos_alloc_irq(void);
+
+int adeos_free_irq(unsigned irq);
+
+int fastcall adeos_trigger_irq(unsigned irq);
+
+static inline int adeos_propagate_irq(unsigned irq) {
+
+    return __adeos_schedule_irq(irq,adp_current->p_link.next);
+}
+
+static inline int adeos_schedule_irq(unsigned irq) {
+
+    return __adeos_schedule_irq(irq,&adp_current->p_link);
+}
+
+int fastcall adeos_send_ipi(unsigned ipi,
+                           cpumask_t cpumask);
+
+static inline void adeos_stall_pipeline_from (adomain_t *adp)
+
+{
+    adeos_declare_cpuid;
+#ifdef CONFIG_SMP
+    unsigned long flags;
+
+    adeos_lock_cpu(flags);
+
+    __set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+
+    if (!__adeos_pipeline_head_p(adp))
+       adeos_unlock_cpu(flags);
+#else /* CONFIG_SMP */
+    set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+
+    if (__adeos_pipeline_head_p(adp))
+       adeos_hw_cli();
+#endif /* CONFIG_SMP */
+}
+
+static inline unsigned long adeos_test_pipeline_from (adomain_t *adp)
+
+{
+    unsigned long flags, s;
+    adeos_declare_cpuid;
+    
+    adeos_get_cpu(flags);
+    s = test_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+    adeos_put_cpu(flags);
+
+    return s;
+}
+
+static inline unsigned long adeos_test_and_stall_pipeline_from (adomain_t *adp)
+
+{
+    adeos_declare_cpuid;
+    unsigned long s;
+#ifdef CONFIG_SMP
+    unsigned long flags;
+
+    adeos_lock_cpu(flags);
+
+    s = __test_and_set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+
+    if (!__adeos_pipeline_head_p(adp))
+       adeos_unlock_cpu(flags);
+#else /* CONFIG_SMP */
+    s = test_and_set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+
+    if (__adeos_pipeline_head_p(adp))
+       adeos_hw_cli();
+#endif /* CONFIG_SMP */
+    
+    return s;
+}
+
+void fastcall adeos_unstall_pipeline_from(adomain_t *adp);
+
+static inline unsigned long adeos_test_and_unstall_pipeline_from(adomain_t *adp)
+
+{
+    unsigned long flags, s;
+    adeos_declare_cpuid;
+    
+    adeos_get_cpu(flags);
+    s = test_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+    adeos_unstall_pipeline_from(adp);
+    adeos_put_cpu(flags);
+
+    return s;
+}
+
+static inline void adeos_unstall_pipeline(void)
+
+{
+    adeos_unstall_pipeline_from(adp_current);
+}
+
+static inline unsigned long adeos_test_and_unstall_pipeline(void)
+
+{
+    return adeos_test_and_unstall_pipeline_from(adp_current);
+}
+
+static inline unsigned long adeos_test_pipeline (void)
+
+{
+    return adeos_test_pipeline_from(adp_current);
+}
+
+static inline unsigned long adeos_test_and_stall_pipeline (void)
+
+{
+    return adeos_test_and_stall_pipeline_from(adp_current);
+}
+
+static inline void adeos_restore_pipeline_from (adomain_t *adp, unsigned long flags)
+
+{
+    if (flags)
+       adeos_stall_pipeline_from(adp);
+    else
+       adeos_unstall_pipeline_from(adp);
+}
+
+static inline void adeos_stall_pipeline (void)
+
+{
+    adeos_stall_pipeline_from(adp_current);
+}
+
+static inline void adeos_restore_pipeline (unsigned long flags)
+
+{
+    adeos_restore_pipeline_from(adp_current,flags);
+}
+
+static inline void adeos_restore_pipeline_nosync (adomain_t *adp, unsigned long flags, int cpuid)
+
+{
+    /* If cpuid is current, then it must be held on entry
+       (adeos_get_cpu/adeos_hw_local_irq_save/adeos_hw_cli). */
+
+    if (flags)
+       __set_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+    else
+       __clear_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+}
+
+adevhand_t adeos_catch_event_from(adomain_t *adp,
+                                 unsigned event,
+                                 adevhand_t handler);
+
+static inline adevhand_t adeos_catch_event (unsigned event, adevhand_t handler)
+
+{
+    return adeos_catch_event_from(adp_current,event,handler);
+}
+
+static inline void adeos_propagate_event(adevinfo_t *evinfo)
+
+{
+    evinfo->propagate = 1;
+}
+
+void adeos_init_attr(adattr_t *attr);
+
+int adeos_get_sysinfo(adsysinfo_t *sysinfo);
+
+int adeos_tune_timer(unsigned long ns,
+                    int flags);
+
+int adeos_alloc_ptdkey(void);
+
+int adeos_free_ptdkey(int key);
+
+int adeos_set_ptd(int key,
+                 void *value);
+
+void *adeos_get_ptd(int key);
+
+unsigned long adeos_critical_enter(void (*syncfn)(void));
+
+void adeos_critical_exit(unsigned long flags);
+
+int adeos_init_mutex(admutex_t *mutex);
+
+int adeos_destroy_mutex(admutex_t *mutex);
+
+unsigned long fastcall adeos_lock_mutex(admutex_t *mutex);
+
+void fastcall adeos_unlock_mutex(admutex_t *mutex,
+                                unsigned long flags);
+
+static inline void adeos_set_printk_sync (adomain_t *adp) {
+    set_bit(ADEOS_SPRINTK_FLAG,&adp->flags);
+}
+
+static inline void adeos_set_printk_async (adomain_t *adp) {
+    clear_bit(ADEOS_SPRINTK_FLAG,&adp->flags);
+}
+
+#define spin_lock_irqsave_hw_cond(lock,flags)      spin_lock_irqsave_hw(lock,flags)
+#define spin_unlock_irqrestore_hw_cond(lock,flags) spin_unlock_irqrestore_hw(lock,flags)
+
+#define pic_irq_lock(irq)      \
+       do {            \
+               adeos_declare_cpuid; \
+               adeos_load_cpuid();             \
+               __adeos_lock_irq(adp_cpu_current[cpuid], cpuid, irq); \
+       } while(0)
+
+#define pic_irq_unlock(irq)    \
+       do {            \
+               adeos_declare_cpuid; \
+               adeos_load_cpuid();          \
+               __adeos_unlock_irq(adp_cpu_current[cpuid], irq); \
+       } while(0)
+
+#else  /* !CONFIG_ADEOS_CORE */
+
+#define spin_lock_irqsave_hw(lock,flags)      spin_lock_irqsave(lock, flags)
+#define spin_unlock_irqrestore_hw(lock,flags) spin_unlock_irqrestore(lock, flags)
+#define spin_lock_irqsave_hw_cond(lock,flags)      do { flags = 0; spin_lock(lock); } while(0)
+#define spin_unlock_irqrestore_hw_cond(lock,flags) spin_unlock(lock)
+
+#define pic_irq_lock(irq)      do { } while(0)
+#define pic_irq_unlock(irq)    do { } while(0)
+
+#endif /* CONFIG_ADEOS_CORE */
+
+#endif /* !__LINUX_ADEOS_H */
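
For readers new to this layer: the header above is the whole public API a
client domain sees. A minimal sketch of how a module might use it follows;
the domain id, priority and IRQ number are made-up illustration values, and
the entry/suspend loop assumes CONFIG_ADEOS_THREADS.

    #include <linux/module.h>
    #include <linux/adeos.h>

    static adomain_t my_domain;

    static void my_irq_handler (unsigned irq)
    {
        /* Do the time-critical work here, then hand the IRQ down
           the pipeline so Linux still sees it. */
        adeos_propagate_irq(irq);
    }

    static void my_entry (int iflag)
    {
        if (iflag)
            adeos_virtualize_irq(42,    /* made-up IRQ number */
                                 &my_irq_handler,
                                 NULL,  /* default acknowledge */
                                 IPIPE_DYNAMIC_MASK);
        for (;;)
            adeos_suspend_domain();     /* Sleep until the next IRQ. */
    }

    static int __init my_init (void)
    {
        adattr_t attr;

        adeos_init_attr(&attr);         /* Sane defaults first. */
        attr.name = "Example";
        attr.domid = 0x12345678;        /* made-up magic id */
        attr.priority = 200;            /* ahead of the root domain (100) */
        attr.entry = &my_entry;
        return adeos_register_domain(&my_domain,&attr);
    }
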
diff -Nru linux-2.6.10/include/linux/preempt.h linux-2.6.10-adeos-ppc64-r3/include/linux/preempt.h
--- linux-2.6.10/include/linux/preempt.h        2004-12-24 23:34:26.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/include/linux/preempt.h 2005-11-13 11:45:31.000000000 +0200
@@ -25,6 +25,47 @@
 
 asmlinkage void preempt_schedule(void);
 
+#ifdef CONFIG_ADEOS_CORE
+
+#include <asm/adeos.h>
+
+extern adomain_t *adp_cpu_current[],
+                 *adp_root;
+
+#define preempt_disable() \
+do { \
+       if (adp_current == adp_root) { \
+           inc_preempt_count();       \
+           barrier(); \
+        } \
+} while (0)
+
+#define preempt_enable_no_resched() \
+do { \
+        if (adp_current == adp_root) { \
+           barrier(); \
+           dec_preempt_count(); \
+        } \
+} while (0)
+
+#define preempt_check_resched() \
+do { \
+        if (adp_current == adp_root) { \
+           if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+               preempt_schedule(); \
+        } \
+} while (0)
+
+#define preempt_enable() \
+do { \
+       if (adp_current == adp_root) { \
+           preempt_enable_no_resched(); \
+           preempt_check_resched(); \
+        } \
+} while (0)
+
+#else /* !CONFIG_ADEOS_CORE */
+
 #define preempt_disable() \
 do { \
        inc_preempt_count(); \
@@ -49,6 +90,8 @@
        preempt_check_resched(); \
 } while (0)
 
+#endif /* CONFIG_ADEOS_CORE */
+
 #else
 
 #define preempt_disable()              do { } while (0)
diff -Nru linux-2.6.10/include/linux/sched.h linux-2.6.10-adeos-ppc64-r3/include/linux/sched.h
--- linux-2.6.10/include/linux/sched.h  2004-12-24 23:33:59.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/include/linux/sched.h   2005-11-13 11:45:31.000000000 +0200
@@ -4,6 +4,9 @@
 #include <asm/param.h> /* for HZ */
 
 #include <linux/config.h>
+#ifdef CONFIG_ADEOS_CORE
+#include <linux/adeos.h>
+#endif /* CONFIG_ADEOS_CORE */
 #include <linux/capability.h>
 #include <linux/threads.h>
 #include <linux/kernel.h>
@@ -664,6 +667,10 @@
        struct mempolicy *mempolicy;
        short il_next;          /* could be shared with used_math */
 #endif
+
+#ifdef CONFIG_ADEOS_CORE
+        void *ptd[ADEOS_ROOT_NPTDKEYS];
+#endif /* CONFIG_ADEOS_CORE */
 };
 
 static inline pid_t process_group(struct task_struct *tsk)
diff -Nru linux-2.6.10/init/main.c linux-2.6.10-adeos-ppc64-r3/init/main.c
--- linux-2.6.10/init/main.c    2004-12-24 23:34:01.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/init/main.c     2005-11-13 11:45:31.000000000 +0200
@@ -526,6 +526,11 @@
        init_timers();
        softirq_init();
        time_init();
+#ifdef CONFIG_ADEOS_CORE
+       /* On PPC, we need calibrated values for the decrementer to
+          initialize, so run time_init() first. */
+       __adeos_init();
+#endif /* CONFIG_ADEOS_CORE */
 
        /*
         * HACK ALERT! This is early. We're enabling the console before
@@ -652,6 +657,11 @@
        sock_init();
 
        do_initcalls();
+
+#ifdef CONFIG_ADEOS
+       /* i.e. Permanent pipelining from boot onwards. */
+       __adeos_takeover();
+#endif /* CONFIG_ADEOS */
 }
 
 static void do_pre_smp_initcalls(void)
diff -Nru linux-2.6.10/kernel/adeos.c linux-2.6.10-adeos-ppc64-r3/kernel/adeos.c
--- linux-2.6.10/kernel/adeos.c 1970-01-01 02:00:00.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/kernel/adeos.c  2005-11-13 11:45:31.000000000 +0200
@@ -0,0 +1,826 @@
+/*
+ *   linux/kernel/adeos.c
+ *
+ *   Copyright (C) 2002,2003,2004 Philippe Gerum.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ *   Architecture-independent ADEOS core support.
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+#endif /* CONFIG_PROC_FS */
+
+/* The pre-defined domain slot for the root domain. */
+static adomain_t adeos_root_domain;
+
+/* A constant pointer to the root domain. */
+adomain_t *adp_root = &adeos_root_domain;
+
+/* A pointer to the current domain. */
+adomain_t *adp_cpu_current[ADEOS_NR_CPUS] = { [ 0 ... ADEOS_NR_CPUS - 1] = &adeos_root_domain };
+
+/* The spinlock protecting from races while modifying the pipeline. */
+raw_spinlock_t __adeos_pipelock = RAW_SPIN_LOCK_UNLOCKED;
+
+/* The pipeline data structure. Enqueues adomain_t objects by priority. */
+struct list_head __adeos_pipeline;
+
+/* A global flag telling whether Adeos pipelining is engaged. */
+int adp_pipelined;
+
+/* An array of global counters tracking domains monitoring events. */
+int __adeos_event_monitors[ADEOS_NR_EVENTS] = { [ 0 ... ADEOS_NR_EVENTS - 1] = 0 };
+
+/* The allocated VIRQ map. */
+unsigned long __adeos_virtual_irq_map = 0;
+
+/* A VIRQ to kick printk() output out when the root domain is in control. */
+unsigned __adeos_printk_virq;
+
+#ifdef CONFIG_ADEOS_PROFILING
+adprofdata_t __adeos_profile_data[ADEOS_NR_CPUS];
+#endif /* CONFIG_ADEOS_PROFILING */
+
+static void __adeos_set_root_ptd (int key, void *value) {
+
+    current->ptd[key] = value;
+}
+
+static void *__adeos_get_root_ptd (int key) {
+
+    return current->ptd[key];
+}
+
+/* adeos_init() -- Initialization routine of the ADEOS layer. Called
+   by the host kernel early during the boot procedure. */
+
+void __adeos_init (void)
+
+{
+    adomain_t *adp = &adeos_root_domain;
+
+    __adeos_check_platform();  /* Do platform dependent checks first. */
+
+    /*
+      A lightweight registration code for the root domain. Current
+      assumptions are:
+      - We are running on the boot CPU, and secondary CPUs are still
+      lost in space.
+      - adeos_root_domain has been zero'ed.
+    */
+
+    INIT_LIST_HEAD(&__adeos_pipeline);
+
+    adp->name = "Linux";
+    adp->domid = ADEOS_ROOT_ID;
+    adp->priority = ADEOS_ROOT_PRI;
+    adp->ptd_setfun = &__adeos_set_root_ptd;
+    adp->ptd_getfun = &__adeos_get_root_ptd;
+    adp->ptd_keymax = ADEOS_ROOT_NPTDKEYS;
+
+    __adeos_init_stage(adp);
+
+    INIT_LIST_HEAD(&adp->p_link);
+    list_add_tail(&adp->p_link,&__adeos_pipeline);
+
+    __adeos_init_platform();
+
+    __adeos_printk_virq = adeos_alloc_irq(); /* Cannot fail here. */
+    adp->irqs[__adeos_printk_virq].handler = &__adeos_flush_printk; 
+    adp->irqs[__adeos_printk_virq].acknowledge = NULL; 
+    adp->irqs[__adeos_printk_virq].control = IPIPE_HANDLE_MASK; 
+
+    printk(KERN_INFO "Adeos %s: Root domain %s registered.\n",
+          ADEOS_VERSION_STRING,
+          adp->name);
+}
+
+/* adeos_handle_event() -- Adeos' generic event handler. This routine
+   calls the per-domain handlers registered for a given
+   exception/event. Each domain before the one which raised the event
+   in the pipeline will get a chance to process the event. The latter
+   will eventually be allowed to process its own event too if a valid
+   handler exists for it.  Handler executions are always scheduled by
+   the domain which raised the event for the higher priority domains
+   wanting to be notified of such event.  Note: evdata might be
+   NULL. */
+
+#ifdef CONFIG_ADEOS_THREADS
+
+asmlinkage int __adeos_handle_event (unsigned event, void *evdata)
+/* asmlinkage is there just in case CONFIG_REGPARM is enabled... */
+{
+    struct list_head *pos, *npos;
+    adomain_t *this_domain;
+    unsigned long flags;
+    adeos_declare_cpuid;
+    adevinfo_t evinfo;
+    int propagate = 1;
+
+    adeos_lock_cpu(flags);
+
+    this_domain = adp_cpu_current[cpuid];
+
+    list_for_each_safe(pos,npos,&__adeos_pipeline) {
+
+       adomain_t *next_domain = list_entry(pos,adomain_t,p_link);
+
+       if (next_domain->events[event].handler != NULL)
+           {
+           if (next_domain == this_domain)
+               {
+               adeos_unlock_cpu(flags);
+               evinfo.domid = this_domain->domid;
+               evinfo.event = event;
+               evinfo.evdata = evdata;
+               evinfo.propagate = 0;
+               this_domain->events[event].handler(&evinfo);
+               propagate = evinfo.propagate;
+               goto done;
+               }
+
+           next_domain->cpudata[cpuid].event_info.domid = this_domain->domid;
+           next_domain->cpudata[cpuid].event_info.event = event;
+           next_domain->cpudata[cpuid].event_info.evdata = evdata;
+           next_domain->cpudata[cpuid].event_info.propagate = 0;
+           __set_bit(IPIPE_XPEND_FLAG,&next_domain->cpudata[cpuid].status);
+
+           /* Let the higher priority domain process the event. */
+           __adeos_switch_to(this_domain,next_domain,cpuid);
+           
+           adeos_load_cpuid(); /* Processor might have changed. */
+
+           if (!next_domain->cpudata[cpuid].event_info.propagate)
+               {
+               propagate = 0;
+               break;
+               }
+           }
+
+       if (next_domain->cpudata[cpuid].irq_pending_hi != 0 &&
+           !test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status))
+           {
+           if (next_domain != this_domain)
+               __adeos_switch_to(this_domain,next_domain,cpuid);
+           else
+               __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+
+           adeos_load_cpuid(); /* Processor might have changed. */
+           }
+
+       if (next_domain == this_domain)
+           break;
+    }
+
+    adeos_unlock_cpu(flags);
+
+ done:
+
+    return !propagate;
+}
+
+#else /* !CONFIG_ADEOS_THREADS */
+
+asmlinkage int __adeos_handle_event (unsigned event, void *evdata)
+/* asmlinkage is there just in case CONFIG_REGPARM is enabled... */
+{
+    adomain_t *start_domain, *this_domain, *next_domain;
+    struct list_head *pos, *npos;
+    unsigned long flags;
+    adeos_declare_cpuid;
+    adevinfo_t evinfo;
+    int propagate = 1;
+
+    adeos_lock_cpu(flags);
+
+    start_domain = this_domain = adp_cpu_current[cpuid];
+
+    list_for_each_safe(pos,npos,&__adeos_pipeline) {
+
+       next_domain = list_entry(pos,adomain_t,p_link);
+
+       /*  Note: Domain migration may occur while running event or
+           interrupt handlers, in which case the current register set
+           is going to be recycled for a different domain than the
+           initiating one. We do care for that, always tracking the
+           current domain descriptor upon return from those
+           handlers. */
+
+       if (next_domain->events[event].handler != NULL)
+           {
+           adp_cpu_current[cpuid] = next_domain;
+           evinfo.domid = start_domain->domid;
+           adeos_unlock_cpu(flags);
+           evinfo.event = event;
+           evinfo.evdata = evdata;
+           evinfo.propagate = 0;
+           next_domain->events[event].handler(&evinfo);
+           adeos_lock_cpu(flags);
+
+           if (adp_cpu_current[cpuid] != next_domain)
+               this_domain = adp_cpu_current[cpuid];
+
+           propagate = evinfo.propagate;
+           }
+
+       if (next_domain->cpudata[cpuid].irq_pending_hi != 0 &&
+           !test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status))
+           {
+           adp_cpu_current[cpuid] = next_domain;
+           __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+           adeos_load_cpuid();
+
+           if (adp_cpu_current[cpuid] != next_domain)
+               this_domain = adp_cpu_current[cpuid];
+           }
+
+       adp_cpu_current[cpuid] = this_domain;
+
+       if (next_domain == this_domain || !propagate)
+           break;
+    }
+
+    adeos_unlock_cpu(flags);
+
+    return !propagate;
+}
+
+#endif /* CONFIG_ADEOS_THREADS */
+
+void __adeos_stall_root (void)
+
+{
+    if (adp_pipelined)
+       {
+       adeos_declare_cpuid;
+
+#ifdef CONFIG_SMP
+       unsigned long flags;
+       adeos_lock_cpu(flags);
+       __set_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
+       adeos_unlock_cpu(flags);
+#else /* !CONFIG_SMP */
+       set_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
+#endif /* CONFIG_SMP */
+       }
+    else
+       adeos_hw_cli();
+}
+
+void __adeos_unstall_root (void)
+
+{
+    if (adp_pipelined)
+       {
+       adeos_declare_cpuid;
+
+       adeos_hw_cli();
+
+       adeos_load_cpuid();
+
+       __clear_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
+
+       if (adp_root->cpudata[cpuid].irq_pending_hi != 0)
+           __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+       }
+
+    adeos_hw_sti();    /* Needed in both cases. */
+}
+
+unsigned long __adeos_test_root (void)
+
+{
+    if (adp_pipelined)
+       {
+       adeos_declare_cpuid;
+       unsigned long s;
+
+#ifdef CONFIG_SMP
+       unsigned long flags;
+       adeos_lock_cpu(flags);
+       s = test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
+       adeos_unlock_cpu(flags);
+#else /* !CONFIG_SMP */
+       s = test_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
+#endif /* CONFIG_SMP */
+
+       return s;
+       }
+
+    return adeos_hw_irqs_disabled();
+}
+
+unsigned long __adeos_test_and_stall_root (void)
+
+{
+    unsigned long flags;
+
+    if (adp_pipelined)
+       {
+       adeos_declare_cpuid;
+       unsigned long s;
+
+#ifdef CONFIG_SMP
+       adeos_lock_cpu(flags);
+       s = __test_and_set_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
+       adeos_unlock_cpu(flags);
+#else /* !CONFIG_SMP */
+       s = test_and_set_bit(IPIPE_STALL_FLAG,&adp_root->cpudata[cpuid].status);
+#endif /* CONFIG_SMP */
+
+       return s;
+       }
+
+    adeos_hw_local_irq_save(flags);
+
+    return !adeos_hw_test_iflag(flags);
+}
+
+void fastcall __adeos_restore_root (unsigned long flags)
+
+{
+    if (flags)
+       __adeos_stall_root();
+    else
+       __adeos_unstall_root();
+}
+
+/* adeos_unstall_pipeline_from() -- Unstall the interrupt pipeline and
+   synchronize pending events from a given domain. */
+
+void fastcall adeos_unstall_pipeline_from (adomain_t *adp)
+
+{
+    adomain_t *this_domain;
+    struct list_head *pos;
+    unsigned long flags;
+    adeos_declare_cpuid;
+
+    adeos_lock_cpu(flags);
+
+    __clear_bit(IPIPE_STALL_FLAG,&adp->cpudata[cpuid].status);
+
+    this_domain = adp_cpu_current[cpuid];
+
+    if (adp == this_domain)
+       {
+       if (adp->cpudata[cpuid].irq_pending_hi != 0)
+           __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+
+       goto release_cpu_and_exit;
+       }
+
+    /* Attempt to flush all events that might be pending at the
+       unstalled domain level. This code is roughly lifted from
+       __adeos_walk_pipeline(). */
+
+    list_for_each(pos,&__adeos_pipeline) {
+
+       adomain_t *next_domain = list_entry(pos,adomain_t,p_link);
+
+       if (test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status))
+           break; /* Stalled stage -- do not go further. */
+
+       if (next_domain->cpudata[cpuid].irq_pending_hi != 0)
+           {
+           /* Since the critical IPI might be triggered by the
+              following actions, the current domain might not be
+              linked to the pipeline anymore after its handler
+              returns on SMP boxen, even if the domain remains valid
+              (see adeos_unregister_domain()), so don't make any
+              hazardous assumptions here. */
+
+           if (next_domain == this_domain)
+               __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+           else
+               {
+               __adeos_switch_to(this_domain,next_domain,cpuid);
+
+               adeos_load_cpuid(); /* Processor might have changed. */
+
+               if (this_domain->cpudata[cpuid].irq_pending_hi != 0 &&
+                   !test_bit(IPIPE_STALL_FLAG,&this_domain->cpudata[cpuid].status))
+                   __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+               }
+           
+           break;
+           }
+       else if (next_domain == this_domain)
+           break;
+    }
+
+release_cpu_and_exit:
+
+    if (__adeos_pipeline_head_p(adp))
+       adeos_hw_sti();
+    else
+       adeos_unlock_cpu(flags);
+}
+
+/* adeos_suspend_domain() -- tell the ADEOS layer that the current
+   domain is now dormant. The calling domain is switched out, while
+   the next domain with work in progress or pending in the pipeline is
+   switched in. */
+
+#ifdef CONFIG_ADEOS_THREADS
+
+#define __flush_pipeline_stage() \
+do { \
+    if (!test_bit(IPIPE_STALL_FLAG,&cpudata->status) && \
+       cpudata->irq_pending_hi != 0) \
+       { \
+       __adeos_sync_stage(IPIPE_IRQMASK_ANY); \
+       adeos_load_cpuid(); \
+       cpudata = &this_domain->cpudata[cpuid]; \
+       } \
+} while(0)
+
+void adeos_suspend_domain (void)
+
+{
+    adomain_t *this_domain, *next_domain;
+    struct adcpudata *cpudata;
+    struct list_head *ln;
+    unsigned long flags;
+    adeos_declare_cpuid;
+
+    adeos_lock_cpu(flags);
+
+    this_domain = next_domain = adp_cpu_current[cpuid];
+    cpudata = &this_domain->cpudata[cpuid];
+
+    /* A suspending domain implicitly unstalls the pipeline. */
+    __clear_bit(IPIPE_STALL_FLAG,&cpudata->status);
+
+    /* Make sure that no event remains stuck in the pipeline. This
+       could happen with emerging SMP instances, or domains which
+       forget to unstall their stage before calling us. */
+    __flush_pipeline_stage();
+
+    for (;;)
+       {
+       ln = next_domain->p_link.next;
+
+       if (ln == &__adeos_pipeline)    /* End of pipeline reached? */
+           /* Caller should loop on its idle task on return. */
+           goto release_cpu_and_exit;
+
+       next_domain = list_entry(ln,adomain_t,p_link);
+
+       /* Make sure the domain was preempted (i.e. not sleeping) or
+          has some event to process before switching to it. */
+
+       if (__adeos_domain_work_p(next_domain,cpuid))
+           break;
+       }
+
+    /* Mark the outgoing domain as asleep (i.e. not preempted). */
+    __set_bit(IPIPE_SLEEP_FLAG,&cpudata->status);
+
+    /* Suspend the calling domain, switching to the next one. */
+    __adeos_switch_to(this_domain,next_domain,cpuid);
+
+#ifdef CONFIG_SMP
+    adeos_load_cpuid();        /* Processor might have changed. */
+    cpudata = &this_domain->cpudata[cpuid];
+#endif /* CONFIG_SMP */
+
+    /* Clear the sleep bit for the incoming domain. */
+    __clear_bit(IPIPE_SLEEP_FLAG,&cpudata->status);
+
+    /* Now, we are back into the calling domain. Flush the interrupt
+       log and fire the event interposition handler if needed.  CPU
+       migration is allowed in SMP-mode on behalf of an event handler
+       provided that the current domain raised it. Otherwise, it's
+       not. */
+
+    __flush_pipeline_stage();
+
+    if (__test_and_clear_bit(IPIPE_XPEND_FLAG,&cpudata->status))
+       {
+       adeos_unlock_cpu(flags);
+       this_domain->events[cpudata->event_info.event].handler(&cpudata->event_info);
+       return;
+       }
+
+release_cpu_and_exit:
+
+    adeos_unlock_cpu(flags);
+
+    /* Return to the point of suspension in the calling domain. */
+}
+
+#else /* !CONFIG_ADEOS_THREADS */
+
+void adeos_suspend_domain (void)
+
+{
+    adomain_t *this_domain, *next_domain;
+    struct list_head *ln;
+    unsigned long flags;
+    adeos_declare_cpuid;
+
+    adeos_lock_cpu(flags);
+
+    this_domain = next_domain = adp_cpu_current[cpuid];
+
+    __clear_bit(IPIPE_STALL_FLAG,&this_domain->cpudata[cpuid].status);
+
+    if (this_domain->cpudata[cpuid].irq_pending_hi != 0)
+       goto sync_stage;
+
+    for (;;)
+       {
+       ln = next_domain->p_link.next;
+
+       if (ln == &__adeos_pipeline)
+           break;
+
+       next_domain = list_entry(ln,adomain_t,p_link);
+
+       if (test_bit(IPIPE_STALL_FLAG,&next_domain->cpudata[cpuid].status))
+           break;
+
+       if (next_domain->cpudata[cpuid].irq_pending_hi == 0)
+           continue;
+
+       adp_cpu_current[cpuid] = next_domain;
+
+       if (next_domain->dswitch)
+           next_domain->dswitch();
+
+ sync_stage:
+
+       __adeos_sync_stage(IPIPE_IRQMASK_ANY);
+
+       adeos_load_cpuid();     /* Processor might have changed. */
+
+       if (adp_cpu_current[cpuid] != next_domain)
+           /* Something has changed the current domain under our feet
+              recycling the register set; take note. */
+           this_domain = adp_cpu_current[cpuid];
+       }
+
+    adp_cpu_current[cpuid] = this_domain;
+
+    adeos_unlock_cpu(flags);
+}
+
+#endif /* CONFIG_ADEOS_THREADS */
+
+/* adeos_alloc_irq() -- Allocate a virtual/soft pipelined interrupt.
+   Virtual interrupts are handled in exactly the same way as their
+   hw-generated counterparts. This is a very basic, one-way only,
+   inter-domain communication system (see adeos_trigger_irq()).  Note:
+   it is not necessary for a domain to allocate a virtual interrupt to
+   trap it using adeos_virtualize_irq(). The newly allocated VIRQ
+   number which can be passed to other IRQ-related services is
+   returned on success, zero otherwise (i.e. no more virtual interrupt
+   channel is available). We need this service as part of the Adeos
+   bootstrap code, hence it must reside in a built-in area. */
+
+unsigned adeos_alloc_irq (void)
+
+{
+    unsigned long flags, irq = 0;
+    int ipos;
+
+    spin_lock_irqsave_hw(&__adeos_pipelock,flags);
+
+    if (__adeos_virtual_irq_map != ~0)
+       {
+       ipos = ffz(__adeos_virtual_irq_map);
+       set_bit(ipos,&__adeos_virtual_irq_map);
+       irq = ipos + IPIPE_VIRQ_BASE;
+       }
+
+    spin_unlock_irqrestore_hw(&__adeos_pipelock,flags);
+
+    return irq;
+}
+
+#ifdef CONFIG_PROC_FS
+
+#include <linux/proc_fs.h>
+
+static struct proc_dir_entry *adeos_proc_entry;
+
+static int __adeos_read_proc (char *page,
+                             char **start,
+                             off_t off,
+                             int count,
+                             int *eof,
+                             void *data)
+{
+    unsigned long ctlbits;
+    struct list_head *pos;
+    unsigned irq, _irq;
+    char *p = page;
+    int len;
+
+#ifdef CONFIG_ADEOS_MODULE
+    p += sprintf(p,"Adeos %s -- Pipelining: 
%s",ADEOS_VERSION_STRING,adp_pipelined ? "active" : "stopped");
+#else /* !CONFIG_ADEOS_MODULE */
+    p += sprintf(p,"Adeos %s -- Pipelining: permanent",ADEOS_VERSION_STRING);
+#endif /* CONFIG_ADEOS_MODULE */
+#ifdef CONFIG_ADEOS_THREADS
+    p += sprintf(p, " (threaded)\n\n");
+#else                          /* CONFIG_ADEOS_THREADS */
+    p += sprintf(p, "\n\n");
+#endif                         /* CONFIG_ADEOS_THREADS */
+
+    spin_lock(&__adeos_pipelock);
+
+    list_for_each(pos,&__adeos_pipeline) {
+
+       adomain_t *adp = list_entry(pos,adomain_t,p_link);
+
+       p += sprintf(p,"%8s: priority=%d, id=0x%.8x, ptdkeys=%d/%d\n",
+                    adp->name,
+                    adp->priority,
+                    adp->domid,
+                    adp->ptd_keycount,
+                    adp->ptd_keymax);
+       irq = 0;
+
+       while (irq < IPIPE_NR_IRQS)
+           {
+           ctlbits = (adp->irqs[irq].control & 
(IPIPE_HANDLE_MASK|IPIPE_PASS_MASK|IPIPE_STICKY_MASK));
+
+           if (irq >= IPIPE_NR_XIRQS && !adeos_virtual_irq_p(irq))
+               {
+               /* There might be a hole between the last external IRQ
+                  and the first virtual one; skip it. */
+               irq++;
+               continue;
+               }
+
+           if (adeos_virtual_irq_p(irq) && !test_bit(irq - IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map))
+               {
+               /* Non-allocated virtual IRQ; skip it. */
+               irq++;
+               continue;
+               }
+
+           /* Attempt to group consecutive IRQ numbers having the
+              same virtualization settings in a single line. */
+
+           _irq = irq;
+
+           while (++_irq < IPIPE_NR_IRQS)
+               {
+               if (adeos_virtual_irq_p(_irq) != adeos_virtual_irq_p(irq) ||
+                   (adeos_virtual_irq_p(_irq) &&
+                    !test_bit(_irq - IPIPE_VIRQ_BASE,&__adeos_virtual_irq_map)) ||
+                   ctlbits != (adp->irqs[_irq].control & (IPIPE_HANDLE_MASK|IPIPE_PASS_MASK|IPIPE_STICKY_MASK)))
+                   break;
+               }
+
+           if (_irq == irq + 1)
+               p += sprintf(p,"\tirq%u: ",irq);
+           else
+               p += sprintf(p,"\tirq%u-%u: ",irq,_irq - 1);
+
+           /* Statuses are as follows:
+              o "accepted" means handled _and_ passed down the
+              pipeline.
+              o "grabbed" means handled, but the interrupt might be
+              terminated _or_ passed down the pipeline depending on
+              what the domain handler asks Adeos for.
+              o "passed" means unhandled by the domain but passed
+              down the pipeline.
+              o "discarded" means unhandled and _not_ passed down the
+              pipeline. The interrupt merely disappears from the
+              current domain down to the end of the pipeline. */
+
+           if (ctlbits & IPIPE_HANDLE_MASK)
+               {
+               if (ctlbits & IPIPE_PASS_MASK)
+                   p += sprintf(p,"accepted");
+               else
+                   p += sprintf(p,"grabbed");
+               }
+           else if (ctlbits & IPIPE_PASS_MASK)
+               p += sprintf(p,"passed");
+           else
+               p += sprintf(p,"discarded");
+
+           if (ctlbits & IPIPE_STICKY_MASK)
+               p += sprintf(p,", sticky");
+
+           if (adeos_virtual_irq_p(irq))
+               p += sprintf(p,", virtual");
+
+           p += sprintf(p,"\n");
+
+           irq = _irq;
+           }
+    }
+
+    spin_unlock(&__adeos_pipelock);
+
+    len = p - page;
+
+    if (len <= off + count)
+       *eof = 1;
+
+    *start = page + off;
+
+    len -= off;
+
+    if (len > count)
+       len = count;
+
+    if (len < 0)
+       len = 0;
+
+    return len;
+}
+
+void __adeos_init_proc (void) {
+
+    adeos_proc_entry = create_proc_read_entry("adeos",
+                                             0444,
+                                             NULL,
+                                             &__adeos_read_proc,
+                                             NULL);
+}
+
+#endif /* CONFIG_PROC_FS */
+
+void __adeos_dump_state (void)
+
+{
+    int _cpuid, nr_cpus = num_online_cpus();
+    struct list_head *pos;
+    unsigned long flags;
+    adeos_declare_cpuid;
+
+    adeos_lock_cpu(flags);
+
+    printk(KERN_WARNING "Adeos: Current domain=%s on CPU #%d [stackbase=%p]\n",
+          adp_current->name,
+          cpuid,
+#ifdef CONFIG_ADEOS_THREADS
+          (void *)adp_current->estackbase[cpuid]
+#else /* !CONFIG_ADEOS_THREADS */
+          current
+#endif /* CONFIG_ADEOS_THREADS */
+          );
+
+    list_for_each(pos,&__adeos_pipeline) {
+
+        adomain_t *adp = list_entry(pos,adomain_t,p_link);
+
+        for (_cpuid = 0; _cpuid < nr_cpus; _cpuid++)
+            printk(KERN_WARNING "%8s[cpuid=%d]: priority=%d, status=0x%lx, 
pending_hi=0x%lx\n",
+                   adp->name,
+                   _cpuid,
+                   adp->priority,
+                   adp->cpudata[_cpuid].status,
+                   adp->cpudata[_cpuid].irq_pending_hi);
+    }
+
+    adeos_unlock_cpu(flags);
+}
+
+EXPORT_SYMBOL(adeos_suspend_domain);
+EXPORT_SYMBOL(adeos_alloc_irq);
+EXPORT_SYMBOL(adp_cpu_current);
+EXPORT_SYMBOL(adp_root);
+EXPORT_SYMBOL(adp_pipelined);
+EXPORT_SYMBOL(__adeos_handle_event);
+EXPORT_SYMBOL(__adeos_unstall_root);
+EXPORT_SYMBOL(__adeos_stall_root);
+EXPORT_SYMBOL(__adeos_restore_root);
+EXPORT_SYMBOL(__adeos_test_and_stall_root);
+EXPORT_SYMBOL(__adeos_test_root);
+EXPORT_SYMBOL(__adeos_dump_state);
+EXPORT_SYMBOL(__adeos_pipeline);
+EXPORT_SYMBOL(__adeos_pipelock);
+EXPORT_SYMBOL(__adeos_virtual_irq_map);
+EXPORT_SYMBOL(__adeos_event_monitors);
+EXPORT_SYMBOL(adeos_unstall_pipeline_from);
+#ifdef CONFIG_ADEOS_PROFILING
+EXPORT_SYMBOL(__adeos_profile_data);
+#endif /* CONFIG_ADEOS_PROFILING */
+/* The following are convenience exports which are needed by some
+   Adeos domains loaded as kernel modules. */
+EXPORT_SYMBOL(do_exit);
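
The event monitors above are what the hooks added to exit.c, signal.c and
sched.c below actually feed. As a hedged illustration only (the evdata
layouts are the ad hoc structs visible in those hooks), a domain could
watch tasks exiting like this:

    static void my_exit_handler (adevinfo_t *evinfo)
    {
        struct task_struct *task = (struct task_struct *)evinfo->evdata;

        /* ... drop any per-task state this domain keeps for 'task' ... */

        adeos_propagate_event(evinfo);  /* let lower domains see it too */
    }

    /* Install from the watching domain's context: */
    adeos_catch_event(ADEOS_EXIT_PROCESS,&my_exit_handler);
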
diff -Nru linux-2.6.10/kernel/exit.c linux-2.6.10-adeos-ppc64-r3/kernel/exit.c
--- linux-2.6.10/kernel/exit.c  2004-12-24 23:35:27.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/kernel/exit.c   2005-11-13 11:45:31.000000000 +0200
@@ -809,6 +809,9 @@
        group_dead = atomic_dec_and_test(&tsk->signal->live);
        if (group_dead)
                acct_process(code);
+#ifdef CONFIG_ADEOS_CORE
+       __adeos_exit_process(tsk);
+#endif /* CONFIG_ADEOS_CORE */
        __exit_mm(tsk);
 
        exit_sem(tsk);
diff -Nru linux-2.6.10/kernel/fork.c linux-2.6.10-adeos-ppc64-r3/kernel/fork.c
--- linux-2.6.10/kernel/fork.c  2004-12-24 23:33:59.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/kernel/fork.c   2005-11-13 11:45:31.000000000 +0200
@@ -1021,6 +1021,14 @@
 
        nr_threads++;
        write_unlock_irq(&tasklist_lock);
+#ifdef CONFIG_ADEOS_CORE
+       {
+       int k;
+
+       for (k = 0; k < ADEOS_ROOT_NPTDKEYS; k++)
+           p->ptd[k] = NULL;
+       }
+#endif /* CONFIG_ADEOS_CORE */
        retval = 0;
 
 fork_out:
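
The hunk above zeroes the per-thread data (ptd) slots which adeos_set_ptd()
and adeos_get_ptd() operate on through the root domain's accessors. A tiny
sketch, assuming allocation succeeds (error handling omitted):

    static int my_key;
    static int my_cookie;

    static void my_ptd_demo (void)
    {
        my_key = adeos_alloc_ptdkey();     /* one of ADEOS_ROOT_NPTDKEYS slots */
        adeos_set_ptd(my_key,&my_cookie);  /* attach to the current thread */
        /* ... later, from the same thread: */
        if (adeos_get_ptd(my_key) == &my_cookie)
            adeos_free_ptdkey(my_key);
    }
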
diff -Nru linux-2.6.10/kernel/Makefile linux-2.6.10-adeos-ppc64-r3/kernel/Makefile
--- linux-2.6.10/kernel/Makefile        2004-12-24 23:34:26.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/kernel/Makefile 2005-11-13 11:45:31.000000000 +0200
@@ -9,6 +9,7 @@
            rcupdate.o intermodule.o extable.o params.o posix-timers.o \
            kthread.o wait.o kfifo.o sys_ni.o
 
+obj-$(CONFIG_ADEOS_CORE) += adeos.o
 obj-$(CONFIG_FUTEX) += futex.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += cpu.o spinlock.o
diff -Nru linux-2.6.10/kernel/panic.c linux-2.6.10-adeos-ppc64-r3/kernel/panic.c
--- linux-2.6.10/kernel/panic.c 2004-12-24 23:35:29.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/kernel/panic.c  2005-11-13 11:45:31.000000000 +0200
@@ -70,6 +70,9 @@
        va_end(args);
        printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
        bust_spinlocks(0);
+#ifdef CONFIG_ADEOS_CORE
+       __adeos_dump_state();
+#endif /* CONFIG_ADEOS_CORE */
 
 #ifdef CONFIG_SMP
        smp_send_stop();
diff -Nru linux-2.6.10/kernel/printk.c linux-2.6.10-adeos-ppc64-r3/kernel/printk.c
--- linux-2.6.10/kernel/printk.c        2004-12-24 23:35:40.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/kernel/printk.c 2005-11-13 11:45:31.000000000 +0200
@@ -509,6 +509,66 @@
  * then changes console_loglevel may break. This is because console_loglevel
  * is inspected when the actual printing occurs.
  */
+#ifdef CONFIG_ADEOS_CORE
+
+static raw_spinlock_t __adeos_printk_lock = RAW_SPIN_LOCK_UNLOCKED;
+
+static int __adeos_printk_fill;
+
+static char __adeos_printk_buf[__LOG_BUF_LEN];
+
+void __adeos_flush_printk (unsigned virq)
+{
+       char *p = __adeos_printk_buf;
+       int out = 0, len;
+
+       clear_bit(ADEOS_PPRINTK_FLAG,&adp_root->flags);
+
+       while (out < __adeos_printk_fill) {
+               len = strlen(p) + 1;
+               printk("%s",p);
+               p += len;
+               out += len;
+       }
+       __adeos_printk_fill = 0;
+}
+
+asmlinkage int printk(const char *fmt, ...)
+{
+       unsigned long flags;
+       int r, fbytes;
+       va_list args;
+
+       va_start(args, fmt);
+
+       if (adp_current == adp_root ||
+           test_bit(ADEOS_SPRINTK_FLAG,&adp_current->flags) ||
+           oops_in_progress) {
+               r = vprintk(fmt, args);
+               goto out;
+       }
+
+       adeos_spin_lock_irqsave(&__adeos_printk_lock,flags);
+
+       fbytes = __LOG_BUF_LEN - __adeos_printk_fill;
+
+       if (fbytes > 1) {
+               r = vscnprintf(__adeos_printk_buf + __adeos_printk_fill,
+                              fbytes, fmt, args) + 1; /* account for the null byte */
+               __adeos_printk_fill += r;
+       } else
+               r = 0;
+       
+       adeos_spin_unlock_irqrestore(&__adeos_printk_lock,flags);
+
+       if (!test_and_set_bit(ADEOS_PPRINTK_FLAG,&adp_root->flags))
+               adeos_trigger_irq(__adeos_printk_virq);
+out: 
+       va_end(args);
+
+       return r;
+}
+#else /* !CONFIG_ADEOS_CORE */
 asmlinkage int printk(const char *fmt, ...)
 {
        va_list args;
@@ -520,6 +580,7 @@
 
        return r;
 }
+#endif /* CONFIG_ADEOS_CORE */
 
 asmlinkage int vprintk(const char *fmt, va_list args)
 {
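
Net effect of the hunk above: printk() issued from a non-root domain is
stashed in __adeos_printk_buf and replayed by the root domain when
__adeos_printk_virq fires, so it stays safe in time-critical paths at the
price of deferred output. A domain that wants immediate output while
debugging can toggle the per-domain flag; a sketch:

    static void my_debug_dump (void)
    {
        adeos_set_printk_sync(adp_current);   /* vprintk() runs inline */
        printk("state dump from a non-root domain\n");
        adeos_set_printk_async(adp_current);  /* back to buffer + VIRQ flush */
    }
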
diff -Nru linux-2.6.10/kernel/sched.c linux-2.6.10-adeos-ppc64-r3/kernel/sched.c
--- linux-2.6.10/kernel/sched.c 2004-12-24 23:35:24.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/kernel/sched.c  2005-11-13 11:45:32.000000000 +0200
@@ -302,7 +302,16 @@
  * Default context-switch locking:
  */
 #ifndef prepare_arch_switch
+#ifdef CONFIG_ADEOS_CORE
+#define prepare_arch_switch(rq,prev,next) \
+do { \
+    struct { struct task_struct *prev, *next; } arg = { (prev), (next) }; \
+    __adeos_schedule_head(&arg); \
+    adeos_hw_cli(); \
+} while(0)
+#else /* !CONFIG_ADEOS_CORE */
 # define prepare_arch_switch(rq, next) do { } while (0)
+#endif /* CONFIG_ADEOS_CORE */
 # define finish_arch_switch(rq, next)  spin_unlock_irq(&(rq)->lock)
 # define task_running(rq, p)           ((rq)->curr == (p))
 #endif
@@ -1367,6 +1376,9 @@
 
        if (current->set_child_tid)
                put_user(current->pid, current->set_child_tid);
+#ifdef CONFIG_ADEOS_CORE
+       __adeos_enter_process();
+#endif /* CONFIG_ADEOS_CORE */
 }
 
 /*
@@ -2535,6 +2547,11 @@
        unsigned long run_time;
        int cpu, idx;
 
+#ifdef CONFIG_ADEOS_CORE
+       if (adp_current != adp_root) /* Let's be helpful and conservative. */
+           return;
+#endif /* CONFIG_ADEOS_CORE */
+
        /*
         * Test if we are atomic.  Since do_exit() needs to call into
         * schedule() atomically, we ignore that path for now.
@@ -2684,9 +2701,28 @@
                rq->curr = next;
                ++*switch_count;
 
-               prepare_arch_switch(rq, next);
+#ifdef CONFIG_ADEOS_CORE
+               prepare_arch_switch(rq, prev, next);
+#else /* !CONFIG_ADEOS_CORE */
+               prepare_arch_switch(rq, next);
+#endif /* CONFIG_ADEOS_CORE */
                prev = context_switch(rq, prev, next);
                barrier();
+#ifdef CONFIG_ADEOS_CORE
+               if (adp_pipelined)
+                   {
+                   __clear_bit(IPIPE_SYNC_FLAG,&adp_root->cpudata[task_cpu(current)].status);
+                   adeos_hw_sti();
+                   }
+
+               if (__adeos_schedule_tail(prev) > 0 || adp_current != adp_root)
+                   /* Someone has just recycled the register set of
+                      prev for running over a non-root domain, or
+                      some event handler in the pipeline asked for a
+                      truncated scheduling tail. Don't perform the
+                      Linux housekeeping chores, at least not now. */
+                   return;
+#endif /* CONFIG_ADEOS_CORE */
 
                finish_task_switch(prev);
        } else
@@ -3148,6 +3184,16 @@
        retval = security_task_setscheduler(p, policy, &lp);
        if (retval)
                goto out_unlock;
+#ifdef CONFIG_ADEOS_CORE
+       {
+       struct { struct task_struct *task; int policy; struct sched_param *param; } evdata = { p, policy, &lp };
+       if (__adeos_renice_process(&evdata))
+           {
+           retval = 0;
+           goto out_unlock;
+           }
+       }
+#endif /* CONFIG_ADEOS_CORE */
        /*
         * To be able to change p->policy safely, the apropriate
         * runqueue lock must be held.
@@ -4676,3 +4722,62 @@
 }
 
 #endif /* CONFIG_MAGIC_SYSRQ */
+
+#ifdef CONFIG_ADEOS_CORE
+
+int __adeos_setscheduler_root (struct task_struct *p, int policy, int prio)
+{
+       prio_array_t *array;
+       unsigned long flags;
+       runqueue_t *rq;
+       int oldprio;
+
+       if (prio < 1 || prio > MAX_RT_PRIO-1)
+           return -EINVAL;
+
+       read_lock_irq(&tasklist_lock);
+       rq = task_rq_lock(p, &flags);
+       array = p->array;
+       if (array)
+               deactivate_task(p, rq);
+       oldprio = p->prio;
+       __setscheduler(p, policy, prio);
+       if (array) {
+               __activate_task(p, rq);
+               if (task_running(rq, p)) {
+                       if (p->prio > oldprio)
+                               resched_task(rq->curr);
+               } else if (TASK_PREEMPTS_CURR(p, rq))
+                       resched_task(rq->curr);
+       }
+       task_rq_unlock(rq, &flags);
+       read_unlock_irq(&tasklist_lock);
+
+       return 0;
+}
+
+EXPORT_SYMBOL(__adeos_setscheduler_root);
+
+void __adeos_reenter_root (struct task_struct *prev,
+                          int policy,
+                          int prio)
+{
+       finish_task_switch(prev);
+       if (reacquire_kernel_lock(current) < 0)
+           ;
+       preempt_enable_no_resched();
+
+       if (current->policy != policy || current->rt_priority != prio)
+           __adeos_setscheduler_root(current,policy,prio);
+}
+
+EXPORT_SYMBOL(__adeos_reenter_root);
+
+void __adeos_schedule_back_root (struct task_struct *prev)
+{
+    __adeos_reenter_root(prev,current->policy,current->rt_priority);
+}
+
+EXPORT_SYMBOL(__adeos_schedule_back_root);
+
+#endif /* CONFIG_ADEOS_CORE */
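
The exports above are the glue a co-kernel would use to give the CPU back
to Linux: __adeos_schedule_back_root() finishes the pending Linux context
switch for the task it preempted, and __adeos_setscheduler_root() renices a
Linux task without going through sys_sched_setscheduler(). A hedged sketch
(the priority value is illustrative):

    /* Called by a secondary domain when it is done preempting Linux;
       'prev' is the task that was running under the root domain. */
    static void my_release_cpu (struct task_struct *prev)
    {
        __adeos_schedule_back_root(prev);
    }

    /* Boost a Linux task to a made-up SCHED_FIFO priority: */
    static int my_boost (struct task_struct *task)
    {
        return __adeos_setscheduler_root(task, SCHED_FIFO, 50);
    }
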
diff -Nru linux-2.6.10/kernel/signal.c linux-2.6.10-adeos-ppc64-r3/kernel/signal.c
--- linux-2.6.10/kernel/signal.c        2004-12-24 23:34:32.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/kernel/signal.c 2005-11-13 11:45:32.000000000 +0200
@@ -576,6 +576,13 @@
 
        set_tsk_thread_flag(t, TIF_SIGPENDING);
 
+#ifdef CONFIG_ADEOS_CORE
+       {
+       struct { struct task_struct *t; } evdata = { t };
+       __adeos_kick_process(&evdata);
+       }
+#endif /* CONFIG_ADEOS_CORE */
+
        /*
         * If resume is set, we want to wake it up in the TASK_STOPPED case.
         * We don't check for TASK_STOPPED because there is a race with it
@@ -823,6 +830,17 @@
                BUG();
 #endif
 
+#ifdef CONFIG_ADEOS_CORE
+       /* If some domain handler in the pipeline doesn't ask for
+          propagation, return success pretending that 'sig' was
+          delivered. */
+       {
+       struct { struct task_struct *task; int sig; } evdata = { t, sig };
+       if (__adeos_signal_process(&evdata))
+           goto out;
+       }
+#endif /* CONFIG_ADEOS_CORE */
+
        if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
                /*
                 * Set up a return to indicate that we dropped the signal.
diff -Nru linux-2.6.10/kernel/sysctl.c linux-2.6.10-adeos-ppc64-r3/kernel/sysctl.c
--- linux-2.6.10/kernel/sysctl.c        2004-12-24 23:33:59.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/kernel/sysctl.c 2005-11-13 11:45:32.000000000 +0200
@@ -946,6 +946,9 @@
 #ifdef CONFIG_PROC_FS
        register_proc_table(root_table, proc_sys_root);
        init_irq_proc();
+#ifdef CONFIG_ADEOS_CORE
+       __adeos_init_proc();
+#endif /* CONFIG_ADEOS_CORE */
 #endif
 }
 
diff -Nru linux-2.6.10/Makefile linux-2.6.10-adeos-ppc64-r3/Makefile
--- linux-2.6.10/Makefile       2004-12-24 23:35:01.000000000 +0200
+++ linux-2.6.10-adeos-ppc64-r3/Makefile        2005-11-13 11:45:32.000000000 +0200
@@ -558,6 +558,8 @@
 ifeq ($(KBUILD_EXTMOD),)
 core-y         += kernel/ mm/ fs/ ipc/ security/ crypto/
 
+core-$(CONFIG_ADEOS) += adeos/
+
 vmlinux-dirs   := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
                     $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
                     $(net-y) $(net-m) $(libs-y) $(libs-m)))
