Module: xenomai-3
Branch: stable-3.0.x
Commit: b88368afe7545b6900660785b1cb131d68e72839
URL:    http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=b88368afe7545b6900660785b1cb131d68e72839

Author: Philippe Gerum <r...@xenomai.org>
Date:   Fri Sep  9 18:59:32 2016 +0200

cobalt/powerpc: upgrade I-pipe support
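
    Summary of what the refreshed patches bring in (all visible in the
    diff below): the I-pipe core release moves from 6 to 8 on kernel
    3.18.20 and from 3 to 7 on kernel 4.1.18; the soft-IRQ state
    assembly helpers are consolidated into a new asm/irq_softstate.h
    shared by the 64s and 64e exception paths; interrupt pipelining is
    wired into the Book3E external, decrementer and doorbell vectors
    (__ipipe_grab_irq, __ipipe_grab_timer, __ipipe_grab_doorbell); IPI
    handling is reworked around __ipipe_register_mux_ipi() and
    __ipipe_finish_ipi_demux(), including XICS support; and the I-pipe
    tracer now hooks into ftrace through the new
    FTRACE_OPS_FL_IPIPE_EXCLUSIVE flag.

    On the multiplexed IPI path, a sender sets a message bit in the
    target CPU's ipipe_ipi_message word and fires a single hardware
    IPI; the receiver drains the pending bits and dispatches each one
    as its own virtual IRQ. Below is a minimal user-space sketch of
    that drain loop, with simplified names (MSG_*, handle_ipi) standing
    in for the patch's IPIPE_MSG_* constants, per-CPU data and
    __ipipe_handle_irq() -- illustrative only, not the literal kernel
    code:

        #include <stdio.h>

        /* Message-bit numbers mirroring the patch's IPIPE_MSG_* values. */
        enum { MSG_CRITICAL, MSG_HRTIMER, MSG_RESCHEDULE };

        #define MSG_MASK ((1UL << MSG_CRITICAL) | \
                          (1UL << MSG_HRTIMER)  | \
                          (1UL << MSG_RESCHEDULE))

        /* Stands in for the per-CPU ipipe_ipi_message word. */
        static unsigned long ipi_message;

        /* Stands in for __ipipe_handle_irq() on the matching VIRQ. */
        static void handle_ipi(int msg)
        {
                printf("dispatching IPI message %d\n", msg);
        }

        static void do_ipi_demux(void)
        {
                int msg;

                /* Rescan until every pending bit is consumed; each bit is
                 * cleared atomically before its handler runs, so a bit set
                 * concurrently by a sender is caught by the outer loop. */
                while (__atomic_load_n(&ipi_message, __ATOMIC_ACQUIRE) & MSG_MASK)
                        for (msg = MSG_CRITICAL; msg <= MSG_RESCHEDULE; msg++)
                                if (__atomic_fetch_and(&ipi_message,
                                                       ~(1UL << msg),
                                                       __ATOMIC_ACQ_REL)
                                    & (1UL << msg))
                                        handle_ipi(msg);
        }

        int main(void)
        {
                ipi_message = (1UL << MSG_HRTIMER) | (1UL << MSG_RESCHEDULE);
                do_ipi_demux();
                return 0;
        }

    Clearing each bit before dispatching it is what keeps the
    piggybacked scheme safe against concurrent senders without a lock.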

---

 ...-6.patch => ipipe-core-3.18.20-powerpc-8.patch} |   94 +-
 ...c-3.patch => ipipe-core-4.1.18-powerpc-7.patch} | 1133 +++++++++++++++-----
 2 files changed, 924 insertions(+), 303 deletions(-)

diff --git a/kernel/cobalt/arch/powerpc/patches/ipipe-core-3.18.20-powerpc-6.patch b/kernel/cobalt/arch/powerpc/patches/ipipe-core-3.18.20-powerpc-8.patch
similarity index 99%
rename from kernel/cobalt/arch/powerpc/patches/ipipe-core-3.18.20-powerpc-6.patch
rename to kernel/cobalt/arch/powerpc/patches/ipipe-core-3.18.20-powerpc-8.patch
index a3792aa..f92259f 100644
--- a/kernel/cobalt/arch/powerpc/patches/ipipe-core-3.18.20-powerpc-6.patch
+++ b/kernel/cobalt/arch/powerpc/patches/ipipe-core-3.18.20-powerpc-8.patch
@@ -304,7 +304,7 @@ index b59ac27..9cc1d25 100644
   * or should we not care like we do now ? --BenH.
 diff --git a/arch/powerpc/include/asm/ipipe.h b/arch/powerpc/include/asm/ipipe.h
 new file mode 100644
-index 0000000..f79050e
+index 0000000..40aca5b
 --- /dev/null
 +++ b/arch/powerpc/include/asm/ipipe.h
 @@ -0,0 +1,157 @@
@@ -349,7 +349,7 @@ index 0000000..f79050e
 +#include <linux/cache.h>
 +#include <linux/threads.h>
 +
-+#define IPIPE_CORE_RELEASE    6
++#define IPIPE_CORE_RELEASE    8
 +
 +struct ipipe_domain;
 +
@@ -1640,7 +1640,7 @@ index 22b45a4..e973007 100644
   * PROM code for specific machines follows.  Put it
   * here so it's easy to add arch-specific sections later.
 diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
-index 0905c8d..df2cc87 100644
+index 0905c8d..f9293c9 100644
 --- a/arch/powerpc/kernel/entry_64.S
 +++ b/arch/powerpc/kernel/entry_64.S
 @@ -33,6 +33,7 @@
@@ -1879,25 +1879,21 @@ index 0905c8d..df2cc87 100644
  #endif /* CONFIG_PREEMPT */
  
        .globl  fast_exc_return_irq
-@@ -751,6 +806,17 @@ restore:
+@@ -751,6 +806,13 @@ restore:
         * are about to re-enable interrupts
         */
        ld      r5,SOFTE(r1)
 +#ifdef CONFIG_IPIPE
-+      cmpwi   cr0,r5,0
-+      nor     r5,r5,r5        /* IPIPE_STALL_FLAG = !SOFTE */
 +      ld      r4,PACAROOTPCPU(r13)
-+      ld      r3,0(r4)
-+      insrdi  r3,r5,1,63
-+      std     r3,0(r4)
-+      beq     1f
++      cmpwi   cr0,r4,0
++      bne     1f
 +      TRACE_ENABLE_INTS
 +1:
 +#else /* !CONFIG_IPIPE */
        lbz     r6,PACASOFTIRQEN(r13)
        cmpwi   cr0,r5,0
        beq     restore_irq_off
-@@ -777,6 +843,7 @@ restore_no_replay:
+@@ -777,6 +839,7 @@ restore_no_replay:
        TRACE_ENABLE_INTS
        li      r0,1
        stb     r0,PACASOFTIRQEN(r13);
@@ -1905,7 +1901,7 @@ index 0905c8d..df2cc87 100644
  
        /*
         * Final return path. BookE is handled in a different file
-@@ -867,6 +934,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+@@ -867,6 +930,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
  
  #endif /* CONFIG_PPC_BOOK3E */
  
@@ -1913,7 +1909,7 @@ index 0905c8d..df2cc87 100644
        /*
         * We are returning to a context with interrupts soft disabled.
         *
-@@ -948,6 +1016,8 @@ restore_check_irq_replay:
+@@ -948,6 +1012,8 @@ restore_check_irq_replay:
        b       ret_from_except
  #endif /* CONFIG_PPC_DOORBELL */
  1:    b       ret_from_except /* What else to do here ? */
@@ -1922,7 +1918,7 @@ index 0905c8d..df2cc87 100644
   
  unrecov_restore:
        addi    r3,r1,STACK_FRAME_OVERHEAD
-@@ -994,7 +1064,7 @@ _GLOBAL(enter_rtas)
+@@ -994,7 +1060,7 @@ _GLOBAL(enter_rtas)
        li      r0,0
        mtcr    r0
  
@@ -2835,7 +2831,7 @@ index 0000000..292fed5
 +#endif        /* !CONFIG_PPC64 */
 +#endif /* !CONFIG_IPIPE_LEGACY */
 diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
-index c143835..6b92a8b 100644
+index c143835..82279ee 100644
 --- a/arch/powerpc/kernel/irq.c
 +++ b/arch/powerpc/kernel/irq.c
 @@ -95,6 +95,36 @@ extern int tau_interrupts(int);
@@ -2867,7 +2863,7 @@ index c143835..6b92a8b 100644
 +static inline notrace int decrementer_check_overflow(void)
 +{
 +      u64 now = get_tb_or_rtc();
-+      u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
++      u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
 + 
 +      return now >= *next_tb;
 +}
@@ -6004,6 +6000,18 @@ index abcafaa..a8440e4 100644
  } ____cacheline_aligned;
  
  /*
+diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
+index 662697b..6a9b6ad 100644
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -108,6 +108,7 @@ enum {
+       FTRACE_OPS_FL_ADDING                    = 1 << 9,
+       FTRACE_OPS_FL_REMOVING                  = 1 << 10,
+       FTRACE_OPS_FL_MODIFYING                 = 1 << 11,
++      FTRACE_OPS_FL_IPIPE_EXCLUSIVE           = 1 << 12,
+ };
+ 
+ #ifdef CONFIG_DYNAMIC_FTRACE
 diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
 index cba442e..b513a46 100644
 --- a/include/linux/hardirq.h
@@ -11949,7 +11957,7 @@ index 0000000..143f9e6
 +#endif /* CONFIG_IPIPE_HAVE_HOSTRT */
 diff --git a/kernel/ipipe/tracer.c b/kernel/ipipe/tracer.c
 new file mode 100644
-index 0000000..da272c50
+index 0000000..8388671
 --- /dev/null
 +++ b/kernel/ipipe/tracer.c
 @@ -0,0 +1,1468 @@
@@ -13287,7 +13295,7 @@ index 0000000..da272c50
 +
 +static struct ftrace_ops ipipe_trace_ops = {
 +      .func = ipipe_trace_function,
-+      .flags = FTRACE_OPS_FL_RECURSION_SAFE,
++      .flags = FTRACE_OPS_FL_IPIPE_EXCLUSIVE,
 +};
 +
 +static ssize_t __ipipe_wr_enable(struct file *file, const char __user *buffer,
@@ -14848,7 +14856,7 @@ index a5da09c..6650799 100644
        help
          This option will modify all the calls to function tracing
 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index d1eff3d..2a324bc 100644
+index d1eff3d..f8b9472 100644
 --- a/kernel/trace/ftrace.c
 +++ b/kernel/trace/ftrace.c
 @@ -32,6 +32,7 @@
@@ -14859,7 +14867,33 @@ index d1eff3d..2a324bc 100644
  
  #include <trace/events/sched.h>
  
-@@ -2298,6 +2299,9 @@ void __weak arch_ftrace_update_code(int command)
+@@ -251,8 +252,17 @@ static inline void update_function_graph_func(void) { }
+ 
+ static void update_ftrace_function(void)
+ {
++      struct ftrace_ops *ops;
+       ftrace_func_t func;
+ 
++      for (ops = ftrace_ops_list;
++           ops != &ftrace_list_end; ops = ops->next)
++              if (ops->flags & FTRACE_OPS_FL_IPIPE_EXCLUSIVE) {
++                      set_function_trace_op = ops;
++                      func = ops->func;
++                      goto set_pointers;
++              }
++
+       /*
+        * Prepare the ftrace_ops that the arch callback will use.
+        * If there's only one ftrace_ops registered, the ftrace_ops_list
+@@ -280,6 +290,7 @@ static void update_ftrace_function(void)
+ 
+       update_function_graph_func();
+ 
++  set_pointers:
+       /* If there's no change, then do nothing more here */
+       if (ftrace_trace_function == func)
+               return;
+@@ -2298,6 +2309,9 @@ void __weak arch_ftrace_update_code(int command)
  
  static void ftrace_run_update_code(int command)
  {
@@ -14869,7 +14903,7 @@ index d1eff3d..2a324bc 100644
        int ret;
  
        ret = ftrace_arch_code_modify_prepare();
-@@ -2311,7 +2315,13 @@ static void ftrace_run_update_code(int command)
+@@ -2311,7 +2325,13 @@ static void ftrace_run_update_code(int command)
         * is safe. The stop_machine() is the safest, but also
         * produces the most overhead.
         */
@@ -14883,7 +14917,7 @@ index d1eff3d..2a324bc 100644
  
        ret = ftrace_arch_code_modify_post_process();
        FTRACE_WARN_ON(ret);
-@@ -4621,10 +4631,10 @@ static int ftrace_process_locs(struct module *mod,
+@@ -4621,10 +4641,10 @@ static int ftrace_process_locs(struct module *mod,
         * reason to cause large interrupt latencies while we do it.
         */
        if (!mod)
@@ -14896,7 +14930,7 @@ index d1eff3d..2a324bc 100644
        ret = 0;
   out:
        mutex_unlock(&ftrace_lock);
-@@ -4723,9 +4733,11 @@ void __init ftrace_init(void)
+@@ -4723,9 +4743,11 @@ void __init ftrace_init(void)
        unsigned long count, flags;
        int ret;
  
@@ -14910,7 +14944,7 @@ index d1eff3d..2a324bc 100644
        if (ret)
                goto failed;
  
-@@ -4891,7 +4903,16 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+@@ -4891,7 +4913,16 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                }
        } while_for_each_ftrace_op(op);
  out:
@@ -15276,24 +15310,22 @@ index 0c9216c..00a9a30 100644
        return err;
  }
 diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
-index 1afec32..5803111 100644
+index 1afec32..f7c1a2a 100644
 --- a/lib/smp_processor_id.c
 +++ b/lib/smp_processor_id.c
-@@ -12,10 +12,13 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
+@@ -12,6 +12,12 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
  {
        int this_cpu = raw_smp_processor_id();
  
++      if (hard_irqs_disabled())
++              goto out;
++
 +      if (!ipipe_root_p)
 +              goto out;
 +
        if (likely(preempt_count()))
                goto out;
  
--      if (irqs_disabled())
-+      if (irqs_disabled() || hard_irqs_disabled())
-               goto out;
- 
-       /*
 diff --git a/mm/memory.c b/mm/memory.c
 index 90fb265..8a1fd79 100644
 --- a/mm/memory.c
diff --git a/kernel/cobalt/arch/powerpc/patches/ipipe-core-4.1.18-powerpc-3.patch b/kernel/cobalt/arch/powerpc/patches/ipipe-core-4.1.18-powerpc-7.patch
similarity index 94%
rename from kernel/cobalt/arch/powerpc/patches/ipipe-core-4.1.18-powerpc-3.patch
rename to kernel/cobalt/arch/powerpc/patches/ipipe-core-4.1.18-powerpc-7.patch
index c524961..49f4856 100644
--- a/kernel/cobalt/arch/powerpc/patches/ipipe-core-4.1.18-powerpc-3.patch
+++ b/kernel/cobalt/arch/powerpc/patches/ipipe-core-4.1.18-powerpc-7.patch
@@ -51,43 +51,44 @@ index 4eec430..c6a528b 100644
  ifeq ($(call cc-option-yn, -fstack-protector),y)
  BOOTCFLAGS    += -fno-stack-protector
  endif
+diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
+index a8b52b6..6df92057 100644
+--- a/arch/powerpc/include/asm/exception-64e.h
++++ b/arch/powerpc/include/asm/exception-64e.h
+@@ -11,6 +11,8 @@
+ #ifndef _ASM_POWERPC_EXCEPTION_64E_H
+ #define _ASM_POWERPC_EXCEPTION_64E_H
+ 
++#include <asm/irq_softstate.h>
++
+ /*
+  * SPRGs usage an other considerations...
+  *
 diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
-index 77f52b2..8eb3473 100644
+index 77f52b2..4650dc38 100644
 --- a/arch/powerpc/include/asm/exception-64s.h
 +++ b/arch/powerpc/include/asm/exception-64s.h
-@@ -332,6 +332,20 @@ do_kvm_##n:                                                                \
-       GET_CTR(r10, area);                                                \
-       std     r10,_CTR(r1);
+@@ -35,6 +35,8 @@
+  * implementations as possible.
+  */
  
-+#ifdef CONFIG_IPIPE
-+/* Do NOT alter Rc(eq) in this code;  our caller uses it. */
-+#define COPY_SOFTISTATE(mreg)                 \
-+      ld      mreg,PACAROOTPCPU(r13);         \
-+      ld      mreg,0(mreg);                   \
-+      nor     mreg,mreg,mreg;                 \
-+      clrldi  mreg,mreg,63;                   \
-+      std     mreg,SOFTE(r1)
-+#else /* !CONFIG_IPIPE */
-+#define COPY_SOFTISTATE(mreg)                 \
-+      lbz     mreg,PACASOFTIRQEN(r13);        \
-+      std     mreg,SOFTE(r1)
-+#endif /* !CONFIG_IPIPE */
++#include <asm/irq_softstate.h>
 +
- #define EXCEPTION_PROLOG_COMMON_3(n)                                     \
-       std     r2,GPR2(r1);            /* save r2 in stackframe        */ \
-       SAVE_4GPRS(3, r1);              /* save r3 - r6 in stackframe   */ \
-@@ -339,9 +353,8 @@ do_kvm_##n:                                                                \
+ #define EX_R9         0
+ #define EX_R10                8
+ #define EX_R11                16
+@@ -339,9 +341,8 @@ do_kvm_##n:                                                                \
        mflr    r9;                     /* Get LR, later save to stack  */ \
        ld      r2,PACATOC(r13);        /* get kernel TOC into r2       */ \
        std     r9,_LINK(r1);                                              \
 -      lbz     r10,PACASOFTIRQEN(r13);                            \
-+      COPY_SOFTISTATE(r10);                                              \
++      EXC_SAVE_SOFTISTATE(r10);                                          \
        mfspr   r11,SPRN_XER;           /* save XER in stackframe       */ \
 -      std     r10,SOFTE(r1);                                             \
        std     r11,_XER(r1);                                              \
        li      r9,(n)+1;                                                  \
        std     r9,_TRAP(r1);           /* set trap number              */ \
-@@ -428,11 +441,15 @@ label##_relon_hv:                                        \
+@@ -428,11 +429,15 @@ label##_relon_hv:                                        \
  #define SOFTEN_VALUE_0xe60    PACA_IRQ_HMI
  #define SOFTEN_VALUE_0xe62    PACA_IRQ_HMI
  
@@ -103,51 +104,7 @@ index 77f52b2..8eb3473 100644
  #define _SOFTEN_TEST(h, vec)  __SOFTEN_TEST(h, vec)
  
  #define SOFTEN_TEST_PR(vec)                                           \
-@@ -515,11 +532,63 @@ label##_relon_hv:                                                        \
-  * runlatch, etc...
-  */
- 
-+.macro HARD_ENABLE_INTS tmp=r10
-+#ifdef CONFIG_PPC_BOOK3E
-+      wrteei  1
-+#else
-+      ld      \tmp,PACAKMSR(r13)
-+      ori     \tmp,\tmp,MSR_EE
-+      mtmsrd  \tmp,1
-+#endif /* CONFIG_PPC_BOOK3E */
-+.endm
-+
-+.macro HARD_DISABLE_INTS tmp=r10
-+#ifdef CONFIG_PPC_BOOK3E
-+      wrteei  0
-+#else
-+      ld      \tmp,PACAKMSR(r13) /* Get kernel MSR without EE */
-+      mtmsrd  \tmp,1            /* Update machine state */
-+#endif /* CONFIG_PPC_BOOK3E */
-+.endm
-+
-+.macro HARD_DISABLE_INTS_RI
-+#ifdef CONFIG_PPC_BOOK3E
-+      wrteei  0
-+#else
-+      /*
-+       * For performance reasons we clear RI the same time that we
-+       * clear EE. We only need to clear RI just before we restore r13
-+       * below, but batching it with EE saves us one expensive mtmsrd call.
-+       * We have to be careful to restore RI if we branch anywhere from
-+       * here (eg syscall_exit_work).
-+       *
-+       * CAUTION: using r9-r11 the way they are is assumed by the
-+       * caller.
-+       */
-+      ld      r10,PACAKMSR(r13) /* Get kernel MSR without EE */
-+      li      r9,MSR_RI
-+      andc    r11,r10,r9
-+      mtmsrd  r11,1             /* Update machine state */
-+#endif /* CONFIG_PPC_BOOK3E */
-+.endm
-+
- /*
+@@ -519,7 +524,18 @@ label##_relon_hv:                                                 \
   * This addition reconciles our actual IRQ state with the various software
   * flags that track it. This may call C code.
   */
@@ -160,14 +117,13 @@ index 77f52b2..8eb3473 100644
 +      mfmsr   r11;                            \
 +      ori     r11,r11,MSR_EE;                 \
 +      mtmsrd  r11,1;
-+#define RECONCILE_IRQ_STATE(__rA, __rB)       HARD_DISABLE_INTS __rA
 +#else /* !CONFIG_IPIPE */
  #define ADD_RECONCILE RECONCILE_IRQ_STATE(r10,r11)
 +#endif /* !CONFIG_IPIPE */
  
  #define ADD_NVGPRS                            \
        bl      save_nvgprs
-@@ -552,9 +621,24 @@ label##_common:                                           \
+@@ -552,9 +568,24 @@ label##_common:                                           \
   * in the idle task and therefore need the special idle handling
   * (finish nap and runlatch)
   */
@@ -304,10 +260,10 @@ index b59ac27..9cc1d25 100644
   * or should we not care like we do now ? --BenH.
 diff --git a/arch/powerpc/include/asm/ipipe.h b/arch/powerpc/include/asm/ipipe.h
 new file mode 100644
-index 0000000..271fc48
+index 0000000..d7e4acd
 --- /dev/null
 +++ b/arch/powerpc/include/asm/ipipe.h
-@@ -0,0 +1,151 @@
+@@ -0,0 +1,153 @@
 +/*
 + *   include/asm-powerpc/ipipe.h
 + *
@@ -349,14 +305,14 @@ index 0000000..271fc48
 +#include <linux/cache.h>
 +#include <linux/threads.h>
 +
-+#define IPIPE_CORE_RELEASE    3
++#define IPIPE_CORE_RELEASE    7
 +
 +struct ipipe_domain;
 +
 +struct ipipe_arch_sysinfo {
 +};
 +
-+#ifdef CONFIG_DEBUGGER
++#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
 +extern cpumask_t __ipipe_dbrk_pending;
 +#endif
 +
@@ -418,7 +374,9 @@ index 0000000..271fc48
 +
 +void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd);
 +
-+void __ipipe_register_ipi(unsigned int irq);
++void __ipipe_register_mux_ipi(unsigned int irq);
++
++void __ipipe_finish_ipi_demux(unsigned int irq);
 +#else
 +#define __ipipe_hook_critical_ipi(ipd)        do { } while(0)
 +#endif /* CONFIG_SMP */
@@ -461,10 +419,10 @@ index 0000000..271fc48
 +#endif /* !__ASM_POWERPC_IPIPE_H */
 diff --git a/arch/powerpc/include/asm/ipipe_base.h b/arch/powerpc/include/asm/ipipe_base.h
 new file mode 100644
-index 0000000..167a70d
+index 0000000..14f7692
 --- /dev/null
 +++ b/arch/powerpc/include/asm/ipipe_base.h
-@@ -0,0 +1,135 @@
+@@ -0,0 +1,134 @@
 +/* -*- linux-c -*-
 + * include/asm-powerpc/ipipe_base.h
 + *
@@ -502,7 +460,8 @@ index 0000000..167a70d
 + * The first virtual interrupt is reserved for the timer (see
 + * __ipipe_early_core_setup).
 + */
-+#define IPIPE_TIMER_VIRQ      IPIPE_VIRQ_BASE
++#define IPIPE_TIMER_VIRQ      (IPIPE_VIRQ_BASE + 0)
++#define IPIPE_DOORBELL_VIRQ   (IPIPE_VIRQ_BASE + 1)
 +
 +#ifdef CONFIG_SMP
 +/* 
@@ -511,21 +470,19 @@ index 0000000..167a70d
 + * implemented by piggybacking the debugger break IPI 0x3,
 + * which is demultiplexed in __ipipe_ipi_demux().
 + */
++#define IPIPE_CRITICAL_IPI    (IPIPE_VIRQ_BASE + 2)
++#define IPIPE_HRTIMER_IPI     (IPIPE_VIRQ_BASE + 3)
++#define IPIPE_RESCHEDULE_IPI  (IPIPE_VIRQ_BASE + 4)
++#define IPIPE_BASE_IPI_OFFSET IPIPE_CRITICAL_IPI
++
 +/* these are bit numbers in practice */
 +#define IPIPE_MSG_CRITICAL_IPI                0
 +#define IPIPE_MSG_HRTIMER_IPI         (IPIPE_MSG_CRITICAL_IPI + 1)
 +#define IPIPE_MSG_RESCHEDULE_IPI      (IPIPE_MSG_CRITICAL_IPI + 2)
-+
 +#define IPIPE_MSG_IPI_MASK    ((1UL << IPIPE_MSG_CRITICAL_IPI) |      \
 +                               (1UL << IPIPE_MSG_HRTIMER_IPI) |       \
 +                               (1UL << IPIPE_MSG_RESCHEDULE_IPI))
 +
-+#define IPIPE_CRITICAL_IPI    (IPIPE_VIRQ_BASE + 1)
-+#define IPIPE_HRTIMER_IPI     (IPIPE_CRITICAL_IPI + 1)
-+#define IPIPE_RESCHEDULE_IPI  (IPIPE_CRITICAL_IPI + 2)
-+
-+#define IPIPE_BASE_IPI_OFFSET IPIPE_CRITICAL_IPI
-+
 +#define ipipe_processor_id()  raw_smp_processor_id()
 +
 +#else  /* !CONFIG_SMP */
@@ -602,10 +559,10 @@ index 0000000..167a70d
 +#endif        /* !__ASM_POWERPC_IPIPE_BASE_H */
 diff --git a/arch/powerpc/include/asm/ipipe_hwirq.h b/arch/powerpc/include/asm/ipipe_hwirq.h
 new file mode 100644
-index 0000000..2bc8217
+index 0000000..1f94e29
 --- /dev/null
 +++ b/arch/powerpc/include/asm/ipipe_hwirq.h
-@@ -0,0 +1,252 @@
+@@ -0,0 +1,256 @@
 +/* -*- linux-c -*-
 + * include/asm-powerpc/ipipe_hwirq.h
 + *
@@ -682,6 +639,9 @@ index 0000000..2bc8217
 +{
 +      __asm__ __volatile__("wrteei 1": : :"memory");
 +}
++
++#define hard_local_irq_restore_notrace(x)     mtmsr(x)
++
 +#else /* !CONFIG_PPC_BOOK3E */
 +static inline void hard_local_irq_disable_notrace(void)
 +{
@@ -692,6 +652,9 @@ index 0000000..2bc8217
 +{
 +      __mtmsrd(mfmsr() | MSR_EE, 1);
 +}
++
++#define hard_local_irq_restore_notrace(x)     __mtmsrd(x, 1)
++
 +#endif /* !CONFIG_PPC_BOOK3E */
 +
 +static inline unsigned long hard_local_irq_save_notrace(void)
@@ -701,8 +664,6 @@ index 0000000..2bc8217
 +      return msr;
 +}
 +
-+#define hard_local_irq_restore_notrace(x)     __mtmsrd(x, 1)
-+
 +#endif /* CONFIG_PPC64 */
 +
 +#ifdef CONFIG_IPIPE
@@ -880,37 +841,167 @@ index e8e3a0a..90fe40a 100644
 +
  #endif /* _ASM_IRQ_H */
  #endif /* __KERNEL__ */
+diff --git a/arch/powerpc/include/asm/irq_softstate.h b/arch/powerpc/include/asm/irq_softstate.h
+new file mode 100644
+index 0000000..663ce0f
+--- /dev/null
++++ b/arch/powerpc/include/asm/irq_softstate.h
+@@ -0,0 +1,118 @@
++#ifndef _ASM_POWERPC_IRQ_SOFTSTATE_H
++#define _ASM_POWERPC_IRQ_SOFTSTATE_H
++
++#ifdef __ASSEMBLY__
++
++.macro HARD_ENABLE_INTS tmp=r10
++#ifdef CONFIG_PPC_BOOK3E
++      wrteei  1
++#else
++      ld      \tmp,PACAKMSR(r13)
++      ori     \tmp,\tmp,MSR_EE
++      mtmsrd  \tmp,1
++#endif /* CONFIG_PPC_BOOK3E */
++.endm
++
++.macro HARD_DISABLE_INTS tmp=r10
++#ifdef CONFIG_PPC_BOOK3E
++      wrteei  0
++#else
++      ld      \tmp,PACAKMSR(r13) /* Get kernel MSR without EE */
++      mtmsrd  \tmp,1            /* Update machine state */
++#endif /* CONFIG_PPC_BOOK3E */
++.endm
++
++.macro HARD_DISABLE_INTS_RI
++#ifdef CONFIG_PPC_BOOK3E
++      wrteei  0
++#else
++      /*
++       * For performance reasons we clear RI the same time that we
++       * clear EE. We only need to clear RI just before we restore r13
++       * below, but batching it with EE saves us one expensive mtmsrd call.
++       * We have to be careful to restore RI if we branch anywhere from
++       * here (eg syscall_exit_work).
++       *
++       * CAUTION: using r9-r11 the way they are is assumed by the
++       * caller.
++       */
++      ld      r10,PACAKMSR(r13) /* Get kernel MSR without EE */
++      li      r9,MSR_RI
++      andc    r11,r10,r9
++      mtmsrd  r11,1             /* Update machine state */
++#endif /* CONFIG_PPC_BOOK3E */
++.endm
++
++#ifdef CONFIG_IPIPE
++
++  /* Do NOT alter Rc(eq) in this code;  our caller uses it. */
++#define __COPY_SOFTISTATE(mreg)                       \
++      ld      mreg,PACAROOTPCPU(r13);         \
++      ld      mreg,0(mreg);                   \
++      nor     mreg,mreg,mreg;                 \
++      clrldi  mreg,mreg,63;                   \
++
++/* Do NOT alter Rc(eq) in this code;  our caller uses it. */
++#define COPY_SOFTISTATE(mreg)                 \
++      __COPY_SOFTISTATE(mreg);                \
++      std     mreg,SOFTE(r1)
++
++#ifdef CONFIG_PPC_BOOK3E
++#define SPECIAL_SAVE_SOFTISTATE(mreg)         \
++      __COPY_SOFTISTATE(mreg);                \
++      SPECIAL_EXC_STORE(mreg, SOFTE)
++#endif
++
++#define EXC_SAVE_SOFTISTATE(mreg)             \
++      COPY_SOFTISTATE(mreg)
++
++#define RECONCILE_IRQ_STATE(__rA, __rB)       HARD_DISABLE_INTS __rA
++
++#else /* !CONFIG_IPIPE */
++
++#define COPY_SOFTISTATE(mreg)                 \
++      lbz     mreg,PACASOFTIRQEN(r13);        \
++      std     mreg,SOFTE(r1)
++
++#ifdef CONFIG_PPC_BOOK3E
++#define SPECIAL_SAVE_SOFTISTATE(mreg)         \
++      lbz     mreg,PACASOFTIRQEN(r13);        \
++      SPECIAL_EXC_STORE(mreg, SOFTE)
++#endif
++
++#define EXC_SAVE_SOFTISTATE(mreg)             \
++      COPY_SOFTISTATE(mreg)
++
++ /*
++ * This is used by assembly code to soft-disable interrupts first and
++ * reconcile irq state.
++ *
++ * NB: This may call C code, so the caller must be prepared for volatiles to
++ * be clobbered.
++ */
++#ifdef CONFIG_TRACE_IRQFLAGS
++#define RECONCILE_IRQ_STATE(__rA, __rB)               \
++      lbz     __rA,PACASOFTIRQEN(r13);        \
++      lbz     __rB,PACAIRQHAPPENED(r13);      \
++      cmpwi   cr0,__rA,0;                     \
++      li      __rA,0;                         \
++      ori     __rB,__rB,PACA_IRQ_HARD_DIS;    \
++      stb     __rB,PACAIRQHAPPENED(r13);      \
++      beq     44f;                            \
++      stb     __rA,PACASOFTIRQEN(r13);        \
++      TRACE_DISABLE_INTS;                     \
++44:
++#else
++#define RECONCILE_IRQ_STATE(__rA, __rB)               \
++      lbz     __rA,PACAIRQHAPPENED(r13);      \
++      li      __rB,0;                         \
++      ori     __rA,__rA,PACA_IRQ_HARD_DIS;    \
++      stb     __rB,PACASOFTIRQEN(r13);        \
++      stb     __rA,PACAIRQHAPPENED(r13)
++#endif /* !CONFIG_TRACE_IRQFLAGS */
++
++#endif /* !CONFIG_IPIPE */
++
++#endif /* __ASSEMBLY__ */
++   
++#endif /* _ASM_POWERPC_IRQ_SOFTSTATE_H */
 diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
-index f214906..093829c 100644
+index f214906..fa36751 100644
 --- a/arch/powerpc/include/asm/irqflags.h
 +++ b/arch/powerpc/include/asm/irqflags.h
-@@ -45,6 +45,7 @@
-  * NB: This may call C code, so the caller must be prepared for volatiles to
-  * be clobbered.
-  */
-+#ifndef CONFIG_IPIPE
- #define RECONCILE_IRQ_STATE(__rA, __rB)               \
-       lbz     __rA,PACASOFTIRQEN(r13);        \
-       lbz     __rB,PACAIRQHAPPENED(r13);      \
-@@ -56,17 +57,21 @@
-       stb     __rA,PACASOFTIRQEN(r13);        \
-       TRACE_DISABLE_INTS;                     \
- 44:
-+#endif /* !CONFIG_IPIPE */
+@@ -38,35 +38,10 @@
+ #define TRACE_ENABLE_INTS     TRACE_WITH_FRAME_BUFFER(trace_hardirqs_on)
+ #define TRACE_DISABLE_INTS    TRACE_WITH_FRAME_BUFFER(trace_hardirqs_off)
  
+-/*
+- * This is used by assembly code to soft-disable interrupts first and
+- * reconcile irq state.
+- *
+- * NB: This may call C code, so the caller must be prepared for volatiles to
+- * be clobbered.
+- */
+-#define RECONCILE_IRQ_STATE(__rA, __rB)               \
+-      lbz     __rA,PACASOFTIRQEN(r13);        \
+-      lbz     __rB,PACAIRQHAPPENED(r13);      \
+-      cmpwi   cr0,__rA,0;                     \
+-      li      __rA,0;                         \
+-      ori     __rB,__rB,PACA_IRQ_HARD_DIS;    \
+-      stb     __rB,PACAIRQHAPPENED(r13);      \
+-      beq     44f;                            \
+-      stb     __rA,PACASOFTIRQEN(r13);        \
+-      TRACE_DISABLE_INTS;                     \
+-44:
+-
  #else
  #define TRACE_ENABLE_INTS
  #define TRACE_DISABLE_INTS
  
-+#ifndef CONFIG_IPIPE
- #define RECONCILE_IRQ_STATE(__rA, __rB)               \
-       lbz     __rA,PACAIRQHAPPENED(r13);      \
-       li      __rB,0;                         \
-       ori     __rA,__rA,PACA_IRQ_HARD_DIS;    \
-       stb     __rB,PACASOFTIRQEN(r13);        \
-       stb     __rA,PACAIRQHAPPENED(r13)
-+#endif /* !CONFIG_IPIPE */
-+
+-#define RECONCILE_IRQ_STATE(__rA, __rB)               \
+-      lbz     __rA,PACAIRQHAPPENED(r13);      \
+-      li      __rB,0;                         \
+-      ori     __rA,__rA,PACA_IRQ_HARD_DIS;    \
+-      stb     __rB,PACASOFTIRQEN(r13);        \
+-      stb     __rA,PACAIRQHAPPENED(r13)
  #endif
  #endif
  
@@ -1122,10 +1213,18 @@ index 70bd438..c43f0e4 100644
        u64 user_time;                  /* accumulated usermode TB ticks */
        u64 system_time;                /* accumulated system TB ticks */
 diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h
-index 25784cc..f29c8cf 100644
+index 25784cc..8eb2d78 100644
 --- a/arch/powerpc/include/asm/qe_ic.h
 +++ b/arch/powerpc/include/asm/qe_ic.h
-@@ -85,7 +85,7 @@ static inline void qe_ic_cascade_low_ipic(unsigned int irq,
+@@ -16,6 +16,7 @@
+ #define _ASM_POWERPC_QE_IC_H
+ 
+ #include <linux/irq.h>
++#include <linux/ipipe.h>
+ 
+ struct device_node;
+ struct qe_ic;
+@@ -85,7 +86,7 @@ static inline void qe_ic_cascade_low_ipic(unsigned int irq,
        unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
  
        if (cascade_irq != NO_IRQ)
@@ -1134,7 +1233,7 @@ index 25784cc..f29c8cf 100644
  }
  
  static inline void qe_ic_cascade_high_ipic(unsigned int irq,
-@@ -95,7 +95,7 @@ static inline void qe_ic_cascade_high_ipic(unsigned int irq,
+@@ -95,7 +96,7 @@ static inline void qe_ic_cascade_high_ipic(unsigned int irq,
        unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
  
        if (cascade_irq != NO_IRQ)
@@ -1143,7 +1242,7 @@ index 25784cc..f29c8cf 100644
  }
  
  static inline void qe_ic_cascade_low_mpic(unsigned int irq,
-@@ -106,7 +106,7 @@ static inline void qe_ic_cascade_low_mpic(unsigned int irq,
+@@ -106,7 +107,7 @@ static inline void qe_ic_cascade_low_mpic(unsigned int irq,
        struct irq_chip *chip = irq_desc_get_chip(desc);
  
        if (cascade_irq != NO_IRQ)
@@ -1152,7 +1251,7 @@ index 25784cc..f29c8cf 100644
  
        chip->irq_eoi(&desc->irq_data);
  }
-@@ -119,7 +119,7 @@ static inline void qe_ic_cascade_high_mpic(unsigned int irq,
+@@ -119,7 +120,7 @@ static inline void qe_ic_cascade_high_mpic(unsigned int irq,
        struct irq_chip *chip = irq_desc_get_chip(desc);
  
        if (cascade_irq != NO_IRQ)
@@ -1161,7 +1260,7 @@ index 25784cc..f29c8cf 100644
  
        chip->irq_eoi(&desc->irq_data);
  }
-@@ -136,7 +136,7 @@ static inline void qe_ic_cascade_muxed_mpic(unsigned int irq,
+@@ -136,7 +137,7 @@ static inline void qe_ic_cascade_muxed_mpic(unsigned int irq,
                cascade_irq = qe_ic_get_low_irq(qe_ic);
  
        if (cascade_irq != NO_IRQ)
@@ -1434,7 +1533,7 @@ index 60262fd..e4db6db 100644
        struct cpu_spec *s = cpu_specs;
        int i;
 diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
-index 46fc0f4..f4568bb 100644
+index 46fc0f4..fa32ed8 100644
 --- a/arch/powerpc/kernel/entry_32.S
 +++ b/arch/powerpc/kernel/entry_32.S
 @@ -179,9 +179,11 @@ transfer_to_handler:
@@ -1604,11 +1703,13 @@ index 46fc0f4..f4568bb 100644
        CURRENT_THREAD_INFO(r9, r1)
        lwz     r3,TI_FLAGS(r9)
        andi.   r0,r3,_TIF_NEED_RESCHED
-@@ -1233,6 +1346,13 @@ ee_restarts:
+@@ -1233,6 +1346,15 @@ ee_restarts:
        .space  4
        .previous
  
 +#ifdef CONFIG_IPIPE
++_GLOBAL(__ipipe_ret_from_except_full)
++      REST_NVGPRS(r1)
 +_GLOBAL(__ipipe_ret_from_except)
 +        cmpwi   r3, 0
 +        bne+ ret_from_except
@@ -1619,7 +1720,7 @@ index 46fc0f4..f4568bb 100644
   * PROM code for specific machines follows.  Put it
   * here so it's easy to add arch-specific sections later.
 diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
-index afbc200..65fe2c1 100644
+index afbc200..107d0dd 100644
 --- a/arch/powerpc/kernel/entry_64.S
 +++ b/arch/powerpc/kernel/entry_64.S
 @@ -33,6 +33,7 @@
@@ -1858,25 +1959,21 @@ index afbc200..65fe2c1 100644
  #endif /* CONFIG_PREEMPT */
  
        .globl  fast_exc_return_irq
-@@ -743,6 +798,17 @@ restore:
+@@ -743,6 +798,13 @@ restore:
         * are about to re-enable interrupts
         */
        ld      r5,SOFTE(r1)
 +#ifdef CONFIG_IPIPE
-+      cmpwi   cr0,r5,0
-+      nor     r5,r5,r5        /* IPIPE_STALL_FLAG = !SOFTE */
 +      ld      r4,PACAROOTPCPU(r13)
-+      ld      r3,0(r4)
-+      insrdi  r3,r5,1,63
-+      std     r3,0(r4)
-+      beq     1f
++      cmpwi   cr0,r4,0
++      bne     1f
 +      TRACE_ENABLE_INTS
 +1:
 +#else /* !CONFIG_IPIPE */
        lbz     r6,PACASOFTIRQEN(r13)
        cmpwi   cr0,r5,0
        beq     restore_irq_off
-@@ -769,6 +835,7 @@ restore_no_replay:
+@@ -769,6 +831,7 @@ restore_no_replay:
        TRACE_ENABLE_INTS
        li      r0,1
        stb     r0,PACASOFTIRQEN(r13);
@@ -1884,7 +1981,7 @@ index afbc200..65fe2c1 100644
  
        /*
         * Final return path. BookE is handled in a different file
-@@ -859,6 +926,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+@@ -859,6 +922,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
  
  #endif /* CONFIG_PPC_BOOK3E */
  
@@ -1892,7 +1989,7 @@ index afbc200..65fe2c1 100644
        /*
         * We are returning to a context with interrupts soft disabled.
         *
-@@ -940,6 +1008,8 @@ restore_check_irq_replay:
+@@ -940,6 +1004,8 @@ restore_check_irq_replay:
        b       ret_from_except
  #endif /* CONFIG_PPC_DOORBELL */
  1:    b       ret_from_except /* What else to do here ? */
@@ -1901,7 +1998,7 @@ index afbc200..65fe2c1 100644
   
  unrecov_restore:
        addi    r3,r1,STACK_FRAME_OVERHEAD
-@@ -986,7 +1056,7 @@ _GLOBAL(enter_rtas)
+@@ -986,7 +1052,7 @@ _GLOBAL(enter_rtas)
        li      r0,0
        mtcr    r0
  
@@ -1910,6 +2007,170 @@ index afbc200..65fe2c1 100644
        /* There is no way it is acceptable to get here with interrupts enabled,
         * check it with the asm equivalent of WARN_ON
         */
+diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
+index 3e68d1c..18a6004 100644
+--- a/arch/powerpc/kernel/exceptions-64e.S
++++ b/arch/powerpc/kernel/exceptions-64e.S
+@@ -65,7 +65,9 @@
+       ld      reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
+ 
+ special_reg_save:
++#ifndef CONFIG_IPIPE  
+       lbz     r9,PACAIRQHAPPENED(r13)
++#endif        
+       RECONCILE_IRQ_STATE(r3,r4)
+ 
+       /*
+@@ -132,15 +134,15 @@ BEGIN_FTR_SECTION
+       mtspr   SPRN_MAS5,r10
+       mtspr   SPRN_MAS8,r10
+ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
++#ifndef CONFIG_IPIPE  
+       SPECIAL_EXC_STORE(r9,IRQHAPPENED)
+-
++#endif
+       mfspr   r10,SPRN_DEAR
+       SPECIAL_EXC_STORE(r10,DEAR)
+       mfspr   r10,SPRN_ESR
+       SPECIAL_EXC_STORE(r10,ESR)
+ 
+-      lbz     r10,PACASOFTIRQEN(r13)
+-      SPECIAL_EXC_STORE(r10,SOFTE)
++      SPECIAL_SAVE_SOFTISTATE(r10)
+       ld      r10,_NIP(r1)
+       SPECIAL_EXC_STORE(r10,CSRR0)
+       ld      r10,_MSR(r1)
+@@ -206,8 +208,15 @@ BEGIN_FTR_SECTION
+       mtspr   SPRN_MAS8,r10
+ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+ 
+-      lbz     r6,PACASOFTIRQEN(r13)
++#ifdef CONFIG_IPIPE
++      ld      r6,PACAROOTPCPU(r13)
++      cmpwi   cr0,r6,0
++      bne     1f
++      TRACE_ENABLE_INTS
++1:
++#else
+       ld      r5,SOFTE(r1)
++      lbz     r6,PACASOFTIRQEN(r13)
+ 
+       /* Interrupts had better not already be enabled... */
+       twnei   r6,0
+@@ -226,6 +235,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+        */
+       SPECIAL_EXC_LOAD(r10,IRQHAPPENED)
+       stb     r10,PACAIRQHAPPENED(r13)
++#endif
+ 
+       SPECIAL_EXC_LOAD(r10,DEAR)
+       mtspr   SPRN_DEAR,r10
+@@ -350,10 +360,16 @@ ret_from_mc_except:
+ #define PROLOG_ADDITION_NONE_DBG(n)
+ #define PROLOG_ADDITION_NONE_MC(n)
+ 
++#ifdef CONFIG_IPIPE
++#define PROLOG_ADDITION_MASKABLE_GEN(n)
++#define MASKABLE_EXCEPTION_EXIT        b      __ipipe_ret_from_except_lite
++#else
+ #define PROLOG_ADDITION_MASKABLE_GEN(n)                                           \
+       lbz     r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */      \
+       cmpwi   cr0,r10,0;              /* yes -> go out of line */         \
+       beq     masked_interrupt_book3e_##n
++#define MASKABLE_EXCEPTION_EXIT        b      ret_from_except_lite
++#endif
+ 
+ #define PROLOG_ADDITION_2REGS_GEN(n)                                      \
+       std     r14,PACA_EXGEN+EX_R14(r13);                                 \
+@@ -397,8 +413,8 @@ exc_##n##_common:                                                      \
+       mfspr   r8,SPRN_XER;            /* save XER in stackframe */        \
+       ld      r9,excf+EX_R1(r13);     /* load orig r1 back from PACA */   \
+       lwz     r10,excf+EX_CR(r13);    /* load orig CR back from PACA  */  \
+-      lbz     r11,PACASOFTIRQEN(r13); /* get current IRQ softe */         \
+       ld      r12,exception_marker@toc(r2);                               \
++      EXC_SAVE_SOFTISTATE(r11);                                           \
+       li      r0,0;                                                       \
+       std     r3,GPR10(r1);           /* save r10 to stackframe */        \
+       std     r4,GPR11(r1);           /* save r11 to stackframe */        \
+@@ -410,7 +426,6 @@ exc_##n##_common:                                                      \
+       std     r9,0(r1);               /* store stack frame back link */   \
+       std     r10,_CCR(r1);           /* store orig CR in stackframe */   \
+       std     r9,GPR1(r1);            /* store stack frame back link */   \
+-      std     r11,SOFTE(r1);          /* and save it to stackframe */     \
+       std     r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */       \
+       std     r3,_TRAP(r1);           /* set trap number              */  \
+       std     r0,RESULT(r1);          /* clear regs->result */
+@@ -499,7 +514,7 @@ exc_##n##_bad_stack:                                                           \
+       CHECK_NAPPING();                                                \
+       addi    r3,r1,STACK_FRAME_OVERHEAD;                             \
+       bl      hdlr;                                                   \
+-      b       ret_from_except_lite;
++      MASKABLE_EXCEPTION_EXIT;
+ 
+ /* This value is used to mark exception frames on the stack. */
+       .section        ".toc","aw"
+@@ -545,6 +560,16 @@ interrupt_base_book3e:                                    /* fake trap */
+       .globl interrupt_end_book3e
+ interrupt_end_book3e:
+ 
++#ifdef CONFIG_IPIPE
++#define BOOKE_EXTIRQ_HANDLER  __ipipe_grab_irq
++#define BOOKE_TIMER_HANDLER   __ipipe_grab_timer
++#define BOOKE_DBELL_HANDLER   __ipipe_grab_doorbell
++#else 
++#define BOOKE_EXTIRQ_HANDLER  do_IRQ
++#define BOOKE_TIMER_HANDLER   timer_interrupt
++#define BOOKE_DBELL_HANDLER   doorbell_exception
++#endif        
++      
+ /* Critical Input Interrupt */
+       START_EXCEPTION(critical_input);
+       CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
+@@ -591,8 +616,8 @@ interrupt_end_book3e:
+ 
+ /* External Input Interrupt */
+       MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL,
+-                         external_input, do_IRQ, ACK_NONE)
+-
++                         external_input, BOOKE_EXTIRQ_HANDLER, ACK_NONE)
++      
+ /* Alignment */
+       START_EXCEPTION(alignment);
+       NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT,
+@@ -676,7 +701,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ 
+ /* Decrementer Interrupt */
+       MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
+-                         decrementer, timer_interrupt, ACK_DEC)
++                         decrementer, BOOKE_TIMER_HANDLER, ACK_DEC)
+ 
+ /* Fixed Interval Timer Interrupt */
+       MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT,
+@@ -855,7 +880,7 @@ kernel_dbg_exc:
+ 
+ /* Doorbell interrupt */
+       MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
+-                         doorbell, doorbell_exception, ACK_NONE)
++                         doorbell, BOOKE_DBELL_HANDLER, ACK_NONE)
+ 
+ /* Doorbell critical Interrupt */
+       START_EXCEPTION(doorbell_crit);
+@@ -928,6 +953,7 @@ kernel_dbg_exc:
+       bl      .unknown_exception
+       b       .ret_from_except
+ 
++#ifndef CONFIG_IPIPE
+ /*
+  * An interrupt came in while soft-disabled; We mark paca->irq_happened
+  * accordingly and if the interrupt is level sensitive, we hard disable
+@@ -1000,6 +1026,7 @@ _GLOBAL(__replay_interrupt)
+       beq     exc_0x280_common
+       blr
+ 
++#endif /* !CONFIG_IPIPE */
+ 
+ /*
+  * This is called from 0x300 and 0x400 handlers after the prologs with
 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
 index 9519e6b..fbcbc46 100644
 --- a/arch/powerpc/kernel/exceptions-64s.S
@@ -2227,10 +2488,10 @@ index 9b53fe1..35b4944 100644
        blr
  #endif
 diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
-index a620203..a67bb7c 100644
+index a620203..1fd92fb 100644
 --- a/arch/powerpc/kernel/head_booke.h
 +++ b/arch/powerpc/kernel/head_booke.h
-@@ -240,6 +240,12 @@ label:
+@@ -240,6 +240,16 @@ label:
        EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
                          ret_from_except_full)
  
@@ -2238,12 +2499,16 @@ index a620203..a67bb7c 100644
 +#define EXC_XFER_IPIPE(n, hdlr)               \
 +      EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
 +                        __ipipe_ret_from_except)
++
++#define EXC_XFER_IPIPE_FULL(n, hdlr)                                          \
++      EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
++                        __ipipe_ret_from_except_full)
 +#endif /* CONFIG_IPIPE */
 +
  #define EXC_XFER_LITE(n, hdlr)                \
        EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
                          ret_from_except)
-@@ -404,6 +410,15 @@ label:
+@@ -404,6 +414,15 @@ label:
        addi    r3,r1,STACK_FRAME_OVERHEAD;                                   \
        EXC_XFER_STD(0x0700, program_check_exception)
  
@@ -2259,7 +2524,7 @@ index a620203..a67bb7c 100644
  #define DECREMENTER_EXCEPTION                                               \
        START_EXCEPTION(Decrementer)                                          \
        NORMAL_EXCEPTION_PROLOG(DECREMENTER);                 \
-@@ -411,6 +426,7 @@ label:
+@@ -411,6 +430,7 @@ label:
        mtspr   SPRN_TSR,r0;            /* Clear the DEC interrupt */         \
        addi    r3,r1,STACK_FRAME_OVERHEAD;                                   \
        EXC_XFER_LITE(0x0900, timer_interrupt)
@@ -2268,7 +2533,7 @@ index a620203..a67bb7c 100644
  #define FP_UNAVAILABLE_EXCEPTION                                            \
        START_EXCEPTION(FloatingPointUnavailable)                             \
 diff --git a/arch/powerpc/kernel/head_fsl_booke.S 
b/arch/powerpc/kernel/head_fsl_booke.S
-index fffd1f9..66425e9 100644
+index fffd1f9..25db5bf 100644
 --- a/arch/powerpc/kernel/head_fsl_booke.S
 +++ b/arch/powerpc/kernel/head_fsl_booke.S
 @@ -390,7 +390,11 @@ interrupt_base:
@@ -2283,7 +2548,19 @@ index fffd1f9..66425e9 100644
  
        /* Alignment Interrupt */
        ALIGNMENT_EXCEPTION
-@@ -1017,10 +1021,14 @@ _GLOBAL(__setup_ehv_ivors)
+@@ -648,7 +652,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+       EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \
+                 performance_monitor_exception, EXC_XFER_STD)
+ 
++#ifdef CONFIG_IPIPE
++      EXCEPTION(0x2070, DOORBELL, Doorbell, __ipipe_grab_doorbell, EXC_XFER_IPIPE_FULL)
++#else
+       EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception, EXC_XFER_STD)
++#endif
+ 
+       CRITICAL_EXCEPTION(0x2080, DOORBELL_CRITICAL, \
+                          CriticalDoorbell, unknown_exception)
+@@ -1017,10 +1025,14 @@ _GLOBAL(__setup_ehv_ivors)
  _GLOBAL(giveup_spe)
        mfmsr   r5
        oris    r5,r5,MSR_SPE@h
@@ -2299,7 +2576,7 @@ index fffd1f9..66425e9 100644
        addi    r3,r3,THREAD            /* want THREAD of task */
        lwz     r5,PT_REGS(r3)
        cmpi    0,r5,0
-@@ -1040,6 +1048,15 @@ _GLOBAL(giveup_spe)
+@@ -1040,6 +1052,15 @@ _GLOBAL(giveup_spe)
        lis     r4,last_task_used_spe@ha
        stw     r5,last_task_used_spe@l(r4)
  #endif /* !CONFIG_SMP */
@@ -2376,6 +2653,43 @@ index d7216c9..888dfd1 100644
                local_irq_enable();
                /*
                 * Go into low thread priority and possibly
+diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S
+index 48c21ac..329e162 100644
+--- a/arch/powerpc/kernel/idle_book3e.S
++++ b/arch/powerpc/kernel/idle_book3e.S
+@@ -27,15 +27,20 @@ _GLOBAL(\name)
+       mflr    r0
+       std     r0,16(r1)
+ 
++#ifndef CONFIG_IPIPE  
+       /* Hard disable interrupts */
+       wrteei  0
+ 
+       /* Now check if an interrupt came in while we were soft disabled
+-       * since we may otherwise lose it (doorbells etc...).
++       * since we may otherwise lose it (doorbells etc...). There is no
++       * need to do that if pipelining IRQs, since our caller already
++       * cleared the stall bit, then synchronized the interrupt log,
++       * disabling hw IRQs before getting here.
+        */
+       lbz     r3,PACAIRQHAPPENED(r13)
+       cmpwi   cr0,r3,0
+       bnelr
++#endif        
+ 
+       /* Now we are going to mark ourselves as soft and hard enabled in
+        * order to be able to take interrupts while asleep. We inform lockdep
+@@ -46,8 +51,10 @@ _GLOBAL(\name)
+       bl      trace_hardirqs_on
+       addi    r1,r1,128
+ #endif
++#ifndef CONFIG_IPIPE  
+       li      r0,1
+       stb     r0,PACASOFTIRQEN(r13)
++#endif
+       
+       /* Interrupts will make use return to LR, so get something we want
+        * in there
 diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
b/arch/powerpc/kernel/idle_power4.S
 index f57a193..ac19c01 100644
 --- a/arch/powerpc/kernel/idle_power4.S
@@ -2437,10 +2751,10 @@ index 112ccf4..5dd70f7 100644
        li      r0,0
 diff --git a/arch/powerpc/kernel/ipipe.c b/arch/powerpc/kernel/ipipe.c
 new file mode 100644
-index 0000000..9a5702a
+index 0000000..1ea81c6
 --- /dev/null
 +++ b/arch/powerpc/kernel/ipipe.c
-@@ -0,0 +1,372 @@
+@@ -0,0 +1,406 @@
 +/* -*- linux-c -*-
 + * linux/arch/powerpc/kernel/ipipe.c
 + *
@@ -2487,23 +2801,33 @@ index 0000000..9a5702a
 +#include <asm/time.h>
 +#include <asm/runlatch.h>
 +#include <asm/debug.h>
++#include <asm/dbell.h>
 +
 +static void __ipipe_do_IRQ(unsigned int irq, void *cookie);
 +
 +static void __ipipe_do_timer(unsigned int irq, void *cookie);
 +
++#ifdef CONFIG_PPC_DOORBELL
++static void __ipipe_do_doorbell(unsigned int irq, void *cookie);
++#endif
++
 +#define DECREMENTER_MAX       0x7fffffff
 +
 +#ifdef CONFIG_SMP
 +
 +static DEFINE_PER_CPU(struct ipipe_ipi_struct, ipipe_ipi_message);
 +
-+unsigned int __ipipe_ipi_irq = NR_IRQS + 1; /* dummy value */
-+
-+#ifdef CONFIG_DEBUGGER
++#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
 +cpumask_t __ipipe_dbrk_pending;       /* pending debugger break IPIs */
 +#endif
 +
++static unsigned int mux_ipi;
++
++void __ipipe_register_mux_ipi(unsigned int irq)
++{
++      mux_ipi = irq;
++}
++
 +void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd)
 +{
 +      unsigned int ipi = IPIPE_CRITICAL_IPI;
@@ -2514,30 +2838,22 @@ index 0000000..9a5702a
 +      ipd->irqs[ipi].control = IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK;
 +}
 +
-+void __ipipe_register_ipi(unsigned int irq)
++static void do_ipi_demux(int irq, struct pt_regs *regs)
 +{
-+      __ipipe_ipi_irq = irq;
-+}
-+
-+static void __ipipe_ipi_demux(int irq, struct pt_regs *regs)
-+{
-+      struct irq_desc *desc = irq_to_desc(irq);
-+      int ipi, cpu = ipipe_processor_id();
-+
-+      desc->ipipe_ack(irq, desc);
-+
-+      kstat_incr_irq_this_cpu(irq);
-+
-+      while (per_cpu(ipipe_ipi_message, cpu).value & IPIPE_MSG_IPI_MASK) {
-+              for (ipi = IPIPE_MSG_CRITICAL_IPI; ipi <= IPIPE_MSG_RESCHEDULE_IPI; ++ipi) {
-+                      if (test_and_clear_bit(ipi, &per_cpu(ipipe_ipi_message, cpu).value)) {
++      int cpu __maybe_unused = ipipe_processor_id(), ipi;
++      
++      while (this_cpu_ptr(&ipipe_ipi_message)->value & IPIPE_MSG_IPI_MASK) {
++              for (ipi = IPIPE_MSG_CRITICAL_IPI;
++                   ipi <= IPIPE_MSG_RESCHEDULE_IPI; ++ipi) {
++                      if (test_and_clear_bit(ipi,
++                             &this_cpu_ptr(&ipipe_ipi_message)->value)) {
 +                              mb();
 +                              __ipipe_handle_irq(ipi + IPIPE_BASE_IPI_OFFSET, NULL);
 +                      }
 +              }
 +      }
 +
-+#ifdef CONFIG_DEBUGGER
++#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
 +      /*
 +       * The debugger IPI handler should be NMI-safe, so let's call
 +       * it immediately in case the IPI is pending.
@@ -2546,9 +2862,9 @@ index 0000000..9a5702a
 +              cpumask_clear_cpu(cpu, &__ipipe_dbrk_pending);
 +              debugger_ipi(regs);
 +      }
-+#endif /* CONFIG_DEBUGGER */
++#endif /* CONFIG_DEBUGGER || CONFIG_KEXEC */
 +
-+      ipipe_end_irq(irq);
++      __ipipe_finish_ipi_demux(irq);
 +}
 +
 +void ipipe_set_irq_affinity(unsigned int irq, cpumask_t cpumask)
@@ -2570,22 +2886,20 @@ index 0000000..9a5702a
 +
 +      flags = hard_local_irq_save();
 +
-+      ipi -= IPIPE_BASE_IPI_OFFSET;
-+      for_each_online_cpu(cpu) {
-+              if (cpumask_test_cpu(cpu, &cpumask))
-+                      set_bit(ipi, &per_cpu(ipipe_ipi_message, cpu).value);
-+      }
-+      mb();
-+
-+      if (unlikely(cpumask_empty(&cpumask)))
-+              goto out;
-+
 +      me = ipipe_processor_id();
++      ipi -= IPIPE_BASE_IPI_OFFSET;
 +      for_each_cpu(cpu, &cpumask) {
-+              if (cpu != me)
++              if (cpu == me)
++                      continue;
++              set_bit(ipi, &per_cpu(ipipe_ipi_message, cpu).value);
++              if (smp_ops->message_pass)
 +                      smp_ops->message_pass(cpu, PPC_MSG_IPIPE_DEMUX);
++#ifdef CONFIG_PPC_SMP_MUXED_IPI
++              else
++                      smp_muxed_ipi_message_pass(cpu, PPC_MSG_IPIPE_DEMUX);
++#endif
 +      }
-+out:
++
 +      hard_local_irq_restore(flags);
 +}
 +EXPORT_SYMBOL_GPL(ipipe_send_ipi);
@@ -2628,7 +2942,7 @@ index 0000000..9a5702a
 +}
 +EXPORT_SYMBOL_GPL(ipipe_test_root);
 +
-+#endif        /* CONFIG_SMP */
++#endif        /* !CONFIG_SMP */
 +
 +void __ipipe_early_core_setup(void)
 +{
@@ -2640,6 +2954,13 @@ index 0000000..9a5702a
 +       */
 +      virq = ipipe_alloc_virq();
 +      BUG_ON(virq != IPIPE_TIMER_VIRQ);
++      /*
++       * Although not all CPUs define the doorbell event, we always
++       * allocate the corresponding VIRQ, so that we can keep fixed
++       * values for all VIRQ numbers.
++       */
++      virq = ipipe_alloc_virq();
++      BUG_ON(virq != IPIPE_DOORBELL_VIRQ);
 +#ifdef CONFIG_SMP
 +      virq = ipipe_alloc_virq();
 +      BUG_ON(virq != IPIPE_CRITICAL_IPI);
@@ -2681,6 +3002,13 @@ index 0000000..9a5702a
 +                        __ipipe_do_timer, NULL,
 +                        NULL);
 +
++#ifdef CONFIG_PPC_DOORBELL
++      ipipe_request_irq(ipipe_root_domain,
++                        IPIPE_DOORBELL_VIRQ,
++                        __ipipe_do_doorbell, NULL,
++                        NULL);
++#endif
++      
 +      ipipe_critical_exit(flags);
 +}
 +
@@ -2741,10 +3069,13 @@ index 0000000..9a5702a
 +      if (likely(irq != NO_IRQ)) {
 +              ipipe_trace_irq_entry(irq);
 +#ifdef CONFIG_SMP
-+              /* Check for cascaded I-pipe IPIs */
-+              if (irq == __ipipe_ipi_irq)
-+                      __ipipe_ipi_demux(irq, regs);
-+              else
++              if (irq == mux_ipi) {
++                      struct irq_desc *desc = irq_to_desc(irq);
++                      desc->ipipe_ack(irq, desc);
++                      kstat_incr_irq_this_cpu(irq);
++                      do_ipi_demux(irq, regs);
++                      ipipe_end_irq(irq);
++              } else
 +#endif /* CONFIG_SMP */
 +                      __ipipe_handle_irq(irq, regs);
 +      }
@@ -2771,6 +3102,23 @@ index 0000000..9a5702a
 +      timer_interrupt(raw_cpu_ptr(&ipipe_percpu.tick_regs));
 +}
 +
++#ifdef CONFIG_PPC_DOORBELL
++
++int __ipipe_grab_doorbell(struct pt_regs *regs)
++{
++#ifdef CONFIG_SMP
++      do_ipi_demux(IPIPE_DOORBELL_VIRQ, regs);
++#endif
++      return __ipipe_exit_irq(regs);
++}
++
++static void __ipipe_do_doorbell(unsigned int irq, void *cookie)
++{
++      doorbell_exception(raw_cpu_ptr(&ipipe_percpu.tick_regs));
++}
++
++#endif
++
 +int __ipipe_grab_timer(struct pt_regs *regs)
 +{
 +      struct pt_regs *tick_regs;
@@ -2814,7 +3162,7 @@ index 0000000..9a5702a
 +#endif        /* !CONFIG_PPC64 */
 +#endif /* !CONFIG_IPIPE_LEGACY */
 diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
-index 4509603..8d64765 100644
+index 4509603..c63ea4c 100644
 --- a/arch/powerpc/kernel/irq.c
 +++ b/arch/powerpc/kernel/irq.c
 @@ -94,6 +94,36 @@ extern int tau_interrupts(int);
@@ -2846,7 +3194,7 @@ index 4509603..8d64765 100644
 +static inline notrace int decrementer_check_overflow(void)
 +{
 +      u64 now = get_tb_or_rtc();
-+      u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
++      u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
 + 
 +      return now >= *next_tb;
 +}
@@ -2987,7 +3335,7 @@ index 4509603..8d64765 100644
  
        set_irq_regs(old_regs);
  }
-@@ -595,6 +613,19 @@ void exc_lvl_ctx_init(void)
+@@ -595,6 +613,21 @@ void exc_lvl_ctx_init(void)
  }
  #endif
  
@@ -2997,17 +3345,19 @@ index 4509603..8d64765 100644
 +
 +void irq_ctx_init(void) { }
 +
++#ifndef CONFIG_PREEMPT_RT_FULL
 +void do_softirq_own_stack(void)
 +{
 +      __do_softirq();
 +}
++#endif
 +
 +#else  /* !CONFIG_IPIPE */
 +
  struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
  struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
  
-@@ -632,6 +663,8 @@ void do_softirq_own_stack(void)
+@@ -632,6 +665,8 @@ void do_softirq_own_stack(void)
                set_bits(irqtp->flags, &curtp->flags);
  }
  
@@ -3300,7 +3650,7 @@ index c69671c..66ac366 100644
  #endif
  
 diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
-index ec9ec20..a17ba31 100644
+index ec9ec20..73ba6f2 100644
 --- a/arch/powerpc/kernel/smp.c
 +++ b/arch/powerpc/kernel/smp.c
 @@ -179,7 +179,7 @@ const char *smp_ipi_name[] = {
@@ -3312,7 +3662,7 @@ index ec9ec20..a17ba31 100644
  };
  
  /* optional function to request ipi, for controllers with >= 4 ipis */
-@@ -190,10 +190,10 @@ int smp_request_message_ipi(int virq, int msg)
+@@ -190,10 +190,9 @@ int smp_request_message_ipi(int virq, int msg)
        if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
                return -EINVAL;
        }
@@ -3322,12 +3672,36 @@ index ec9ec20..a17ba31 100644
 -      }
 +#ifdef CONFIG_IPIPE
 +      if (msg == PPC_MSG_DEBUGGER_BREAK)
-+              /* Piggyback the debugger IPI for the I-pipe. */
-+              __ipipe_register_ipi(virq);
++              __ipipe_register_mux_ipi(virq);
  #endif
        err = request_irq(virq, smp_ipi_action[msg],
                          IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
-@@ -314,8 +314,12 @@ void smp_send_debugger_break(void)
+@@ -262,6 +261,24 @@ irqreturn_t smp_ipi_demux(void)
+ 
+       return IRQ_HANDLED;
+ }
++
++#ifdef CONFIG_IPIPE
++
++void __ipipe_finish_ipi_demux(unsigned int irq)
++{
++      struct cpu_messages *info = this_cpu_ptr(&ipi_message);
++
++      /* Propagate remaining events to the root domain. */
++      if (info->messages)
++              __ipipe_handle_irq(irq, NULL);
++}
++
++#endif
++
++#elif defined(CONFIG_IPIPE)
++
++void __ipipe_finish_ipi_demux(unsigned int irq) { }
++
+ #endif /* CONFIG_PPC_SMP_MUXED_IPI */
+ 
+ static inline void do_message_pass(int cpu, int msg)
+@@ -314,8 +331,12 @@ void smp_send_debugger_break(void)
                return;
  
        for_each_online_cpu(cpu)
@@ -3341,7 +3715,7 @@ index ec9ec20..a17ba31 100644
  }
  #endif
  
-@@ -680,6 +684,9 @@ void start_secondary(void *unused)
+@@ -680,6 +701,9 @@ void start_secondary(void *unused)
        unsigned int cpu = smp_processor_id();
        int i, base;
  
@@ -4903,10 +5277,18 @@ index b83f325..782d29f 100644
 +      }
  }
 diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
-index fd16cb5..884694d 100644
+index fd16cb5..088f7a2 100644
 --- a/arch/powerpc/sysdev/fsl_msi.c
 +++ b/arch/powerpc/sysdev/fsl_msi.c
-@@ -316,7 +316,7 @@ static irqreturn_t fsl_msi_cascade(int irq, void *data)
+@@ -19,6 +19,7 @@
+ #include <linux/of_platform.h>
+ #include <linux/interrupt.h>
+ #include <linux/seq_file.h>
++#include <linux/ipipe.h>
+ #include <sysdev/fsl_soc.h>
+ #include <asm/prom.h>
+ #include <asm/hw_irq.h>
+@@ -316,7 +317,7 @@ static irqreturn_t fsl_msi_cascade(int irq, void *data)
                                msi_hwirq(msi_data, msir_index,
                                          intr_index + have_shift));
                if (cascade_irq != NO_IRQ) {
@@ -4915,7 +5297,7 @@ index fd16cb5..884694d 100644
                        ret = IRQ_HANDLED;
                }
                have_shift += intr_index + 1;
-@@ -326,6 +326,13 @@ static irqreturn_t fsl_msi_cascade(int irq, void *data)
+@@ -326,6 +327,13 @@ static irqreturn_t fsl_msi_cascade(int irq, void *data)
        return ret;
  }
  
@@ -4929,7 +5311,7 @@ index fd16cb5..884694d 100644
  static int fsl_of_msi_remove(struct platform_device *ofdev)
  {
        struct fsl_msi *msi = platform_get_drvdata(ofdev);
-@@ -379,12 +386,17 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
+@@ -379,12 +387,17 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
        cascade_data->virq = virt_msir;
        msi->cascade_array[irq_index] = cascade_data;
  
@@ -5118,7 +5500,7 @@ index c4828c0..fb9654a 100644
  
  unsigned int mpc8xx_get_irq(void)
 diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
-index b2b8447..687357f 100644
+index b2b8447..1f85b59 100644
 --- a/arch/powerpc/sysdev/mpic.c
 +++ b/arch/powerpc/sysdev/mpic.c
 @@ -29,6 +29,7 @@
@@ -5156,10 +5538,10 @@ index b2b8447..687357f 100644
 +{
 +      unsigned long flags;
 +
-+      spin_lock_irqsave(&mpic_lock, flags);
++      raw_spin_lock_irqsave(&mpic_lock, flags);
 +      ipipe_unlock_irq(d->irq);
 +      __mpic_unmask_irq(d);
-+      spin_unlock_irqrestore(&mpic_lock, flags);
++      raw_spin_unlock_irqrestore(&mpic_lock, flags);
 +}
 +
 +void __mpic_mask_irq(struct irq_data *d)
@@ -5174,10 +5556,10 @@ index b2b8447..687357f 100644
 +{
 +      unsigned long flags;
 +
-+      spin_lock_irqsave(&mpic_lock, flags);
++      raw_spin_lock_irqsave(&mpic_lock, flags);
 +      __mpic_mask_irq(d);
 +      ipipe_lock_irq(d->irq);
-+      spin_unlock_irqrestore(&mpic_lock, flags);
++      raw_spin_unlock_irqrestore(&mpic_lock, flags);
 +}
 +
  void mpic_end_irq(struct irq_data *d)
@@ -5194,19 +5576,19 @@ index b2b8447..687357f 100644
 +      struct mpic *mpic = mpic_from_irq_data(d);
 +      unsigned long flags;
 +
-+      spin_lock_irqsave(&mpic_lock, flags);
++      raw_spin_lock_irqsave(&mpic_lock, flags);
 +      mpic_eoi(mpic);
 +      __mpic_mask_irq(d);
-+      spin_unlock_irqrestore(&mpic_lock, flags);
++      raw_spin_unlock_irqrestore(&mpic_lock, flags);
 +}
 +
 +void mpic_release_irq(struct irq_data *d)
 +{
 +      unsigned long flags;
 +
-+      spin_lock_irqsave(&mpic_lock, flags);
++      raw_spin_lock_irqsave(&mpic_lock, flags);
 +      __mpic_unmask_irq(d);
-+      spin_unlock_irqrestore(&mpic_lock, flags);
++      raw_spin_unlock_irqrestore(&mpic_lock, flags);
 +}
 +
 +#endif
@@ -5249,9 +5631,9 @@ index b2b8447..687357f 100644
  
 -      DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, d->irq, src);
 +      DBG("%s: unmask_ipi: %d (ipi %d)\n", mpic->name, d->irq, src);
-+      spin_lock_irqsave(&mpic_lock, flags);
++      raw_spin_lock_irqsave(&mpic_lock, flags);
        mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
-+      spin_unlock_irqrestore(&mpic_lock, flags);
++      raw_spin_unlock_irqrestore(&mpic_lock, flags);
  }
  
  static void mpic_mask_ipi(struct irq_data *d)
@@ -5447,6 +5829,53 @@ index 7c37157..ca1d5c9 100644
  }
  
  static struct uic * __init uic_init_one(struct device_node *node)
+diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
+index 878a540..be519cd 100644
+--- a/arch/powerpc/sysdev/xics/xics-common.c
++++ b/arch/powerpc/sysdev/xics/xics-common.c
+@@ -20,6 +20,7 @@
+ #include <linux/of.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
++#include <linux/ipipe.h>
+ 
+ #include <asm/prom.h>
+ #include <asm/io.h>
+@@ -132,6 +133,7 @@ static void xics_request_ipi(void)
+ 
+       ipi = irq_create_mapping(xics_host, XICS_IPI);
+       BUG_ON(ipi == NO_IRQ);
++      __ipipe_register_mux_ipi(ipi);
+ 
+       /*
+        * IPIs are marked IRQF_PERCPU. The handler was set in map.
+@@ -313,9 +315,26 @@ static int xics_host_match(struct irq_domain *h, struct device_node *node)
+ static void xics_ipi_unmask(struct irq_data *d) { }
+ static void xics_ipi_mask(struct irq_data *d) { }
+ 
++#ifdef CONFIG_IPIPE
++
++static struct irq_chip xics_ipi_chip;
++
++static void xics_ipi_hold(struct irq_data *d)
++{
++      xics_ipi_chip.irq_eoi(d);
++}
++
++static void xics_ipi_release(struct irq_data *d) { }
++
++#endif
++
+ static struct irq_chip xics_ipi_chip = {
+       .name = "XICS",
+       .irq_eoi = NULL, /* Patched at init time */
++#ifdef CONFIG_IPIPE
++      .irq_hold = xics_ipi_hold,
++      .irq_release = xics_ipi_release,
++#endif
+       .irq_mask = xics_ipi_mask,
+       .irq_unmask = xics_ipi_unmask,
+ };
 diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
 index e599259..fd55e35 100644
 --- a/arch/powerpc/xmon/xmon.c
@@ -5994,6 +6423,19 @@ index 0000000..1f6e9c3
 +static inline void __ipipe_init_threadinfo(struct ipipe_threadinfo *p) { }
 +
 +#endif /* !_IPIPE_THREAD_INFO_H */
+diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
+index 0e97856..b5d7e60 100644
+--- a/include/linux/basic_mmio_gpio.h
++++ b/include/linux/basic_mmio_gpio.h
+@@ -50,7 +50,7 @@ struct bgpio_chip {
+        * Used to lock bgpio_chip->data. Also, this is needed to keep
+        * shadowed and real data registers writes together.
+        */
+-      spinlock_t lock;
++      ipipe_spinlock_t lock;
+ 
+       /* Shadowed data register to clear/set bits safely. */
+       unsigned long data;
 diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
 index 96c280b..0baa8f1 100644
 --- a/include/linux/clockchips.h
@@ -6046,6 +6488,18 @@ index 901555a..1ba117c 100644
        int     (*read)(struct console *, char *, unsigned);
        struct tty_driver *(*device)(struct console *, int *);
        void    (*unblank)(void);
+diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
+index 6cd8c0e..0951ab4 100644
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -134,6 +134,7 @@ enum {
+       FTRACE_OPS_FL_ALLOC_TRAMP               = 1 << 12,
+       FTRACE_OPS_FL_IPMODIFY                  = 1 << 13,
+       FTRACE_OPS_FL_PID                       = 1 << 14,
++      FTRACE_OPS_FL_IPIPE_EXCLUSIVE           = 1 << 15,
+ };
+ 
+ #ifdef CONFIG_DYNAMIC_FTRACE
 diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
 index f4af034..fa16b8c 100644
 --- a/include/linux/hardirq.h
@@ -6556,10 +7010,10 @@ index 0000000..0a9b5b6
 +#endif        /* !__LINUX_IPIPE_H */
 diff --git a/include/linux/ipipe_base.h b/include/linux/ipipe_base.h
 new file mode 100644
-index 0000000..8c06eed
+index 0000000..42b368a
 --- /dev/null
 +++ b/include/linux/ipipe_base.h
-@@ -0,0 +1,360 @@
+@@ -0,0 +1,365 @@
 +/* -*- linux-c -*-
 + * include/linux/ipipe_base.h
 + *
@@ -6670,6 +7124,7 @@ index 0000000..8c06eed
 +#define IPIPE_KEVT_EXIT               4
 +#define IPIPE_KEVT_CLEANUP    5
 +#define IPIPE_KEVT_HOSTRT     6
++#define IPIPE_KEVT_CLOCKFREQ  7
 +
 +struct ipipe_vm_notifier {
 +      void (*handler)(struct ipipe_vm_notifier *nfy);
@@ -6798,6 +7253,9 @@ index 0000000..8c06eed
 +#define __ipipe_report_cleanup(mm)                                    \
 +      __ipipe_notify_kevent(IPIPE_KEVT_CLEANUP, mm)
 +
++#define __ipipe_report_clockfreq_update(freq)                         \
++      __ipipe_notify_kevent(IPIPE_KEVT_CLOCKFREQ, &(freq))
++
 +void __ipipe_notify_vm_preemption(void);
 +
 +void __ipipe_call_mayday(struct pt_regs *regs);
@@ -6817,7 +7275,8 @@ index 0000000..8c06eed
 +#define IPIPE_EVENT_EXIT      (IPIPE_FIRST_EVENT + 4)
 +#define IPIPE_EVENT_CLEANUP   (IPIPE_FIRST_EVENT + 5)
 +#define IPIPE_EVENT_HOSTRT    (IPIPE_FIRST_EVENT + 6)
-+#define IPIPE_EVENT_SYSCALL   (IPIPE_FIRST_EVENT + 7)
++#define IPIPE_EVENT_CLOCKFREQ (IPIPE_FIRST_EVENT + 7)
++#define IPIPE_EVENT_SYSCALL   (IPIPE_FIRST_EVENT + 8)
 +#define IPIPE_LAST_EVENT      IPIPE_EVENT_SYSCALL
 +#define IPIPE_NR_EVENTS               (IPIPE_LAST_EVENT + 1)
 +
@@ -7668,10 +8127,10 @@ index 0000000..d00c56b
 +#endif        /* !__LINUX_IPIPE_DOMAIN_H */
 diff --git a/include/linux/ipipe_lock.h b/include/linux/ipipe_lock.h
 new file mode 100644
-index 0000000..f71d2f1
+index 0000000..a108278
 --- /dev/null
 +++ b/include/linux/ipipe_lock.h
-@@ -0,0 +1,260 @@
+@@ -0,0 +1,327 @@
 +/*   -*- linux-c -*-
 + *   include/linux/ipipe_lock.h
 + *
@@ -7700,22 +8159,87 @@ index 0000000..f71d2f1
 +      arch_spinlock_t arch_lock;
 +} __ipipe_spinlock_t;
 +
++#define ipipe_spinlock(lock)  ((__ipipe_spinlock_t *)(lock))
 +#define ipipe_spinlock_p(lock)                                                \
 +      __builtin_types_compatible_p(typeof(lock), __ipipe_spinlock_t *) ||     \
 +      __builtin_types_compatible_p(typeof(lock), __ipipe_spinlock_t [])
 +
++#define std_spinlock_raw(lock)        ((raw_spinlock_t *)(lock))
 +#define std_spinlock_raw_p(lock)                                      \
 +      __builtin_types_compatible_p(typeof(lock), raw_spinlock_t *) || \
 +      __builtin_types_compatible_p(typeof(lock), raw_spinlock_t [])
 +
++#ifdef CONFIG_PREEMPT_RT_FULL
++
++#define PICK_SPINLOCK_IRQSAVE(lock, flags)                            \
++      do {                                                            \
++              if (ipipe_spinlock_p(lock))                             \
++                      (flags) = __ipipe_spin_lock_irqsave(ipipe_spinlock(lock)); \
++              else if (std_spinlock_raw_p(lock))                              \
++                      __real_raw_spin_lock_irqsave(std_spinlock_raw(lock), flags); \
++              else __bad_lock_type();                                 \
++      } while (0)
++
++#define PICK_SPINTRYLOCK_IRQSAVE(lock, flags)                         \
++      ({                                                              \
++              int __ret__;                                            \
++              if (ipipe_spinlock_p(lock))                             \
++                      __ret__ = __ipipe_spin_trylock_irqsave(ipipe_spinlock(lock), &(flags)); \
++              else if (std_spinlock_raw_p(lock))                              \
++                      __ret__ = __real_raw_spin_trylock_irqsave(std_spinlock_raw(lock), flags); \
++              else __bad_lock_type();                                 \
++              __ret__;                                                \
++       })
++
++#define PICK_SPINTRYLOCK_IRQ(lock)                                    \
++      ({                                                              \
++              int __ret__;                                            \
++              if (ipipe_spinlock_p(lock))                             \
++                      __ret__ = __ipipe_spin_trylock_irq(ipipe_spinlock(lock)); \
++              else if (std_spinlock_raw_p(lock))                              \
++                      __ret__ = __real_raw_spin_trylock_irq(std_spinlock_raw(lock)); \
++              else __bad_lock_type();                                 \
++              __ret__;                                                \
++       })
++
++#define PICK_SPINUNLOCK_IRQRESTORE(lock, flags)                               \
++      do {                                                            \
++              if (ipipe_spinlock_p(lock))                             \
++                      __ipipe_spin_unlock_irqrestore(ipipe_spinlock(lock), flags); \
++              else if (std_spinlock_raw_p(lock)) {                    \
++                      __ipipe_spin_unlock_debug(flags);               \
++                      __real_raw_spin_unlock_irqrestore(std_spinlock_raw(lock), flags); \
++              } else __bad_lock_type();                               \
++      } while (0)
++
++#define PICK_SPINOP(op, lock)                                         \
++      ({                                                              \
++              if (ipipe_spinlock_p(lock))                             \
++                      arch_spin##op(&ipipe_spinlock(lock)->arch_lock); \
++              else if (std_spinlock_raw_p(lock))                      \
++                      __real_raw_spin##op(std_spinlock_raw(lock));    \
++              else __bad_lock_type();                                 \
++              (void)0;                                                \
++      })
++
++#define PICK_SPINOP_RET(op, lock, type)                                       \
++      ({                                                              \
++              type __ret__;                                           \
++              if (ipipe_spinlock_p(lock))                             \
++                      __ret__ = arch_spin##op(&ipipe_spinlock(lock)->arch_lock); \
++              else if (std_spinlock_raw_p(lock))                      \
++                      __ret__ = __real_raw_spin##op(std_spinlock_raw(lock)); \
++              else { __ret__ = -1; __bad_lock_type(); }               \
++              __ret__;                                                \
++      })
++
++#else /* !CONFIG_PREEMPT_RT_FULL */
++
++#define std_spinlock(lock)    ((spinlock_t *)(lock))
 +#define std_spinlock_p(lock)                                          \
 +      __builtin_types_compatible_p(typeof(lock), spinlock_t *) ||     \
 +      __builtin_types_compatible_p(typeof(lock), spinlock_t [])
 +
-+#define ipipe_spinlock(lock)  ((__ipipe_spinlock_t *)(lock))
-+#define std_spinlock_raw(lock)        ((raw_spinlock_t *)(lock))
-+#define std_spinlock(lock)    ((spinlock_t *)(lock))
-+
 +#define PICK_SPINLOCK_IRQSAVE(lock, flags)                            \
 +      do {                                                            \
 +              if (ipipe_spinlock_p(lock))                             \
@@ -7791,6 +8315,8 @@ index 0000000..f71d2f1
 +              __ret__;                                                \
 +      })
 +
++#endif /* !CONFIG_PREEMPT_RT_FULL */
++
 +#define arch_spin_lock_init(lock)                                     \
 +      do {                                                            \
 +              IPIPE_DEFINE_SPINLOCK(__lock__);                        \
@@ -7920,7 +8446,7 @@ index 0000000..f71d2f1
 +#define __ipipe_spin_trylock_irq(lock)                1
 +#define __ipipe_spin_trylock_irqsave(lock, x) ({ (void)(x); 1; })
 +#define __ipipe_spin_unlock_irqrestore(lock, x)       do { (void)(x); } while (0)
-+#define __ipipe_spin_unlock_irqbegin(lock)    do { } while (0)
++#define __ipipe_spin_unlock_irqbegin(lock)    spin_unlock(lock)
 +#define __ipipe_spin_unlock_irqcomplete(x)    do { (void)(x); } while (0)
 +#define __ipipe_spin_unlock_debug(flags)      do { } while (0)
 +
@@ -7934,10 +8460,10 @@ index 0000000..f71d2f1
 +#endif /* !__LINUX_IPIPE_LOCK_H */
 diff --git a/include/linux/ipipe_tickdev.h b/include/linux/ipipe_tickdev.h
 new file mode 100644
-index 0000000..58a4142
+index 0000000..2f065ad
 --- /dev/null
 +++ b/include/linux/ipipe_tickdev.h
-@@ -0,0 +1,145 @@
+@@ -0,0 +1,148 @@
 +/* -*- linux-c -*-
 + * include/linux/ipipe_tickdev.h
 + *
@@ -8017,6 +8543,7 @@ index 0000000..58a4142
 +                            struct clock_event_device *cdev);
 +      int (*real_set_next_event)(unsigned long evt,
 +                                 struct clock_event_device *cdev);
++      unsigned int (*refresh_freq)(void);
 +};
 +
 +#define __ipipe_hrtimer_irq __ipipe_raw_cpu_read(ipipe_percpu.hrtimer_irq)
@@ -8069,6 +8596,8 @@ index 0000000..58a4142
 +
 +unsigned ipipe_timer_ns2ticks(struct ipipe_timer *timer, unsigned ns);
 +
++void __ipipe_timer_refresh_freq(unsigned int hrclock_freq);
++
 +#else /* !CONFIG_IPIPE */
 +
 +#define ipipe_host_timer_register(clkevt) do { } while (0)
@@ -9623,7 +10152,7 @@ index 0000000..797a849
 +}
 diff --git a/kernel/ipipe/core.c b/kernel/ipipe/core.c
 new file mode 100644
-index 0000000..3b07d65
+index 0000000..339fb95
 --- /dev/null
 +++ b/kernel/ipipe/core.c
 @@ -0,0 +1,1916 @@
@@ -10451,7 +10980,7 @@ index 0000000..3b07d65
 +      unsigned long flags, irq = 0;
 +      int ipos;
 +
-+      spin_lock_irqsave(&__ipipe_lock, flags);
++      raw_spin_lock_irqsave(&__ipipe_lock, flags);
 +
 +      if (__ipipe_virtual_irq_map != ~0) {
 +              ipos = ffz(__ipipe_virtual_irq_map);
@@ -10459,7 +10988,7 @@ index 0000000..3b07d65
 +              irq = ipos + IPIPE_VIRQ_BASE;
 +      }
 +
-+      spin_unlock_irqrestore(&__ipipe_lock, flags);
++      raw_spin_unlock_irqrestore(&__ipipe_lock, flags);
 +
 +      return irq;
 +}
@@ -10489,7 +11018,7 @@ index 0000000..3b07d65
 +          (irq >= IPIPE_NR_XIRQS && !ipipe_virtual_irq_p(irq)))
 +              return -EINVAL;
 +
-+      spin_lock_irqsave(&__ipipe_lock, flags);
++      raw_spin_lock_irqsave(&__ipipe_lock, flags);
 +
 +      if (ipd->irqs[irq].handler) {
 +              ret = -EBUSY;
@@ -10507,7 +11036,7 @@ index 0000000..3b07d65
 +      if (irq < IPIPE_NR_ROOT_IRQS)
 +              __ipipe_enable_irqdesc(ipd, irq);
 +out:
-+      spin_unlock_irqrestore(&__ipipe_lock, flags);
++      raw_spin_unlock_irqrestore(&__ipipe_lock, flags);
 +
 +      return ret;
 +}
@@ -10522,7 +11051,7 @@ index 0000000..3b07d65
 +      ipipe_root_only();
 +#endif /* CONFIG_IPIPE_LEGACY */
 +
-+      spin_lock_irqsave(&__ipipe_lock, flags);
++      raw_spin_lock_irqsave(&__ipipe_lock, flags);
 +
 +      if (ipd->irqs[irq].handler == NULL)
 +              goto out;
@@ -10535,7 +11064,7 @@ index 0000000..3b07d65
 +      if (irq < IPIPE_NR_ROOT_IRQS)
 +              __ipipe_disable_irqdesc(ipd, irq);
 +out:
-+      spin_unlock_irqrestore(&__ipipe_lock, flags);
++      raw_spin_unlock_irqrestore(&__ipipe_lock, flags);
 +}
 +EXPORT_SYMBOL_GPL(ipipe_free_irq);
 +
@@ -11178,7 +11707,7 @@ index 0000000..3b07d65
 +       * another CPU. Enter a spinning wait until he releases the
 +       * global lock.
 +       */
-+      spin_lock(&__ipipe_cpu_barrier);
++      raw_spin_lock(&__ipipe_cpu_barrier);
 +
 +      /* Got it. Now get out. */
 +
@@ -11188,7 +11717,7 @@ index 0000000..3b07d65
 +
 +      cpumask_set_cpu(cpu, &__ipipe_cpu_pass_map);
 +
-+      spin_unlock(&__ipipe_cpu_barrier);
++      raw_spin_unlock(&__ipipe_cpu_barrier);
 +
 +      cpumask_clear_cpu(cpu, &__ipipe_cpu_sync_map);
 +}
@@ -11221,7 +11750,7 @@ index 0000000..3b07d65
 +              }
 +restart:
 +              online = *cpu_online_mask;
-+              spin_lock(&__ipipe_cpu_barrier);
++              raw_spin_lock(&__ipipe_cpu_barrier);
 +
 +              __ipipe_cpu_sync = syncfn;
 +
@@ -11247,7 +11776,7 @@ index 0000000..3b07d65
 +                       */
 +                      __ipipe_cpu_sync = NULL;
 +
-+                      spin_unlock(&__ipipe_cpu_barrier);
++                      raw_spin_unlock(&__ipipe_cpu_barrier);
 +                      /*
 +                       * Ensure all CPUs consumed the IPI to avoid
 +                       * running __ipipe_cpu_sync prematurely. This
@@ -11277,7 +11806,7 @@ index 0000000..3b07d65
 +
 +#ifdef CONFIG_SMP
 +      if (atomic_dec_and_test(&__ipipe_critical_count)) {
-+              spin_unlock(&__ipipe_cpu_barrier);
++              raw_spin_unlock(&__ipipe_cpu_barrier);
 +              while (!cpumask_empty(&__ipipe_cpu_sync_map))
 +                      cpu_relax();
 +              cpumask_clear_cpu(ipipe_processor_id(), &__ipipe_cpu_lock_map);
@@ -11545,10 +12074,10 @@ index 0000000..3b07d65
 +#endif
 diff --git a/kernel/ipipe/timer.c b/kernel/ipipe/timer.c
 new file mode 100644
-index 0000000..73f748e
+index 0000000..a9917f4
 --- /dev/null
 +++ b/kernel/ipipe/timer.c
-@@ -0,0 +1,497 @@
+@@ -0,0 +1,520 @@
 +/* -*- linux-c -*-
 + * linux/kernel/ipipe/timer.c
 + *
@@ -11675,7 +12204,7 @@ index 0000000..73f748e
 +      if (timer->cpumask == NULL)
 +              timer->cpumask = cpumask_of(smp_processor_id());
 +
-+      spin_lock_irqsave(&lock, flags);
++      raw_spin_lock_irqsave(&lock, flags);
 +
 +      list_for_each_entry(t, &timers, link) {
 +              if (t->rating <= timer->rating) {
@@ -11685,7 +12214,7 @@ index 0000000..73f748e
 +      }
 +      list_add_tail(&timer->link, &timers);
 +  done:
-+      spin_unlock_irqrestore(&lock, flags);
++      raw_spin_unlock_irqrestore(&lock, flags);
 +}
 +
 +static void ipipe_timer_request_sync(void)
@@ -11708,18 +12237,14 @@ index 0000000..73f748e
 +      timer->request(timer, steal);
 +}
 +
-+/* Set up a timer as per-cpu timer for ipipe */
-+static void install_pcpu_timer(unsigned cpu, unsigned hrclock_freq,
-+                            struct ipipe_timer *t) {
-+      unsigned hrtimer_freq;
++static void config_pcpu_timer(struct ipipe_timer *t, unsigned hrclock_freq)
++{
 +      unsigned long long tmp;
++      unsigned hrtimer_freq;
 +
-+      if (__ipipe_hrtimer_freq == 0)
++      if (__ipipe_hrtimer_freq != t->freq)
 +              __ipipe_hrtimer_freq = t->freq;
 +
-+      per_cpu(ipipe_percpu.hrtimer_irq, cpu) = t->irq;
-+      per_cpu(percpu_timer, cpu) = t;
-+
 +      hrtimer_freq = t->freq;
 +      if (__ipipe_hrclock_freq > UINT_MAX)
 +              hrtimer_freq /= 1000;
@@ -11732,6 +12257,15 @@ index 0000000..73f748e
 +      t->c2t_frac = tmp;
 +}
 +
++/* Set up a timer as per-cpu timer for ipipe */
++static void install_pcpu_timer(unsigned cpu, unsigned hrclock_freq,
++                            struct ipipe_timer *t)
++{
++      per_cpu(ipipe_percpu.hrtimer_irq, cpu) = t->irq;
++      per_cpu(percpu_timer, cpu) = t;
++      config_pcpu_timer(t, hrclock_freq);
++}
++
 +static void select_root_only_timer(unsigned cpu, unsigned hrclock_khz,
 +                                 const struct cpumask *mask,
 +                                 struct ipipe_timer *t) {
@@ -11785,7 +12319,7 @@ index 0000000..73f748e
 +      } else
 +              hrclock_freq = __ipipe_hrclock_freq;
 +
-+      spin_lock_irqsave(&lock, flags);
++      raw_spin_lock_irqsave(&lock, flags);
 +
 +      /* First, choose timers for the CPUs handled by ipipe */
 +      for_each_cpu(cpu, mask) {
@@ -11825,7 +12359,7 @@ index 0000000..73f748e
 +              }
 +      }
 +
-+      spin_unlock_irqrestore(&lock, flags);
++      raw_spin_unlock_irqrestore(&lock, flags);
 +
 +      flags = ipipe_critical_enter(ipipe_timer_request_sync);
 +      ipipe_timer_request_sync();
@@ -11834,7 +12368,7 @@ index 0000000..73f748e
 +      return 0;
 +
 +err_remove_all:
-+      spin_unlock_irqrestore(&lock, flags);
++      raw_spin_unlock_irqrestore(&lock, flags);
 +
 +      for_each_cpu(cpu, mask) {
 +              per_cpu(ipipe_percpu.hrtimer_irq, cpu) = -1;
@@ -12046,9 +12580,27 @@ index 0000000..73f748e
 +}
 +
 +#endif /* CONFIG_IPIPE_HAVE_HOSTRT */
++
++int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
++                            bool force);
++
++void __ipipe_timer_refresh_freq(unsigned int hrclock_freq)
++{
++      struct ipipe_timer *t = __ipipe_raw_cpu_read(percpu_timer);
++      unsigned long flags;
++
++      if (t && t->refresh_freq) {
++              t->freq = t->refresh_freq();
++              flags = hard_local_irq_save();
++              config_pcpu_timer(t, hrclock_freq);
++              hard_local_irq_restore(flags);
++              clockevents_program_event(t->host_timer,
++                                        t->host_timer->next_event, false);
++      }
++}
 diff --git a/kernel/ipipe/tracer.c b/kernel/ipipe/tracer.c
 new file mode 100644
-index 0000000..da272c50
+index 0000000..1ae7bc2
 --- /dev/null
 +++ b/kernel/ipipe/tracer.c
 @@ -0,0 +1,1468 @@
@@ -12113,7 +12665,7 @@ index 0000000..da272c50
 +#define IPIPE_TFLG_CURRDOM_SHIFT    10         /* bits 10..11: current domain */
 +#define IPIPE_TFLG_CURRDOM_MASK           0x0C00
 +#define IPIPE_TFLG_DOMSTATE_SHIFT   12         /* bits 12..15: domain stalled? */
-+#define IPIPE_TFLG_DOMSTATE_BITS    3
++#define IPIPE_TFLG_DOMSTATE_BITS    1
 +
 +#define IPIPE_TFLG_DOMAIN_STALLED(point, n) \
 +      (point->flags & (1 << (n + IPIPE_TFLG_DOMSTATE_SHIFT)))
@@ -12256,7 +12808,7 @@ index 0000000..da272c50
 +      if (length > per_cpu(trace_path, cpu)[per_cpu(max_path, cpu)].length) {
 +              /* we need protection here against other cpus trying
 +                 to start a proc dump */
-+              spin_lock(&global_path_lock);
++              raw_spin_lock(&global_path_lock);
 +
 +              /* active path holds new worst case */
 +              tp->length = length;
@@ -12265,7 +12817,7 @@ index 0000000..da272c50
 +              /* find next unused trace path */
 +              active = __ipipe_get_free_trace_path(active, cpu);
 +
-+              spin_unlock(&global_path_lock);
++              raw_spin_unlock(&global_path_lock);
 +
 +              tp = &per_cpu(trace_path, cpu)[active];
 +
@@ -12288,7 +12840,7 @@ index 0000000..da272c50
 +
 +      /* we need protection here against other cpus trying
 +       * to set their frozen path or to start a proc dump */
-+      spin_lock(&global_path_lock);
++      raw_spin_lock(&global_path_lock);
 +
 +      per_cpu(frozen_path, cpu) = active;
 +
@@ -12302,7 +12854,7 @@ index 0000000..da272c50
 +                      tp->end = -1;
 +      }
 +
-+      spin_unlock(&global_path_lock);
++      raw_spin_unlock(&global_path_lock);
 +
 +      tp = &per_cpu(trace_path, cpu)[active];
 +
@@ -12457,7 +13009,7 @@ index 0000000..da272c50
 +      int cpu;
 +      struct ipipe_trace_path *tp;
 +
-+      spin_lock_irqsave(&global_path_lock, flags);
++      raw_spin_lock_irqsave(&global_path_lock, flags);
 +
 +      cpu = ipipe_processor_id();
 + restart:
@@ -13386,7 +13938,7 @@ index 0000000..da272c50
 +
 +static struct ftrace_ops ipipe_trace_ops = {
 +      .func = ipipe_trace_function,
-+      .flags = FTRACE_OPS_FL_RECURSION_SAFE,
++      .flags = FTRACE_OPS_FL_IPIPE_EXCLUSIVE,
 +};
 +
 +static ssize_t __ipipe_wr_enable(struct file *file, const char __user *buffer,
@@ -14273,7 +14825,7 @@ index 2329daa..79cfe9b 100644
        if (pm_wakeup_pending()) {
                error = -EAGAIN;
 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
-index 3c1aca0..90b1189 100644
+index 3c1aca0..ae6b3d5 100644
 --- a/kernel/printk/printk.c
 +++ b/kernel/printk/printk.c
 @@ -62,6 +62,9 @@ int console_printk[4] = {
@@ -14389,7 +14941,7 @@ index 3c1aca0..90b1189 100644
 +      goto start;
 +
 +      do {
-+              spin_unlock_irqrestore(&__ipipe_printk_lock, flags);
++              raw_spin_unlock_irqrestore(&__ipipe_printk_lock, flags);
 + start:
 +              lmax = __ipipe_printk_fill;
 +              while (out < lmax) {
@@ -14398,13 +14950,13 @@ index 3c1aca0..90b1189 100644
 +                      p += len;
 +                      out += len;
 +              }
-+              spin_lock_irqsave(&__ipipe_printk_lock, flags);
++              raw_spin_lock_irqsave(&__ipipe_printk_lock, flags);
 +      }
 +      while (__ipipe_printk_fill != lmax);
 +
 +      __ipipe_printk_fill = 0;
 +
-+      spin_unlock_irqrestore(&__ipipe_printk_lock, flags);
++      raw_spin_unlock_irqrestore(&__ipipe_printk_lock, flags);
 +}
 +
  /**
@@ -14442,7 +14994,7 @@ index 3c1aca0..90b1189 100644
 +              goto out;
 +      }
 +
-+      spin_lock_irqsave(&__ipipe_printk_lock, flags);
++      raw_spin_lock_irqsave(&__ipipe_printk_lock, flags);
 +
 +      oldcount = __ipipe_printk_fill;
 +      fbytes = __LOG_BUF_LEN - oldcount;
@@ -14453,7 +15005,7 @@ index 3c1aca0..90b1189 100644
 +      } else
 +              r = 0;
 +
-+      spin_unlock_irqrestore(&__ipipe_printk_lock, flags);
++      raw_spin_unlock_irqrestore(&__ipipe_printk_lock, flags);
 +
 +      if (oldcount == 0)
 +              ipipe_raise_irq(__ipipe_printk_virq);
@@ -15102,7 +15654,7 @@ index 3b9a48a..edd9470 100644
        help
          This option will modify all the calls to function tracing
 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index eb11011..5fa6c0a 100644
+index eb11011..d0957bd 100644
 --- a/kernel/trace/ftrace.c
 +++ b/kernel/trace/ftrace.c
 @@ -32,6 +32,7 @@
@@ -15113,7 +15665,33 @@ index eb11011..5fa6c0a 100644
  
  #include <trace/events/sched.h>
  
-@@ -2518,6 +2519,9 @@ void __weak arch_ftrace_update_code(int command)
+@@ -262,8 +263,17 @@ static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
+ 
+ static void update_ftrace_function(void)
+ {
++      struct ftrace_ops *ops;
+       ftrace_func_t func;
+ 
++      for (ops = ftrace_ops_list;
++           ops != &ftrace_list_end; ops = ops->next)
++              if (ops->flags & FTRACE_OPS_FL_IPIPE_EXCLUSIVE) {
++                      set_function_trace_op = ops;
++                      func = ops->func;
++                      goto set_pointers;
++              }
++
+       /*
+        * Prepare the ftrace_ops that the arch callback will use.
+        * If there's only one ftrace_ops registered, the ftrace_ops_list
+@@ -291,6 +301,7 @@ static void update_ftrace_function(void)
+ 
+       update_function_graph_func();
+ 
++  set_pointers:
+       /* If there's no change, then do nothing more here */
+       if (ftrace_trace_function == func)
+               return;
+@@ -2518,6 +2529,9 @@ void __weak arch_ftrace_update_code(int command)
  
  static void ftrace_run_update_code(int command)
  {
@@ -15123,7 +15701,7 @@ index eb11011..5fa6c0a 100644
        int ret;
  
        ret = ftrace_arch_code_modify_prepare();
-@@ -2531,7 +2535,13 @@ static void ftrace_run_update_code(int command)
+@@ -2531,7 +2545,13 @@ static void ftrace_run_update_code(int command)
         * is safe. The stop_machine() is the safest, but also
         * produces the most overhead.
         */
@@ -15137,7 +15715,7 @@ index eb11011..5fa6c0a 100644
  
        ret = ftrace_arch_code_modify_post_process();
        FTRACE_WARN_ON(ret);
-@@ -4877,10 +4887,10 @@ static int ftrace_process_locs(struct module *mod,
+@@ -4877,10 +4897,10 @@ static int ftrace_process_locs(struct module *mod,
         * reason to cause large interrupt latencies while we do it.
         */
        if (!mod)
@@ -15150,7 +15728,7 @@ index eb11011..5fa6c0a 100644
        ret = 0;
   out:
        mutex_unlock(&ftrace_lock);
-@@ -4979,9 +4989,11 @@ void __init ftrace_init(void)
+@@ -4979,9 +4999,11 @@ void __init ftrace_init(void)
        unsigned long count, flags;
        int ret;
  
@@ -15164,7 +15742,7 @@ index eb11011..5fa6c0a 100644
        if (ret)
                goto failed;
  
-@@ -5174,7 +5186,16 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+@@ -5174,7 +5196,16 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                }
        } while_for_each_ftrace_op(op);
  out:
@@ -15524,24 +16102,22 @@ index 86c8911..373a30b 100644
        return err;
  }
 diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
-index 1afec32..5803111 100644
+index 1afec32..f7c1a2a 100644
 --- a/lib/smp_processor_id.c
 +++ b/lib/smp_processor_id.c
-@@ -12,10 +12,13 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
+@@ -12,6 +12,12 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
  {
        int this_cpu = raw_smp_processor_id();
  
++      if (hard_irqs_disabled())
++              goto out;
++
 +      if (!ipipe_root_p)
 +              goto out;
 +
        if (likely(preempt_count()))
                goto out;
  
--      if (irqs_disabled())
-+      if (irqs_disabled() || hard_irqs_disabled())
-               goto out;
- 
-       /*
 diff --git a/mm/memory.c b/mm/memory.c
 index 2a9e098..46ec4cd 100644
 --- a/mm/memory.c
@@ -15749,7 +16325,7 @@ index 2a9e098..46ec4cd 100644
  
  static struct kmem_cache *page_ptl_cachep;
 diff --git a/mm/mlock.c b/mm/mlock.c
-index 3d3ee6c..ed47c08 100644
+index 3d3ee6ca..ed47c08 100644
 --- a/mm/mlock.c
 +++ b/mm/mlock.c
 @@ -756,3 +756,28 @@ void user_shm_unlock(size_t size, struct user_struct *user)
@@ -15884,3 +16460,16 @@ index 2faaa29..ef00f26 100644
        return nr;
  }
  
+diff --git a/sound/soc/intel/atom/sst/sst.c b/sound/soc/intel/atom/sst/sst.c
+index 96c2e42..2e6e3cf 100644
+--- a/sound/soc/intel/atom/sst/sst.c
++++ b/sound/soc/intel/atom/sst/sst.c
+@@ -369,7 +369,7 @@ static inline void sst_restore_shim64(struct intel_sst_drv *ctx,
+        */
+       spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
+       sst_shim_write64(shim, SST_IMRX, shim_regs->imrx),
+-      sst_shim_write64(shim, SST_CSR, shim_regs->csr),
++      sst_shim_write64(shim, SST_CSR, shim_regs->csr);
+       spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
+ }
+ 

