Module: xenomai-3
Branch: master
Commit: feeaab12edd92ebc9580228406064c812e2fa5bd
URL:    http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=feeaab12edd92ebc9580228406064c812e2fa5bd

Author: Philippe Gerum <r...@xenomai.org>
Date:   Fri Mar 20 09:26:27 2015 +0100

cobalt/powerpc: upgrade I-pipe support

---

 ...-7.patch => ipipe-core-3.14.33-powerpc-7.patch} |   35 +++++---
 ...c-2.patch => ipipe-core-3.16.7-powerpc-3.patch} |   91 +++++++++++++-------
 2 files changed, 80 insertions(+), 46 deletions(-)
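
For context, these renamed I-pipe patches are the files fed to the kernel
preparation step. A minimal sketch of typical usage, assuming the stock
scripts/prepare-kernel.sh helper from the xenomai-3 tree and illustrative
local paths (not part of this commit):

    # Prepare a 3.14.33 powerpc kernel tree with the renamed I-pipe patch;
    # adjust --linux to wherever the vanilla kernel sources live.
    $ cd xenomai-3
    $ scripts/prepare-kernel.sh --linux=/path/to/linux-3.14.33 \
          --ipipe=kernel/cobalt/arch/powerpc/patches/ipipe-core-3.14.33-powerpc-7.patch \
          --arch=powerpc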

diff --git a/kernel/cobalt/arch/powerpc/patches/ipipe-core-3.14.28-powerpc-7.patch b/kernel/cobalt/arch/powerpc/patches/ipipe-core-3.14.33-powerpc-7.patch
similarity index 99%
rename from kernel/cobalt/arch/powerpc/patches/ipipe-core-3.14.28-powerpc-7.patch
rename to kernel/cobalt/arch/powerpc/patches/ipipe-core-3.14.33-powerpc-7.patch
index a9d7249..64a21f2 100644
--- a/kernel/cobalt/arch/powerpc/patches/ipipe-core-3.14.28-powerpc-7.patch
+++ b/kernel/cobalt/arch/powerpc/patches/ipipe-core-3.14.33-powerpc-7.patch
@@ -5468,10 +5468,10 @@ index 9203393..8b03381 100644
  
  static struct uic * __init uic_init_one(struct device_node *node)
 diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
-index bc5fbc2..41251db 100644
+index f89389f..1db42a0 100644
 --- a/arch/powerpc/xmon/xmon.c
 +++ b/arch/powerpc/xmon/xmon.c
-@@ -1452,7 +1452,7 @@ static void excprint(struct pt_regs *fp)
+@@ -1453,7 +1453,7 @@ static void excprint(struct pt_regs *fp)
        }
  
        printf("  current = 0x%lx\n", current);
@@ -5480,7 +5480,7 @@ index bc5fbc2..41251db 100644
        printf("  paca    = 0x%lx\t softe: %d\t irq_happened: 0x%02x\n",
               local_paca, local_paca->soft_enabled, local_paca->irq_happened);
  #endif
-@@ -2072,8 +2072,10 @@ static void dump_one_paca(int cpu)
+@@ -2073,8 +2073,10 @@ static void dump_one_paca(int cpu)
        DUMP(p, stab_rr, "lx");
        DUMP(p, saved_r1, "lx");
        DUMP(p, trap_save, "x");
@@ -13857,7 +13857,7 @@ index 452d6f2..7f20d7c 100644
  }
  
 diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
-index 001fa5b..4dad5c7 100644
+index 8a160e8..8a62f8b 100644
 --- a/kernel/irq/internals.h
 +++ b/kernel/irq/internals.h
 @@ -53,6 +53,9 @@ enum {
@@ -13871,7 +13871,7 @@ index 001fa5b..4dad5c7 100644
  
  #include "debug.h"
 diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
-index 8ab8e93..5261b2a 100644
+index 07d4551..dc0af3d 100644
 --- a/kernel/irq/irqdesc.c
 +++ b/kernel/irq/irqdesc.c
 @@ -91,6 +91,9 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
@@ -13884,7 +13884,7 @@ index 8ab8e93..5261b2a 100644
  }
  
  int nr_irqs = NR_IRQS;
-@@ -270,11 +273,13 @@ int __init early_irq_init(void)
+@@ -286,11 +289,13 @@ int __init early_irq_init(void)
        return arch_early_irq_init();
  }
  
@@ -14724,7 +14724,7 @@ index 0de9d7f..7615cff 100644
  }
  
 diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
-index 6558b7a..f0ddb74 100644
+index 8c08a6f..a21ec0f 100644
 --- a/kernel/time/tick-sched.c
 +++ b/kernel/time/tick-sched.c
 @@ -148,7 +148,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
@@ -15068,7 +15068,7 @@ index 2aefbee..c3ec43f 100644
  }
  EXPORT_SYMBOL(trace_hardirqs_off_caller);
 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
-index a48abea..50cc0312 100644
+index a48abea..0e8ae4a 100644
 --- a/lib/Kconfig.debug
 +++ b/lib/Kconfig.debug
 @@ -312,6 +312,7 @@ config MAGIC_SYSRQ
@@ -15097,6 +15097,15 @@ index a48abea..50cc0312 100644
        ---help---
          Say Y here if you want to check for overflows of kernel, IRQ
          and exception stacks (if your archicture uses them). This
+@@ -1088,7 +1091,7 @@ menu "RCU Debugging"
+ 
+ config PROVE_RCU
+       bool "RCU debugging: prove RCU correctness"
+-      depends on PROVE_LOCKING
++      depends on PROVE_LOCKING && !IPIPE
+       default n
+       help
+        This feature enables lockdep extensions that check for correct
 diff --git a/lib/atomic64.c b/lib/atomic64.c
 index 08a4f06..15810f4 100644
 --- a/lib/atomic64.c
@@ -15288,7 +15297,7 @@ index 0862816..aacbf2df 100644
        help
          Transparent Hugepages allows the kernel to use huge pages and
 diff --git a/mm/memory.c b/mm/memory.c
-index 48d7365..e23fa86 100644
+index 7f30bea..100a209 100644
 --- a/mm/memory.c
 +++ b/mm/memory.c
 @@ -788,6 +788,34 @@ out:
@@ -15359,7 +15368,7 @@ index 48d7365..e23fa86 100644
                ptep_set_wrprotect(src_mm, addr, src_pte);
                pte = pte_wrprotect(pte);
        }
-@@ -888,13 +931,27 @@ int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+@@ -888,13 +931,27 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        int progress = 0;
        int rss[NR_MM_COUNTERS];
        swp_entry_t entry = (swp_entry_t){0};
@@ -15527,7 +15536,7 @@ index b1eb536..ca23f83 100644
 +}
 +#endif
 diff --git a/mm/mmap.c b/mm/mmap.c
-index b91ac80..47ee8c0 100644
+index 085bcd8..ef56280 100644
 --- a/mm/mmap.c
 +++ b/mm/mmap.c
 @@ -45,6 +45,10 @@
@@ -15541,7 +15550,7 @@ index b91ac80..47ee8c0 100644
  #ifndef arch_mmap_check
  #define arch_mmap_check(addr, len, flags)     (0)
  #endif
-@@ -2613,7 +2617,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2616,7 +2620,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
  
        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
  
@@ -15592,7 +15601,7 @@ index 769a67a..8c40894 100644
  
        return pages;
 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
-index 0fdf968..4a96f40 100644
+index aa3891e..5c756384 100644
 --- a/mm/vmalloc.c
 +++ b/mm/vmalloc.c
 @@ -191,6 +191,8 @@ static int vmap_page_range_noflush(unsigned long start, unsigned long end,
diff --git a/kernel/cobalt/arch/powerpc/patches/ipipe-core-3.16-powerpc-2.patch b/kernel/cobalt/arch/powerpc/patches/ipipe-core-3.16.7-powerpc-3.patch
similarity index 99%
rename from kernel/cobalt/arch/powerpc/patches/ipipe-core-3.16-powerpc-2.patch
rename to kernel/cobalt/arch/powerpc/patches/ipipe-core-3.16.7-powerpc-3.patch
index 72cf890..b67c921 100644
--- a/kernel/cobalt/arch/powerpc/patches/ipipe-core-3.16-powerpc-2.patch
+++ b/kernel/cobalt/arch/powerpc/patches/ipipe-core-3.16.7-powerpc-3.patch
@@ -341,7 +341,7 @@ index 10be1dd..cfad863 100644
   * or should we not care like we do now ? --BenH.
 diff --git a/arch/powerpc/include/asm/ipipe.h b/arch/powerpc/include/asm/ipipe.h
 new file mode 100644
-index 0000000..ac0d5fb
+index 0000000..5e12457
 --- /dev/null
 +++ b/arch/powerpc/include/asm/ipipe.h
 @@ -0,0 +1,157 @@
@@ -386,7 +386,7 @@ index 0000000..ac0d5fb
 +#include <linux/cache.h>
 +#include <linux/threads.h>
 +
-+#define IPIPE_CORE_RELEASE    2
++#define IPIPE_CORE_RELEASE    3
 +
 +struct ipipe_domain;
 +
@@ -3928,7 +3928,7 @@ index 115347f..dce0f48 100644
  
  /*
 diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
-index cf1d325..113329b 100644
+index afc0a82..37edc4b 100644
 --- a/arch/powerpc/mm/hash_native_64.c
 +++ b/arch/powerpc/mm/hash_native_64.c
 @@ -192,7 +192,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
@@ -4684,7 +4684,7 @@ index 5f3b232..284d267 100644
        u64 thread_id;
        unsigned long ipi_debug_brk_mask;
 diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
-index b02af9e..82751da 100644
+index ccf6f16..ca49de2 100644
 --- a/arch/powerpc/platforms/pseries/lpar.c
 +++ b/arch/powerpc/platforms/pseries/lpar.c
 @@ -184,7 +184,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
@@ -5909,7 +5909,7 @@ index a16b497..fa578e8 100644
  
  /*
 diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
-index 404a686..605479f 100644
+index 721de25..c4a10c8 100644
 --- a/include/linux/ftrace.h
 +++ b/include/linux/ftrace.h
 @@ -101,6 +101,7 @@ enum {
@@ -5919,7 +5919,7 @@ index 404a686..605479f 100644
 +      FTRACE_OPS_FL_IPIPE_EXCLUSIVE           = 1 << 9,
  };
  
- /*
+ #ifdef CONFIG_DYNAMIC_FTRACE
 diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
 index cba442e..b513a46 100644
 --- a/include/linux/hardirq.h
@@ -8366,7 +8366,7 @@ index 5b9b84b..6c8bb4d 100644
  static inline void __raw_read_lock(rwlock_t *lock)
  {
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 0376b05..82b6b3e 100644
+index c5cc872..6cdf1f7 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -26,6 +26,7 @@ struct sched_param {
@@ -8776,7 +8776,7 @@ index 36fb3b5..3b5e2dc6 100644
   * and other sensitive information are never written to disk.
   */
 diff --git a/init/Kconfig b/init/Kconfig
-index 9d76b99..0393673 100644
+index 35685a4..b533ab1 100644
 --- a/init/Kconfig
 +++ b/init/Kconfig
 @@ -69,6 +69,7 @@ config COMPILE_TEST
@@ -9007,7 +9007,7 @@ index e5c4668..5e6e8bd 100644
        if (unlikely(in_atomic()))
                printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 diff --git a/kernel/fork.c b/kernel/fork.c
-index 6a13c46..7959178 100644
+index b41958b..46d23fe 100644
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
 @@ -317,6 +317,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
@@ -13986,7 +13986,7 @@ index 4b082b5..67447fc 100644
   * The __lock_function inlines are taken from
   * include/linux/spinlock_api_smp.h
 diff --git a/kernel/module.c b/kernel/module.c
-index 81e727c..49ef05f 100644
+index 673aeb0..a607c5d 100644
 --- a/kernel/module.c
 +++ b/kernel/module.c
 @@ -879,7 +879,7 @@ static inline void print_unload_info(struct seq_file *m, struct module *mod)
@@ -14126,7 +14126,7 @@ index fcc2611..92c110f 100644
        if (pm_wakeup_pending()) {
                error = -EAGAIN;
 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
-index 13e839d..4b678cd 100644
+index 971285d..39e1ddb 100644
 --- a/kernel/printk/printk.c
 +++ b/kernel/printk/printk.c
 @@ -1756,6 +1756,43 @@ asmlinkage int printk_emit(int facility, int level,
@@ -14243,7 +14243,7 @@ index 13e839d..4b678cd 100644
  
  #else /* CONFIG_PRINTK */
 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index bc1638b..c44802b 100644
+index 0acf96b..1e5c105 100644
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
 @@ -1586,7 +1586,9 @@ void scheduler_ipi(void)
@@ -14411,7 +14411,7 @@ index bc1638b..c44802b 100644
  
        if (running)
                p->sched_class->set_curr_task(rq);
-@@ -4622,10 +4649,13 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+@@ -4623,10 +4650,13 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
        do_set_cpus_allowed(p, new_mask);
  
        /* Can the task run on the task's current CPU? If so, we're done */
@@ -14426,7 +14426,7 @@ index bc1638b..c44802b 100644
        if (p->on_rq) {
                struct migration_arg arg = { p, dest_cpu };
                /* Need help from migration thread: drop lock and wait. */
-@@ -8094,3 +8124,42 @@ void dump_cpu_task(int cpu)
+@@ -8095,3 +8125,42 @@ void dump_cpu_task(int cpu)
        pr_info("Task dump for CPU %d:\n", cpu);
        sched_show_task(cpu_curr(cpu));
  }
@@ -14753,7 +14753,7 @@ index d440935..7958b28 100644
        help
          This option will modify all the calls to function tracing
 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index ac9d1da..d0b0bce 100644
+index ca167e6..ca49007 100644
 --- a/kernel/trace/ftrace.c
 +++ b/kernel/trace/ftrace.c
 @@ -32,6 +32,7 @@
@@ -14764,7 +14764,7 @@ index ac9d1da..d0b0bce 100644
  
  #include <trace/events/sched.h>
  
-@@ -244,8 +245,17 @@ static inline void update_function_graph_func(void) { }
+@@ -252,8 +253,17 @@ static inline void update_function_graph_func(void) { }
  
  static void update_ftrace_function(void)
  {
@@ -14782,7 +14782,7 @@ index ac9d1da..d0b0bce 100644
        /*
         * If we are at the end of the list and this ops is
         * recursion safe and not dynamic and the arch supports passing ops,
-@@ -2025,6 +2035,9 @@ void __weak arch_ftrace_update_code(int command)
+@@ -2069,6 +2079,9 @@ void __weak arch_ftrace_update_code(int command)
  
  static void ftrace_run_update_code(int command)
  {
@@ -14792,7 +14792,7 @@ index ac9d1da..d0b0bce 100644
        int ret;
  
        ret = ftrace_arch_code_modify_prepare();
-@@ -2043,7 +2056,13 @@ static void ftrace_run_update_code(int command)
+@@ -2087,7 +2100,13 @@ static void ftrace_run_update_code(int command)
         * is safe. The stop_machine() is the safest, but also
         * produces the most overhead.
         */
@@ -14806,7 +14806,7 @@ index ac9d1da..d0b0bce 100644
  
        function_trace_stop--;
  
-@@ -4196,10 +4215,10 @@ static int ftrace_process_locs(struct module *mod,
+@@ -4240,10 +4259,10 @@ static int ftrace_process_locs(struct module *mod,
         * reason to cause large interrupt latencies while we do it.
         */
        if (!mod)
@@ -14819,7 +14819,7 @@ index ac9d1da..d0b0bce 100644
        ret = 0;
   out:
        mutex_unlock(&ftrace_lock);
-@@ -4298,9 +4317,11 @@ void __init ftrace_init(void)
+@@ -4342,9 +4361,11 @@ void __init ftrace_init(void)
        unsigned long count, flags;
        int ret;
  
@@ -14834,10 +14834,10 @@ index ac9d1da..d0b0bce 100644
                goto failed;
  
 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
-index ff70271..c7ded52 100644
+index 5186298..bc3c286 100644
 --- a/kernel/trace/ring_buffer.c
 +++ b/kernel/trace/ring_buffer.c
-@@ -2640,7 +2640,8 @@ static DEFINE_PER_CPU(unsigned int, current_context);
+@@ -2654,7 +2654,8 @@ static DEFINE_PER_CPU(unsigned int, current_context);
  
  static __always_inline int trace_recursive_lock(void)
  {
@@ -14847,7 +14847,7 @@ index ff70271..c7ded52 100644
        int bit;
  
        if (in_interrupt()) {
-@@ -2653,22 +2654,35 @@ static __always_inline int trace_recursive_lock(void)
+@@ -2667,22 +2668,35 @@ static __always_inline int trace_recursive_lock(void)
        } else
                bit = 3;
  
@@ -15225,10 +15225,10 @@ index 3e9977a..be95cbe 100644
        help
          Transparent Hugepages allows the kernel to use huge pages and
 diff --git a/mm/memory.c b/mm/memory.c
-index 8b44f76..a2880d1 100644
+index 533023d..5b2c5d4 100644
 --- a/mm/memory.c
 +++ b/mm/memory.c
-@@ -794,6 +794,34 @@ out:
+@@ -793,6 +793,34 @@ out:
        return pfn_to_page(pfn);
  }
  
@@ -15263,7 +15263,7 @@ index 8b44f76..a2880d1 100644
  /*
   * copy one vm_area from one task to the other. Assumes the page tables
   * already present in the new task to be cleared in the whole range
-@@ -802,8 +830,8 @@ out:
+@@ -801,8 +829,8 @@ out:
  
  static inline unsigned long
  copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
@@ -15274,7 +15274,7 @@ index 8b44f76..a2880d1 100644
  {
        unsigned long vm_flags = vma->vm_flags;
        pte_t pte = *src_pte;
-@@ -857,6 +885,21 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+@@ -856,6 +884,21 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
         * in the parent and the child
         */
        if (is_cow_mapping(vm_flags)) {
@@ -15296,7 +15296,7 @@ index 8b44f76..a2880d1 100644
                ptep_set_wrprotect(src_mm, addr, src_pte);
                pte = pte_wrprotect(pte);
        }
-@@ -894,13 +937,27 @@ int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+@@ -893,13 +936,27 @@ int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        int progress = 0;
        int rss[NR_MM_COUNTERS];
        swp_entry_t entry = (swp_entry_t){0};
@@ -15326,7 +15326,7 @@ index 8b44f76..a2880d1 100644
        src_pte = pte_offset_map(src_pmd, addr);
        src_ptl = pte_lockptr(src_mm, src_pmd);
        spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
-@@ -923,8 +980,25 @@ again:
+@@ -922,8 +979,25 @@ again:
                        progress++;
                        continue;
                }
@@ -15353,7 +15353,7 @@ index 8b44f76..a2880d1 100644
                if (entry.val)
                        break;
                progress += 8;
-@@ -1959,34 +2033,6 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
+@@ -1958,34 +2032,6 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
        return same;
  }
  
@@ -15388,7 +15388,7 @@ index 8b44f76..a2880d1 100644
  /*
   * Notify the address space that the page is about to become writable so that
   * it can prohibit this or wait for the page to get into an appropriate state.
-@@ -3804,6 +3850,41 @@ void copy_user_huge_page(struct page *dst, struct page *src,
+@@ -3803,6 +3849,41 @@ void copy_user_huge_page(struct page *dst, struct page *src,
  }
  #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
  
@@ -15512,10 +15512,35 @@ index f802c2d..b320432 100644
  #ifdef finish_arch_post_lock_switch
        finish_arch_post_lock_switch();
 diff --git a/mm/mprotect.c b/mm/mprotect.c
-index c43d557..a9bb4c2 100644
+index c43d557..54e21a6 100644
 --- a/mm/mprotect.c
 +++ b/mm/mprotect.c
-@@ -252,6 +252,12 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
+@@ -71,7 +71,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+       struct mm_struct *mm = vma->vm_mm;
+       pte_t *pte, oldpte;
+       spinlock_t *ptl;
+-      unsigned long pages = 0;
++      unsigned long pages = 0, flags;
+ 
+       pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
+       if (!pte)
+@@ -85,6 +85,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+                       bool updated = false;
+ 
+                       if (!prot_numa) {
++                              flags = hard_local_irq_save();
+                               ptent = ptep_modify_prot_start(mm, addr, pte);
+                               if (pte_numa(ptent))
+                                       ptent = pte_mknonnuma(ptent);
+@@ -96,6 +97,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+                               if (dirty_accountable && pte_dirty(ptent))
+                                       ptent = pte_mkwrite(ptent);
+                               ptep_modify_prot_commit(mm, addr, pte, ptent);
++                              hard_local_irq_restore(flags);
+                               updated = true;
+                       } else {
+                               struct page *page;
+@@ -252,6 +254,12 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
                pages = hugetlb_change_protection(vma, start, end, newprot);
        else
                pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);


