commit b3bbd4851db6ea6934e82dbbf902bf9239601eae
Author: Jacek Konieczny <[email protected]>
Date:   Tue Oct 9 22:53:39 2018 +0200

    RT patch update

 kernel-rt.patch | 16180 +++++++++++++++++++++++++++++-------------------------
 kernel.spec     |     2 +-
 2 files changed, 8605 insertions(+), 7577 deletions(-)
---
diff --git a/kernel.spec b/kernel.spec
index 7b297fca..1071cc71 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -209,7 +209,7 @@ Patch146:   kernel-aufs4+vserver.patch
 Patch250:      kernel-fix_256colors_menuconfig.patch
 
 # https://rt.wiki.kernel.org/
-# https://www.kernel.org/pub/linux/kernel/projects/rt/4.14/patch-4.14.63-rt40.patch.xz with small updates
+# https://www.kernel.org/pub/linux/kernel/projects/rt/4.14/patch-4.14.71-rt44.patch.xz with small updates
 Patch500:      kernel-rt.patch
 
 Patch2000:     kernel-small_fixes.patch
diff --git a/kernel-rt.patch b/kernel-rt.patch
index 445d59e4..47d57e4b 100644
--- a/kernel-rt.patch
+++ b/kernel-rt.patch
@@ -1,3820 +1,46 @@
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/alpha/include/asm/spinlock_types.h linux-4.14/arch/alpha/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/alpha/include/asm/spinlock_types.h    2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/alpha/include/asm/spinlock_types.h 2018-09-05 11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef _ALPHA_SPINLOCK_TYPES_H
- #define _ALPHA_SPINLOCK_TYPES_H
+diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt
+index 2cc08d4a326e..e28f7f29f2b3 100644
+--- a/Documentation/trace/events.txt
++++ b/Documentation/trace/events.txt
+@@ -517,1550 +517,4 @@ The following commands are supported:
+   totals derived from one or more trace event format fields and/or
+   event counts (hitcount).
  
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
+-  The format of a hist trigger is as follows:
 -
- typedef struct {
-       volatile unsigned int lock;
- } arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/include/asm/irq.h linux-4.14/arch/arm/include/asm/irq.h
---- linux-4.14.orig/arch/arm/include/asm/irq.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/include/asm/irq.h      2018-09-05 11:05:07.000000000 +0200
-@@ -23,6 +23,8 @@
- #endif
- 
- #ifndef __ASSEMBLY__
-+#include <linux/cpumask.h>
-+
- struct irqaction;
- struct pt_regs;
- extern void migrate_irqs(void);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/include/asm/spinlock_types.h linux-4.14/arch/arm/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/arm/include/asm/spinlock_types.h      2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/include/asm/spinlock_types.h   2018-09-05 11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef __ASM_SPINLOCK_TYPES_H
- #define __ASM_SPINLOCK_TYPES_H
- 
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
+-        hist:keys=<field1[,field2,...]>[:values=<field1[,field2,...]>]
+-          [:sort=<field1[,field2,...]>][:size=#entries][:pause][:continue]
+-          [:clear][:name=histname1] [if <filter>]
 -
- #define TICKET_SHIFT  16
- 
- typedef struct {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/include/asm/switch_to.h linux-4.14/arch/arm/include/asm/switch_to.h
---- linux-4.14.orig/arch/arm/include/asm/switch_to.h   2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/include/asm/switch_to.h        2018-09-05 11:05:07.000000000 +0200
-@@ -4,6 +4,13 @@
- 
- #include <linux/thread_info.h>
- 
-+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
-+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
-+#else
-+static inline void
-+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
-+#endif
-+
- /*
-  * For v7 SMP cores running a preemptible kernel we may be pre-empted
-  * during a TLB maintenance operation, so execute an inner-shareable dsb
-@@ -26,6 +33,7 @@
- #define switch_to(prev,next,last)                                     \
- do {                                                                  \
-       __complete_pending_tlbi();                                      \
-+      switch_kmaps(prev, next);                                       \
-       last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));        \
- } while (0)
- 
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/include/asm/thread_info.h linux-4.14/arch/arm/include/asm/thread_info.h
---- linux-4.14.orig/arch/arm/include/asm/thread_info.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/include/asm/thread_info.h      2018-09-05 11:05:07.000000000 +0200
-@@ -49,6 +49,7 @@
- struct thread_info {
-       unsigned long           flags;          /* low level flags */
-       int                     preempt_count;  /* 0 => preemptable, <0 => bug */
-+      int                     preempt_lazy_count; /* 0 => preemptable, <0 => bug */
-       mm_segment_t            addr_limit;     /* address limit */
-       struct task_struct      *task;          /* main task structure */
-       __u32                   cpu;            /* cpu */
-@@ -142,7 +143,8 @@
- #define TIF_SYSCALL_TRACE     4       /* syscall trace active */
- #define TIF_SYSCALL_AUDIT     5       /* syscall auditing active */
- #define TIF_SYSCALL_TRACEPOINT        6       /* syscall tracepoint instrumentation */
--#define TIF_SECCOMP           7       /* seccomp syscall filtering active */
-+#define TIF_SECCOMP           8       /* seccomp syscall filtering active */
-+#define TIF_NEED_RESCHED_LAZY 7
- 
- #define TIF_NOHZ              12      /* in adaptive nohz mode */
- #define TIF_USING_IWMMXT      17
-@@ -152,6 +154,7 @@
- #define _TIF_SIGPENDING               (1 << TIF_SIGPENDING)
- #define _TIF_NEED_RESCHED     (1 << TIF_NEED_RESCHED)
- #define _TIF_NOTIFY_RESUME    (1 << TIF_NOTIFY_RESUME)
-+#define _TIF_NEED_RESCHED_LAZY        (1 << TIF_NEED_RESCHED_LAZY)
- #define _TIF_UPROBE           (1 << TIF_UPROBE)
- #define _TIF_SYSCALL_TRACE    (1 << TIF_SYSCALL_TRACE)
- #define _TIF_SYSCALL_AUDIT    (1 << TIF_SYSCALL_AUDIT)
-@@ -167,7 +170,8 @@
-  * Change these and you break ASM code in entry-common.S
-  */
- #define _TIF_WORK_MASK                (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
--                               _TIF_NOTIFY_RESUME | _TIF_UPROBE)
-+                               _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
-+                               _TIF_NEED_RESCHED_LAZY)
- 
- #endif /* __KERNEL__ */
- #endif /* __ASM_ARM_THREAD_INFO_H */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/Kconfig linux-4.14/arch/arm/Kconfig
---- linux-4.14.orig/arch/arm/Kconfig   2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/Kconfig        2018-09-05 11:05:07.000000000 +0200
-@@ -45,7 +45,7 @@
-       select HARDIRQS_SW_RESEND
-       select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
-       select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
--      select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
-+      select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT_BASE
-       select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
-       select HAVE_ARCH_MMAP_RND_BITS if MMU
-       select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
-@@ -85,6 +85,7 @@
-       select HAVE_PERF_EVENTS
-       select HAVE_PERF_REGS
-       select HAVE_PERF_USER_STACK_DUMP
-+      select HAVE_PREEMPT_LAZY
-       select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
-       select HAVE_REGS_AND_STACK_ACCESS_API
-       select HAVE_SYSCALL_TRACEPOINTS
-@@ -2164,7 +2165,7 @@
- 
- config KERNEL_MODE_NEON
-       bool "Support for NEON in kernel mode"
--      depends on NEON && AEABI
-+      depends on NEON && AEABI && !PREEMPT_RT_BASE
-       help
-         Say Y to include support for NEON in kernel mode.
- 
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/asm-offsets.c linux-4.14/arch/arm/kernel/asm-offsets.c
---- linux-4.14.orig/arch/arm/kernel/asm-offsets.c      2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/kernel/asm-offsets.c   2018-09-05 11:05:07.000000000 +0200
-@@ -65,6 +65,7 @@
-   BLANK();
-   DEFINE(TI_FLAGS,            offsetof(struct thread_info, flags));
-   DEFINE(TI_PREEMPT,          offsetof(struct thread_info, preempt_count));
-+  DEFINE(TI_PREEMPT_LAZY,     offsetof(struct thread_info, preempt_lazy_count));
-   DEFINE(TI_ADDR_LIMIT,               offsetof(struct thread_info, addr_limit));
-   DEFINE(TI_TASK,             offsetof(struct thread_info, task));
-   DEFINE(TI_CPU,              offsetof(struct thread_info, cpu));
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/entry-armv.S linux-4.14/arch/arm/kernel/entry-armv.S
---- linux-4.14.orig/arch/arm/kernel/entry-armv.S       2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/kernel/entry-armv.S    2018-09-05 11:05:07.000000000 +0200
-@@ -220,11 +220,18 @@
- 
- #ifdef CONFIG_PREEMPT
-       ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
--      ldr     r0, [tsk, #TI_FLAGS]            @ get flags
-       teq     r8, #0                          @ if preempt count != 0
-+      bne     1f                              @ return from exeption
-+      ldr     r0, [tsk, #TI_FLAGS]            @ get flags
-+      tst     r0, #_TIF_NEED_RESCHED          @ if NEED_RESCHED is set
-+      blne    svc_preempt                     @ preempt!
-+
-+      ldr     r8, [tsk, #TI_PREEMPT_LAZY]     @ get preempt lazy count
-+      teq     r8, #0                          @ if preempt lazy count != 0
-       movne   r0, #0                          @ force flags to 0
--      tst     r0, #_TIF_NEED_RESCHED
-+      tst     r0, #_TIF_NEED_RESCHED_LAZY
-       blne    svc_preempt
-+1:
- #endif
- 
-       svc_exit r5, irq = 1                    @ return from exception
-@@ -239,8 +246,14 @@
- 1:    bl      preempt_schedule_irq            @ irq en/disable is done inside
-       ldr     r0, [tsk, #TI_FLAGS]            @ get new tasks TI_FLAGS
-       tst     r0, #_TIF_NEED_RESCHED
-+      bne     1b
-+      tst     r0, #_TIF_NEED_RESCHED_LAZY
-       reteq   r8                              @ go again
--      b       1b
-+      ldr     r0, [tsk, #TI_PREEMPT_LAZY]     @ get preempt lazy count
-+      teq     r0, #0                          @ if preempt lazy count != 0
-+      beq     1b
-+      ret     r8                              @ go again
-+
- #endif
- 
- __und_fault:
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/entry-common.S linux-4.14/arch/arm/kernel/entry-common.S
---- linux-4.14.orig/arch/arm/kernel/entry-common.S     2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/kernel/entry-common.S  2018-09-05 11:05:07.000000000 +0200
-@@ -53,7 +53,9 @@
-       cmp     r2, #TASK_SIZE
-       blne    addr_limit_check_failed
-       ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
--      tst     r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
-+      tst     r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
-+      bne     fast_work_pending
-+      tst     r1, #_TIF_SECCOMP
-       bne     fast_work_pending
- 
- 
-@@ -83,8 +85,11 @@
-       cmp     r2, #TASK_SIZE
-       blne    addr_limit_check_failed
-       ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
--      tst     r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
-+      tst     r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
-+      bne     do_slower_path
-+      tst     r1, #_TIF_SECCOMP
-       beq     no_work_pending
-+do_slower_path:
-  UNWIND(.fnend                )
- ENDPROC(ret_fast_syscall)
- 
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/patch.c linux-4.14/arch/arm/kernel/patch.c
---- linux-4.14.orig/arch/arm/kernel/patch.c    2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/kernel/patch.c 2018-09-05 11:05:07.000000000 +0200
-@@ -16,7 +16,7 @@
-       unsigned int insn;
- };
- 
--static DEFINE_SPINLOCK(patch_lock);
-+static DEFINE_RAW_SPINLOCK(patch_lock);
- 
- static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
-       __acquires(&patch_lock)
-@@ -33,7 +33,7 @@
-               return addr;
- 
-       if (flags)
--              spin_lock_irqsave(&patch_lock, *flags);
-+              raw_spin_lock_irqsave(&patch_lock, *flags);
-       else
-               __acquire(&patch_lock);
- 
-@@ -48,7 +48,7 @@
-       clear_fixmap(fixmap);
- 
-       if (flags)
--              spin_unlock_irqrestore(&patch_lock, *flags);
-+              raw_spin_unlock_irqrestore(&patch_lock, *flags);
-       else
-               __release(&patch_lock);
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/process.c linux-4.14/arch/arm/kernel/process.c
---- linux-4.14.orig/arch/arm/kernel/process.c  2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/kernel/process.c       2018-09-05 11:05:07.000000000 +0200
-@@ -325,6 +325,30 @@
- }
- 
- #ifdef CONFIG_MMU
-+/*
-+ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock.  If the lock is not
-+ * initialized by pgtable_page_ctor() then a coredump of the vector page will
-+ * fail.
-+ */
-+static int __init vectors_user_mapping_init_page(void)
-+{
-+      struct page *page;
-+      unsigned long addr = 0xffff0000;
-+      pgd_t *pgd;
-+      pud_t *pud;
-+      pmd_t *pmd;
-+
-+      pgd = pgd_offset_k(addr);
-+      pud = pud_offset(pgd, addr);
-+      pmd = pmd_offset(pud, addr);
-+      page = pmd_page(*(pmd));
-+
-+      pgtable_page_ctor(page);
-+
-+      return 0;
-+}
-+late_initcall(vectors_user_mapping_init_page);
-+
- #ifdef CONFIG_KUSER_HELPERS
- /*
-  * The vectors page is always readable from user space for the
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/signal.c linux-4.14/arch/arm/kernel/signal.c
---- linux-4.14.orig/arch/arm/kernel/signal.c   2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/kernel/signal.c        2018-09-05 11:05:07.000000000 +0200
-@@ -615,7 +615,8 @@
-        */
-       trace_hardirqs_off();
-       do {
--              if (likely(thread_flags & _TIF_NEED_RESCHED)) {
-+              if (likely(thread_flags & (_TIF_NEED_RESCHED |
-+                                         _TIF_NEED_RESCHED_LAZY))) {
-                       schedule();
-               } else {
-                       if (unlikely(!user_mode(regs)))
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/smp.c linux-4.14/arch/arm/kernel/smp.c
---- linux-4.14.orig/arch/arm/kernel/smp.c      2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/kernel/smp.c   2018-09-05 11:05:07.000000000 +0200
-@@ -236,8 +236,6 @@
-       flush_cache_louis();
-       local_flush_tlb_all();
- 
--      clear_tasks_mm_cpumask(cpu);
--
-       return 0;
- }
- 
-@@ -255,6 +253,7 @@
-       }
-       pr_debug("CPU%u: shutdown\n", cpu);
- 
-+      clear_tasks_mm_cpumask(cpu);
-       /*
-        * platform_cpu_kill() is generally expected to do the powering off
-        * and/or cutting of clocks to the dying CPU.  Optionally, this may
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/kernel/unwind.c linux-4.14/arch/arm/kernel/unwind.c
---- linux-4.14.orig/arch/arm/kernel/unwind.c   2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/kernel/unwind.c        2018-09-05 11:05:07.000000000 +0200
-@@ -93,7 +93,7 @@
- static const struct unwind_idx *__origin_unwind_idx;
- extern const struct unwind_idx __stop_unwind_idx[];
- 
--static DEFINE_SPINLOCK(unwind_lock);
-+static DEFINE_RAW_SPINLOCK(unwind_lock);
- static LIST_HEAD(unwind_tables);
- 
- /* Convert a prel31 symbol to an absolute address */
-@@ -201,7 +201,7 @@
-               /* module unwind tables */
-               struct unwind_table *table;
- 
--              spin_lock_irqsave(&unwind_lock, flags);
-+              raw_spin_lock_irqsave(&unwind_lock, flags);
-               list_for_each_entry(table, &unwind_tables, list) {
-                       if (addr >= table->begin_addr &&
-                           addr < table->end_addr) {
-@@ -213,7 +213,7 @@
-                               break;
-                       }
-               }
--              spin_unlock_irqrestore(&unwind_lock, flags);
-+              raw_spin_unlock_irqrestore(&unwind_lock, flags);
-       }
- 
-       pr_debug("%s: idx = %p\n", __func__, idx);
-@@ -529,9 +529,9 @@
-       tab->begin_addr = text_addr;
-       tab->end_addr = text_addr + text_size;
- 
--      spin_lock_irqsave(&unwind_lock, flags);
-+      raw_spin_lock_irqsave(&unwind_lock, flags);
-       list_add_tail(&tab->list, &unwind_tables);
--      spin_unlock_irqrestore(&unwind_lock, flags);
-+      raw_spin_unlock_irqrestore(&unwind_lock, flags);
- 
-       return tab;
- }
-@@ -543,9 +543,9 @@
-       if (!tab)
-               return;
- 
--      spin_lock_irqsave(&unwind_lock, flags);
-+      raw_spin_lock_irqsave(&unwind_lock, flags);
-       list_del(&tab->list);
--      spin_unlock_irqrestore(&unwind_lock, flags);
-+      raw_spin_unlock_irqrestore(&unwind_lock, flags);
- 
-       kfree(tab);
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-exynos/platsmp.c linux-4.14/arch/arm/mach-exynos/platsmp.c
---- linux-4.14.orig/arch/arm/mach-exynos/platsmp.c     2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/mach-exynos/platsmp.c  2018-09-05 11:05:07.000000000 +0200
-@@ -229,7 +229,7 @@
-       return (void __iomem *)(S5P_VA_SCU);
- }
- 
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
- 
- static void exynos_secondary_init(unsigned int cpu)
- {
-@@ -242,8 +242,8 @@
-       /*
-        * Synchronise with the boot thread.
-        */
--      spin_lock(&boot_lock);
--      spin_unlock(&boot_lock);
-+      raw_spin_lock(&boot_lock);
-+      raw_spin_unlock(&boot_lock);
- }
- 
- int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
-@@ -307,7 +307,7 @@
-        * Set synchronisation state between this boot processor
-        * and the secondary one
-        */
--      spin_lock(&boot_lock);
-+      raw_spin_lock(&boot_lock);
- 
-       /*
-        * The secondary processor is waiting to be released from
-@@ -334,7 +334,7 @@
- 
-               if (timeout == 0) {
-                       printk(KERN_ERR "cpu1 power enable failed");
--                      spin_unlock(&boot_lock);
-+                      raw_spin_unlock(&boot_lock);
-                       return -ETIMEDOUT;
-               }
-       }
-@@ -380,7 +380,7 @@
-        * calibrations, then wait for it to finish
-        */
- fail:
--      spin_unlock(&boot_lock);
-+      raw_spin_unlock(&boot_lock);
- 
-       return pen_release != -1 ? ret : 0;
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-hisi/platmcpm.c linux-4.14/arch/arm/mach-hisi/platmcpm.c
---- linux-4.14.orig/arch/arm/mach-hisi/platmcpm.c      2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/mach-hisi/platmcpm.c   2018-09-05 11:05:07.000000000 +0200
-@@ -61,7 +61,7 @@
- 
- static void __iomem *sysctrl, *fabric;
- static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
- static u32 fabric_phys_addr;
- /*
-  * [0]: bootwrapper physical address
-@@ -113,7 +113,7 @@
-       if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
-               return -EINVAL;
- 
--      spin_lock_irq(&boot_lock);
-+      raw_spin_lock_irq(&boot_lock);
- 
-       if (hip04_cpu_table[cluster][cpu])
-               goto out;
-@@ -147,7 +147,7 @@
- 
- out:
-       hip04_cpu_table[cluster][cpu]++;
--      spin_unlock_irq(&boot_lock);
-+      raw_spin_unlock_irq(&boot_lock);
- 
-       return 0;
- }
-@@ -162,11 +162,11 @@
-       cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-       cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- 
--      spin_lock(&boot_lock);
-+      raw_spin_lock(&boot_lock);
-       hip04_cpu_table[cluster][cpu]--;
-       if (hip04_cpu_table[cluster][cpu] == 1) {
-               /* A power_up request went ahead of us. */
--              spin_unlock(&boot_lock);
-+              raw_spin_unlock(&boot_lock);
-               return;
-       } else if (hip04_cpu_table[cluster][cpu] > 1) {
-               pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
-@@ -174,7 +174,7 @@
-       }
- 
-       last_man = hip04_cluster_is_down(cluster);
--      spin_unlock(&boot_lock);
-+      raw_spin_unlock(&boot_lock);
-       if (last_man) {
-               /* Since it's Cortex A15, disable L2 prefetching. */
-               asm volatile(
-@@ -203,7 +203,7 @@
-              cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
- 
-       count = TIMEOUT_MSEC / POLL_MSEC;
--      spin_lock_irq(&boot_lock);
-+      raw_spin_lock_irq(&boot_lock);
-       for (tries = 0; tries < count; tries++) {
-               if (hip04_cpu_table[cluster][cpu])
-                       goto err;
-@@ -211,10 +211,10 @@
-               data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
-               if (data & CORE_WFI_STATUS(cpu))
-                       break;
--              spin_unlock_irq(&boot_lock);
-+              raw_spin_unlock_irq(&boot_lock);
-               /* Wait for clean L2 when the whole cluster is down. */
-               msleep(POLL_MSEC);
--              spin_lock_irq(&boot_lock);
-+              raw_spin_lock_irq(&boot_lock);
-       }
-       if (tries >= count)
-               goto err;
-@@ -231,10 +231,10 @@
-               goto err;
-       if (hip04_cluster_is_down(cluster))
-               hip04_set_snoop_filter(cluster, 0);
--      spin_unlock_irq(&boot_lock);
-+      raw_spin_unlock_irq(&boot_lock);
-       return 1;
- err:
--      spin_unlock_irq(&boot_lock);
-+      raw_spin_unlock_irq(&boot_lock);
-       return 0;
- }
- #endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-omap2/omap-smp.c linux-4.14/arch/arm/mach-omap2/omap-smp.c
---- linux-4.14.orig/arch/arm/mach-omap2/omap-smp.c     2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/arm/mach-omap2/omap-smp.c  2018-09-05 11:05:07.000000000 +0200
-@@ -69,7 +69,7 @@
-       .startup_addr = omap5_secondary_startup,
- };
- 
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
- 
- void __iomem *omap4_get_scu_base(void)
- {
-@@ -177,8 +177,8 @@
-       /*
-        * Synchronise with the boot thread.
-        */
--      spin_lock(&boot_lock);
--      spin_unlock(&boot_lock);
-+      raw_spin_lock(&boot_lock);
-+      raw_spin_unlock(&boot_lock);
- }
- 
- static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -191,7 +191,7 @@
-        * Set synchronisation state between this boot processor
-        * and the secondary one
-        */
--      spin_lock(&boot_lock);
-+      raw_spin_lock(&boot_lock);
- 
-       /*
-        * Update the AuxCoreBoot0 with boot state for secondary core.
-@@ -270,7 +270,7 @@
-        * Now the secondary core is starting up let it run its
-        * calibrations, then wait for it to finish
-        */
--      spin_unlock(&boot_lock);
-+      raw_spin_unlock(&boot_lock);
- 
-       return 0;
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-prima2/platsmp.c linux-4.14/arch/arm/mach-prima2/platsmp.c
---- linux-4.14.orig/arch/arm/mach-prima2/platsmp.c     2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/mach-prima2/platsmp.c  2018-09-05 11:05:07.000000000 +0200
-@@ -22,7 +22,7 @@
- 
- static void __iomem *clk_base;
- 
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
- 
- static void sirfsoc_secondary_init(unsigned int cpu)
- {
-@@ -36,8 +36,8 @@
-       /*
-        * Synchronise with the boot thread.
-        */
--      spin_lock(&boot_lock);
--      spin_unlock(&boot_lock);
-+      raw_spin_lock(&boot_lock);
-+      raw_spin_unlock(&boot_lock);
- }
- 
- static const struct of_device_id clk_ids[]  = {
-@@ -75,7 +75,7 @@
-       /* make sure write buffer is drained */
-       mb();
- 
--      spin_lock(&boot_lock);
-+      raw_spin_lock(&boot_lock);
- 
-       /*
-        * The secondary processor is waiting to be released from
-@@ -107,7 +107,7 @@
-        * now the secondary core is starting up let it run its
-        * calibrations, then wait for it to finish
-        */
--      spin_unlock(&boot_lock);
-+      raw_spin_unlock(&boot_lock);
- 
-       return pen_release != -1 ? -ENOSYS : 0;
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-qcom/platsmp.c linux-4.14/arch/arm/mach-qcom/platsmp.c
---- linux-4.14.orig/arch/arm/mach-qcom/platsmp.c       2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/mach-qcom/platsmp.c    2018-09-05 11:05:07.000000000 +0200
-@@ -46,7 +46,7 @@
- 
- extern void secondary_startup_arm(void);
- 
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
- 
- #ifdef CONFIG_HOTPLUG_CPU
- static void qcom_cpu_die(unsigned int cpu)
-@@ -60,8 +60,8 @@
-       /*
-        * Synchronise with the boot thread.
-        */
--      spin_lock(&boot_lock);
--      spin_unlock(&boot_lock);
-+      raw_spin_lock(&boot_lock);
-+      raw_spin_unlock(&boot_lock);
- }
- 
- static int scss_release_secondary(unsigned int cpu)
-@@ -284,7 +284,7 @@
-        * set synchronisation state between this boot processor
-        * and the secondary one
-        */
--      spin_lock(&boot_lock);
-+      raw_spin_lock(&boot_lock);
- 
-       /*
-        * Send the secondary CPU a soft interrupt, thereby causing
-@@ -297,7 +297,7 @@
-        * now the secondary core is starting up let it run its
-        * calibrations, then wait for it to finish
-        */
--      spin_unlock(&boot_lock);
-+      raw_spin_unlock(&boot_lock);
- 
-       return ret;
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-spear/platsmp.c linux-4.14/arch/arm/mach-spear/platsmp.c
---- linux-4.14.orig/arch/arm/mach-spear/platsmp.c      2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/mach-spear/platsmp.c   2018-09-05 11:05:07.000000000 +0200
-@@ -32,7 +32,7 @@
-       sync_cache_w(&pen_release);
- }
- 
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
- 
- static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
- 
-@@ -47,8 +47,8 @@
-       /*
-        * Synchronise with the boot thread.
-        */
--      spin_lock(&boot_lock);
--      spin_unlock(&boot_lock);
-+      raw_spin_lock(&boot_lock);
-+      raw_spin_unlock(&boot_lock);
- }
- 
- static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -59,7 +59,7 @@
-        * set synchronisation state between this boot processor
-        * and the secondary one
-        */
--      spin_lock(&boot_lock);
-+      raw_spin_lock(&boot_lock);
- 
-       /*
-        * The secondary processor is waiting to be released from
-@@ -84,7 +84,7 @@
-        * now the secondary core is starting up let it run its
-        * calibrations, then wait for it to finish
-        */
--      spin_unlock(&boot_lock);
-+      raw_spin_unlock(&boot_lock);
- 
-       return pen_release != -1 ? -ENOSYS : 0;
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mach-sti/platsmp.c linux-4.14/arch/arm/mach-sti/platsmp.c
---- linux-4.14.orig/arch/arm/mach-sti/platsmp.c        2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/mach-sti/platsmp.c     2018-09-05 11:05:07.000000000 +0200
-@@ -35,7 +35,7 @@
-       sync_cache_w(&pen_release);
- }
- 
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
- 
- static void sti_secondary_init(unsigned int cpu)
- {
-@@ -48,8 +48,8 @@
-       /*
-        * Synchronise with the boot thread.
-        */
--      spin_lock(&boot_lock);
--      spin_unlock(&boot_lock);
-+      raw_spin_lock(&boot_lock);
-+      raw_spin_unlock(&boot_lock);
- }
- 
- static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -60,7 +60,7 @@
-        * set synchronisation state between this boot processor
-        * and the secondary one
-        */
--      spin_lock(&boot_lock);
-+      raw_spin_lock(&boot_lock);
- 
-       /*
-        * The secondary processor is waiting to be released from
-@@ -91,7 +91,7 @@
-        * now the secondary core is starting up let it run its
-        * calibrations, then wait for it to finish
-        */
--      spin_unlock(&boot_lock);
-+      raw_spin_unlock(&boot_lock);
- 
-       return pen_release != -1 ? -ENOSYS : 0;
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mm/fault.c linux-4.14/arch/arm/mm/fault.c
---- linux-4.14.orig/arch/arm/mm/fault.c        2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/mm/fault.c     2018-09-05 11:05:07.000000000 +0200
-@@ -434,6 +434,9 @@
-       if (addr < TASK_SIZE)
-               return do_page_fault(addr, fsr, regs);
- 
-+      if (interrupts_enabled(regs))
-+              local_irq_enable();
-+
-       if (user_mode(regs))
-               goto bad_area;
- 
-@@ -501,6 +504,9 @@
- static int
- do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
- {
-+      if (interrupts_enabled(regs))
-+              local_irq_enable();
-+
-       do_bad_area(addr, fsr, regs);
-       return 0;
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/mm/highmem.c linux-4.14/arch/arm/mm/highmem.c
---- linux-4.14.orig/arch/arm/mm/highmem.c      2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/mm/highmem.c   2018-09-05 11:05:07.000000000 +0200
-@@ -34,6 +34,11 @@
-       return *ptep;
- }
- 
-+static unsigned int fixmap_idx(int type)
-+{
-+      return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
-+}
-+
- void *kmap(struct page *page)
- {
-       might_sleep();
-@@ -54,12 +59,13 @@
- 
- void *kmap_atomic(struct page *page)
- {
-+      pte_t pte = mk_pte(page, kmap_prot);
-       unsigned int idx;
-       unsigned long vaddr;
-       void *kmap;
-       int type;
- 
--      preempt_disable();
-+      preempt_disable_nort();
-       pagefault_disable();
-       if (!PageHighMem(page))
-               return page_address(page);
-@@ -79,7 +85,7 @@
- 
-       type = kmap_atomic_idx_push();
- 
--      idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
-+      idx = fixmap_idx(type);
-       vaddr = __fix_to_virt(idx);
- #ifdef CONFIG_DEBUG_HIGHMEM
-       /*
-@@ -93,7 +99,10 @@
-        * in place, so the contained TLB flush ensures the TLB is updated
-        * with the new mapping.
-        */
--      set_fixmap_pte(idx, mk_pte(page, kmap_prot));
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+      current->kmap_pte[type] = pte;
-+#endif
-+      set_fixmap_pte(idx, pte);
- 
-       return (void *)vaddr;
- }
-@@ -106,44 +115,75 @@
- 
-       if (kvaddr >= (void *)FIXADDR_START) {
-               type = kmap_atomic_idx();
--              idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
-+              idx = fixmap_idx(type);
- 
-               if (cache_is_vivt())
-                       __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+              current->kmap_pte[type] = __pte(0);
-+#endif
- #ifdef CONFIG_DEBUG_HIGHMEM
-               BUG_ON(vaddr != __fix_to_virt(idx));
--              set_fixmap_pte(idx, __pte(0));
- #else
-               (void) idx;  /* to kill a warning */
- #endif
-+              set_fixmap_pte(idx, __pte(0));
-               kmap_atomic_idx_pop();
-       } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
-               /* this address was obtained through kmap_high_get() */
-               kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
-       }
-       pagefault_enable();
--      preempt_enable();
-+      preempt_enable_nort();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
- 
- void *kmap_atomic_pfn(unsigned long pfn)
- {
-+      pte_t pte = pfn_pte(pfn, kmap_prot);
-       unsigned long vaddr;
-       int idx, type;
-       struct page *page = pfn_to_page(pfn);
- 
--      preempt_disable();
-+      preempt_disable_nort();
-       pagefault_disable();
-       if (!PageHighMem(page))
-               return page_address(page);
- 
-       type = kmap_atomic_idx_push();
--      idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
-+      idx = fixmap_idx(type);
-       vaddr = __fix_to_virt(idx);
- #ifdef CONFIG_DEBUG_HIGHMEM
-       BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
- #endif
--      set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+      current->kmap_pte[type] = pte;
-+#endif
-+      set_fixmap_pte(idx, pte);
- 
-       return (void *)vaddr;
- }
-+#if defined CONFIG_PREEMPT_RT_FULL
-+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
-+{
-+      int i;
-+
-+      /*
-+       * Clear @prev's kmap_atomic mappings
-+       */
-+      for (i = 0; i < prev_p->kmap_idx; i++) {
-+              int idx = fixmap_idx(i);
-+
-+              set_fixmap_pte(idx, __pte(0));
-+      }
-+      /*
-+       * Restore @next_p's kmap_atomic mappings
-+       */
-+      for (i = 0; i < next_p->kmap_idx; i++) {
-+              int idx = fixmap_idx(i);
-+
-+              if (!pte_none(next_p->kmap_pte[i]))
-+                      set_fixmap_pte(idx, next_p->kmap_pte[i]);
-+      }
-+}
-+#endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm/plat-versatile/platsmp.c linux-4.14/arch/arm/plat-versatile/platsmp.c
---- linux-4.14.orig/arch/arm/plat-versatile/platsmp.c  2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm/plat-versatile/platsmp.c       2018-09-05 11:05:07.000000000 +0200
-@@ -32,7 +32,7 @@
-       sync_cache_w(&pen_release);
- }
- 
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
- 
- void versatile_secondary_init(unsigned int cpu)
- {
-@@ -45,8 +45,8 @@
-       /*
-        * Synchronise with the boot thread.
-        */
--      spin_lock(&boot_lock);
--      spin_unlock(&boot_lock);
-+      raw_spin_lock(&boot_lock);
-+      raw_spin_unlock(&boot_lock);
- }
- 
- int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -57,7 +57,7 @@
-        * Set synchronisation state between this boot processor
-        * and the secondary one
-        */
--      spin_lock(&boot_lock);
-+      raw_spin_lock(&boot_lock);
- 
-       /*
-        * This is really belt and braces; we hold unintended secondary
-@@ -87,7 +87,7 @@
-        * now the secondary core is starting up let it run its
-        * calibrations, then wait for it to finish
-        */
--      spin_unlock(&boot_lock);
-+      raw_spin_unlock(&boot_lock);
- 
-       return pen_release != -1 ? -ENOSYS : 0;
- }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/crypto/crc32-ce-glue.c linux-4.14/arch/arm64/crypto/crc32-ce-glue.c
---- linux-4.14.orig/arch/arm64/crypto/crc32-ce-glue.c  2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/arm64/crypto/crc32-ce-glue.c       2018-09-05 11:05:07.000000000 +0200
-@@ -208,7 +208,8 @@
- 
- static int __init crc32_pmull_mod_init(void)
- {
--      if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_PMULL)) {
-+      if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
-+          !IS_ENABLED(CONFIG_PREEMPT_RT_BASE) && (elf_hwcap & HWCAP_PMULL)) {
-               crc32_pmull_algs[0].update = crc32_pmull_update;
-               crc32_pmull_algs[1].update = crc32c_pmull_update;
- 
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/crypto/Kconfig linux-4.14/arch/arm64/crypto/Kconfig
---- linux-4.14.orig/arch/arm64/crypto/Kconfig  2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm64/crypto/Kconfig       2018-09-05 11:05:07.000000000 +0200
-@@ -19,19 +19,19 @@
- 
- config CRYPTO_SHA1_ARM64_CE
-       tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)"
--      depends on KERNEL_MODE_NEON
-+      depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
-       select CRYPTO_HASH
-       select CRYPTO_SHA1
- 
- config CRYPTO_SHA2_ARM64_CE
-       tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)"
--      depends on KERNEL_MODE_NEON
-+      depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
-       select CRYPTO_HASH
-       select CRYPTO_SHA256_ARM64
- 
- config CRYPTO_GHASH_ARM64_CE
-       tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions"
--      depends on KERNEL_MODE_NEON
-+      depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
-       select CRYPTO_HASH
-       select CRYPTO_GF128MUL
-       select CRYPTO_AES
-@@ -39,7 +39,7 @@
- 
- config CRYPTO_CRCT10DIF_ARM64_CE
-       tristate "CRCT10DIF digest algorithm using PMULL instructions"
--      depends on KERNEL_MODE_NEON && CRC_T10DIF
-+      depends on KERNEL_MODE_NEON && CRC_T10DIF && !PREEMPT_RT_BASE
-       select CRYPTO_HASH
- 
- config CRYPTO_CRC32_ARM64_CE
-@@ -53,13 +53,13 @@
- 
- config CRYPTO_AES_ARM64_CE
-       tristate "AES core cipher using ARMv8 Crypto Extensions"
--      depends on ARM64 && KERNEL_MODE_NEON
-+      depends on ARM64 && KERNEL_MODE_NEON && !PREEMPT_RT_BASE
-       select CRYPTO_ALGAPI
-       select CRYPTO_AES_ARM64
- 
- config CRYPTO_AES_ARM64_CE_CCM
-       tristate "AES in CCM mode using ARMv8 Crypto Extensions"
--      depends on ARM64 && KERNEL_MODE_NEON
-+      depends on ARM64 && KERNEL_MODE_NEON && !PREEMPT_RT_BASE
-       select CRYPTO_ALGAPI
-       select CRYPTO_AES_ARM64_CE
-       select CRYPTO_AES_ARM64
-@@ -67,7 +67,7 @@
- 
- config CRYPTO_AES_ARM64_CE_BLK
-       tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
--      depends on KERNEL_MODE_NEON
-+      depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
-       select CRYPTO_BLKCIPHER
-       select CRYPTO_AES_ARM64_CE
-       select CRYPTO_AES_ARM64
-@@ -75,7 +75,7 @@
- 
- config CRYPTO_AES_ARM64_NEON_BLK
-       tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions"
--      depends on KERNEL_MODE_NEON
-+      depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
-       select CRYPTO_BLKCIPHER
-       select CRYPTO_AES_ARM64
-       select CRYPTO_AES
-@@ -83,13 +83,13 @@
- 
- config CRYPTO_CHACHA20_NEON
-       tristate "NEON accelerated ChaCha20 symmetric cipher"
--      depends on KERNEL_MODE_NEON
-+      depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
-       select CRYPTO_BLKCIPHER
-       select CRYPTO_CHACHA20
- 
- config CRYPTO_AES_ARM64_BS
-       tristate "AES in ECB/CBC/CTR/XTS modes using bit-sliced NEON algorithm"
--      depends on KERNEL_MODE_NEON
-+      depends on KERNEL_MODE_NEON && !PREEMPT_RT_BASE
-       select CRYPTO_BLKCIPHER
-       select CRYPTO_AES_ARM64_NEON_BLK
-       select CRYPTO_AES_ARM64
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/include/asm/spinlock_types.h linux-4.14/arch/arm64/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/arm64/include/asm/spinlock_types.h    2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/arm64/include/asm/spinlock_types.h 2018-09-05 11:05:07.000000000 +0200
-@@ -16,10 +16,6 @@
- #ifndef __ASM_SPINLOCK_TYPES_H
- #define __ASM_SPINLOCK_TYPES_H
- 
--#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
--# error "please don't include this file directly"
--#endif
--
- #include <linux/types.h>
- 
- #define TICKET_SHIFT  16
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/include/asm/thread_info.h linux-4.14/arch/arm64/include/asm/thread_info.h
---- linux-4.14.orig/arch/arm64/include/asm/thread_info.h       2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/arm64/include/asm/thread_info.h    2018-09-05 11:05:07.000000000 +0200
-@@ -43,6 +43,7 @@
-       u64                     ttbr0;          /* saved TTBR0_EL1 */
- #endif
-       int                     preempt_count;  /* 0 => preemptable, <0 => bug */
-+      int                     preempt_lazy_count; /* 0 => preemptable, <0 => bug */
- };
- 
- #define INIT_THREAD_INFO(tsk)                                         \
-@@ -82,6 +83,7 @@
- #define TIF_FOREIGN_FPSTATE   3       /* CPU's FP state is not current's */
- #define TIF_UPROBE            4       /* uprobe breakpoint or singlestep */
- #define TIF_FSCHECK           5       /* Check FS is USER_DS on return */
-+#define TIF_NEED_RESCHED_LAZY 6
- #define TIF_NOHZ              7
- #define TIF_SYSCALL_TRACE     8
- #define TIF_SYSCALL_AUDIT     9
-@@ -98,6 +100,7 @@
- #define _TIF_NEED_RESCHED     (1 << TIF_NEED_RESCHED)
- #define _TIF_NOTIFY_RESUME    (1 << TIF_NOTIFY_RESUME)
- #define _TIF_FOREIGN_FPSTATE  (1 << TIF_FOREIGN_FPSTATE)
-+#define _TIF_NEED_RESCHED_LAZY        (1 << TIF_NEED_RESCHED_LAZY)
- #define _TIF_NOHZ             (1 << TIF_NOHZ)
- #define _TIF_SYSCALL_TRACE    (1 << TIF_SYSCALL_TRACE)
- #define _TIF_SYSCALL_AUDIT    (1 << TIF_SYSCALL_AUDIT)
-@@ -109,8 +112,9 @@
- 
- #define _TIF_WORK_MASK                (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
-                                _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
--                               _TIF_UPROBE | _TIF_FSCHECK)
-+                               _TIF_UPROBE | _TIF_FSCHECK | _TIF_NEED_RESCHED_LAZY)
- 
-+#define _TIF_NEED_RESCHED_MASK        (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
- #define _TIF_SYSCALL_WORK     (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-                                _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
-                                _TIF_NOHZ)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/Kconfig linux-4.14/arch/arm64/Kconfig
---- linux-4.14.orig/arch/arm64/Kconfig 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/arm64/Kconfig      2018-09-05 11:05:07.000000000 +0200
-@@ -103,6 +103,7 @@
-       select HAVE_PERF_EVENTS
-       select HAVE_PERF_REGS
-       select HAVE_PERF_USER_STACK_DUMP
-+      select HAVE_PREEMPT_LAZY
-       select HAVE_REGS_AND_STACK_ACCESS_API
-       select HAVE_RCU_TABLE_FREE
-       select HAVE_SYSCALL_TRACEPOINTS
-@@ -791,7 +792,7 @@
- 
- config XEN
-       bool "Xen guest support on ARM64"
--      depends on ARM64 && OF
-+      depends on ARM64 && OF && !PREEMPT_RT_FULL
-       select SWIOTLB_XEN
-       select PARAVIRT
-       help
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/kernel/asm-offsets.c linux-4.14/arch/arm64/kernel/asm-offsets.c
---- linux-4.14.orig/arch/arm64/kernel/asm-offsets.c    2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/arm64/kernel/asm-offsets.c 2018-09-05 11:05:07.000000000 +0200
-@@ -39,6 +39,7 @@
-   BLANK();
-   DEFINE(TSK_TI_FLAGS,                offsetof(struct task_struct, thread_info.flags));
-   DEFINE(TSK_TI_PREEMPT,      offsetof(struct task_struct, thread_info.preempt_count));
-+  DEFINE(TSK_TI_PREEMPT_LAZY, offsetof(struct task_struct, thread_info.preempt_lazy_count));
-   DEFINE(TSK_TI_ADDR_LIMIT,   offsetof(struct task_struct, thread_info.addr_limit));
- #ifdef CONFIG_ARM64_SW_TTBR0_PAN
-   DEFINE(TSK_TI_TTBR0,                offsetof(struct task_struct, thread_info.ttbr0));
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/kernel/entry.S linux-4.14/arch/arm64/kernel/entry.S
---- linux-4.14.orig/arch/arm64/kernel/entry.S  2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/arm64/kernel/entry.S       2018-09-05 11:05:07.000000000 +0200
-@@ -637,11 +637,16 @@
- 
- #ifdef CONFIG_PREEMPT
-       ldr     w24, [tsk, #TSK_TI_PREEMPT]     // get preempt count
--      cbnz    w24, 1f                         // preempt count != 0
-+      cbnz    w24, 2f                         // preempt count != 0
-       ldr     x0, [tsk, #TSK_TI_FLAGS]        // get flags
--      tbz     x0, #TIF_NEED_RESCHED, 1f       // needs rescheduling?
--      bl      el1_preempt
-+      tbnz    x0, #TIF_NEED_RESCHED, 1f       // needs rescheduling?
-+
-+      ldr     w24, [tsk, #TSK_TI_PREEMPT_LAZY] // get preempt lazy count
-+      cbnz    w24, 2f                         // preempt lazy count != 0
-+      tbz     x0, #TIF_NEED_RESCHED_LAZY, 2f  // needs rescheduling?
- 1:
-+      bl      el1_preempt
-+2:
- #endif
- #ifdef CONFIG_TRACE_IRQFLAGS
-       bl      trace_hardirqs_on
-@@ -655,6 +660,7 @@
- 1:    bl      preempt_schedule_irq            // irq en/disable is done inside
-       ldr     x0, [tsk, #TSK_TI_FLAGS]        // get new tasks TI_FLAGS
-       tbnz    x0, #TIF_NEED_RESCHED, 1b       // needs rescheduling?
-+      tbnz    x0, #TIF_NEED_RESCHED_LAZY, 1b  // needs rescheduling?
-       ret     x24
- #endif
- 
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/arm64/kernel/signal.c linux-4.14/arch/arm64/kernel/signal.c
---- linux-4.14.orig/arch/arm64/kernel/signal.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/arm64/kernel/signal.c      2018-09-05 11:05:07.000000000 +0200
-@@ -756,7 +756,7 @@
-               /* Check valid user FS if needed */
-               addr_limit_user_check();
- 
--              if (thread_flags & _TIF_NEED_RESCHED) {
-+              if (thread_flags & _TIF_NEED_RESCHED_MASK) {
-                       schedule();
-               } else {
-                       local_irq_enable();
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/blackfin/include/asm/spinlock_types.h linux-4.14/arch/blackfin/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/blackfin/include/asm/spinlock_types.h 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/blackfin/include/asm/spinlock_types.h      2018-09-05 11:05:07.000000000 +0200
-@@ -7,10 +7,6 @@
- #ifndef __ASM_SPINLOCK_TYPES_H
- #define __ASM_SPINLOCK_TYPES_H
- 
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- #include <asm/rwlock.h>
- 
- typedef struct {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/hexagon/include/asm/spinlock_types.h linux-4.14/arch/hexagon/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/hexagon/include/asm/spinlock_types.h  2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/hexagon/include/asm/spinlock_types.h       2018-09-05 11:05:07.000000000 +0200
-@@ -21,10 +21,6 @@
- #ifndef _ASM_SPINLOCK_TYPES_H
- #define _ASM_SPINLOCK_TYPES_H
- 
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
-       volatile unsigned int lock;
- } arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/ia64/include/asm/spinlock_types.h linux-4.14/arch/ia64/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/ia64/include/asm/spinlock_types.h     2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/ia64/include/asm/spinlock_types.h  2018-09-05 11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef _ASM_IA64_SPINLOCK_TYPES_H
- #define _ASM_IA64_SPINLOCK_TYPES_H
- 
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
-       volatile unsigned int lock;
- } arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/ia64/kernel/mca.c linux-4.14/arch/ia64/kernel/mca.c
---- linux-4.14.orig/arch/ia64/kernel/mca.c     2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/ia64/kernel/mca.c  2018-09-05 11:05:07.000000000 +0200
-@@ -1824,7 +1824,7 @@
-       ti->cpu = cpu;
-       p->stack = ti;
-       p->state = TASK_UNINTERRUPTIBLE;
--      cpumask_set_cpu(cpu, &p->cpus_allowed);
-+      cpumask_set_cpu(cpu, &p->cpus_mask);
-       INIT_LIST_HEAD(&p->tasks);
-       p->parent = p->real_parent = p->group_leader = p;
-       INIT_LIST_HEAD(&p->children);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/Kconfig linux-4.14/arch/Kconfig
---- linux-4.14.orig/arch/Kconfig       2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/Kconfig    2018-09-05 11:05:07.000000000 +0200
-@@ -20,6 +20,7 @@
-       tristate "OProfile system profiling"
-       depends on PROFILING
-       depends on HAVE_OPROFILE
-+      depends on !PREEMPT_RT_FULL
-       select RING_BUFFER
-       select RING_BUFFER_ALLOW_SWAP
-       help
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/m32r/include/asm/spinlock_types.h linux-4.14/arch/m32r/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/m32r/include/asm/spinlock_types.h     2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/m32r/include/asm/spinlock_types.h  2018-09-05 11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef _ASM_M32R_SPINLOCK_TYPES_H
- #define _ASM_M32R_SPINLOCK_TYPES_H
- 
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
-       volatile int slock;
- } arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/metag/include/asm/spinlock_types.h linux-4.14/arch/metag/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/metag/include/asm/spinlock_types.h    2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/metag/include/asm/spinlock_types.h 2018-09-05 11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef _ASM_METAG_SPINLOCK_TYPES_H
- #define _ASM_METAG_SPINLOCK_TYPES_H
- 
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
-       volatile unsigned int lock;
- } arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/mips/include/asm/switch_to.h linux-4.14/arch/mips/include/asm/switch_to.h
---- linux-4.14.orig/arch/mips/include/asm/switch_to.h  2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/mips/include/asm/switch_to.h       2018-09-05 11:05:07.000000000 +0200
-@@ -42,7 +42,7 @@
-  * inline to try to keep the overhead down. If we have been forced to run on
-  * a "CPU" with an FPU because of a previous high level of FP computation,
-  * but did not actually use the FPU during the most recent time-slice (CU1
-- * isn't set), we undo the restriction on cpus_allowed.
-+ * isn't set), we undo the restriction on cpus_mask.
-  *
-  * We're not calling set_cpus_allowed() here, because we have no need to
-  * force prompt migration - we're already switching the current CPU to a
-@@ -57,7 +57,7 @@
-           test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&             \
-           (!(KSTK_STATUS(prev) & ST0_CU1))) {                         \
-               clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);          \
--              prev->cpus_allowed = prev->thread.user_cpus_allowed;    \
-+              prev->cpus_mask = prev->thread.user_cpus_allowed;       \
-       }                                                               \
-       next->thread.emulated_fp = 0;                                   \
- } while(0)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/mips/Kconfig linux-4.14/arch/mips/Kconfig
---- linux-4.14.orig/arch/mips/Kconfig  2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/mips/Kconfig       2018-09-05 11:05:07.000000000 +0200
-@@ -2519,7 +2519,7 @@
- #
- config HIGHMEM
-       bool "High Memory Support"
--      depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
-+      depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL
- 
- config CPU_SUPPORTS_HIGHMEM
-       bool
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/mips/kernel/mips-mt-fpaff.c linux-4.14/arch/mips/kernel/mips-mt-fpaff.c
---- linux-4.14.orig/arch/mips/kernel/mips-mt-fpaff.c   2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/mips/kernel/mips-mt-fpaff.c        2018-09-05 11:05:07.000000000 +0200
-@@ -177,7 +177,7 @@
-       if (retval)
-               goto out_unlock;
- 
--      cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
-+      cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr);
-       cpumask_and(&mask, &allowed, cpu_active_mask);
- 
- out_unlock:
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/mips/kernel/traps.c linux-4.14/arch/mips/kernel/traps.c
---- linux-4.14.orig/arch/mips/kernel/traps.c   2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/mips/kernel/traps.c        2018-09-05 11:05:07.000000000 +0200
-@@ -1193,12 +1193,12 @@
-                * restricted the allowed set to exclude any CPUs with FPUs,
-                * we'll skip the procedure.
-                */
--              if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
-+              if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
-                       cpumask_t tmask;
- 
-                       current->thread.user_cpus_allowed
--                              = current->cpus_allowed;
--                      cpumask_and(&tmask, &current->cpus_allowed,
-+                              = current->cpus_mask;
-+                      cpumask_and(&tmask, &current->cpus_mask,
-                                   &mt_fpu_cpumask);
-                       set_cpus_allowed_ptr(current, &tmask);
-                       set_thread_flag(TIF_FPUBOUND);
-diff -durN -x '*~' -x '*.orig' 
linux-4.14.orig/arch/mn10300/include/asm/spinlock_types.h 
linux-4.14/arch/mn10300/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/mn10300/include/asm/spinlock_types.h  2017-11-12 
19:46:13.000000000 +0100
-+++ linux-4.14/arch/mn10300/include/asm/spinlock_types.h       2018-09-05 
11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef _ASM_SPINLOCK_TYPES_H
- #define _ASM_SPINLOCK_TYPES_H
- 
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct arch_spinlock {
-       unsigned int slock;
- } arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/include/asm/spinlock_types.h linux-4.14/arch/powerpc/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/powerpc/include/asm/spinlock_types.h  2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/powerpc/include/asm/spinlock_types.h       2018-09-05 11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
- #define _ASM_POWERPC_SPINLOCK_TYPES_H
- 
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
-       volatile unsigned int slock;
- } arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/include/asm/thread_info.h linux-4.14/arch/powerpc/include/asm/thread_info.h
---- linux-4.14.orig/arch/powerpc/include/asm/thread_info.h     2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/powerpc/include/asm/thread_info.h  2018-09-05 11:05:07.000000000 +0200
-@@ -36,6 +36,8 @@
-       int             cpu;                    /* cpu we're on */
-       int             preempt_count;          /* 0 => preemptable,
-                                                  <0 => BUG */
-+      int             preempt_lazy_count;     /* 0 => preemptable,
-+                                                 <0 => BUG */
-       unsigned long   local_flags;            /* private flags for thread */
- #ifdef CONFIG_LIVEPATCH
-       unsigned long *livepatch_sp;
-@@ -81,8 +83,7 @@
- #define TIF_SYSCALL_TRACE     0       /* syscall trace active */
- #define TIF_SIGPENDING                1       /* signal pending */
- #define TIF_NEED_RESCHED      2       /* rescheduling necessary */
--#define TIF_POLLING_NRFLAG    3       /* true if poll_idle() is polling
--                                         TIF_NEED_RESCHED */
-+#define TIF_NEED_RESCHED_LAZY 3       /* lazy rescheduling necessary */
- #define TIF_32BIT             4       /* 32 bit binary */
- #define TIF_RESTORE_TM                5       /* need to restore TM FP/VEC/VSX */
- #define TIF_PATCH_PENDING     6       /* pending live patching update */
-@@ -101,6 +102,8 @@
- #if defined(CONFIG_PPC64)
- #define TIF_ELF2ABI           18      /* function descriptors must die! */
- #endif
-+#define TIF_POLLING_NRFLAG    19      /* true if poll_idle() is polling
-+                                         TIF_NEED_RESCHED */
- 
- /* as above, but as bit values */
- #define _TIF_SYSCALL_TRACE    (1<<TIF_SYSCALL_TRACE)
-@@ -120,14 +123,16 @@
- #define _TIF_SYSCALL_TRACEPOINT       (1<<TIF_SYSCALL_TRACEPOINT)
- #define _TIF_EMULATE_STACK_STORE      (1<<TIF_EMULATE_STACK_STORE)
- #define _TIF_NOHZ             (1<<TIF_NOHZ)
-+#define _TIF_NEED_RESCHED_LAZY        (1<<TIF_NEED_RESCHED_LAZY)
- #define _TIF_SYSCALL_DOTRACE  (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-                                _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
-                                _TIF_NOHZ)
- 
- #define _TIF_USER_WORK_MASK   (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
-                                _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
--                               _TIF_RESTORE_TM | _TIF_PATCH_PENDING)
-+                               _TIF_RESTORE_TM | _TIF_PATCH_PENDING | _TIF_NEED_RESCHED_LAZY)
- #define _TIF_PERSYSCALL_MASK  (_TIF_RESTOREALL|_TIF_NOERROR)
-+#define _TIF_NEED_RESCHED_MASK        (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
- 
- /* Bits in local_flags */
- /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
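The two thread_info.h hunks above carry the heart of the PREEMPT_LAZY scheme:
TIF_NEED_RESCHED stays an immediate reschedule request, while the new
TIF_NEED_RESCHED_LAZY is only honoured once the task's preempt_lazy_count has
dropped back to zero, which is why the entry paths below test
_TIF_NEED_RESCHED_MASK. A rough standalone C model of that decision follows
(struct task_model, should_schedule() and the bit positions are illustrative
stand-ins, not kernel definitions):

#include <stdbool.h>
#include <stdio.h>

#define TIF_NEED_RESCHED      (1u << 2)   /* immediate request */
#define TIF_NEED_RESCHED_LAZY (1u << 3)   /* deferrable request */

struct task_model {
	unsigned int flags;               /* TIF_* bits */
	int preempt_lazy_count;           /* >0: lazy preemption blocked */
};

/* Model of the gate the entry code implements: an immediate request always
 * wins; a lazy one counts only when no lazy-preempt section is active. */
static bool should_schedule(const struct task_model *t)
{
	if (t->flags & TIF_NEED_RESCHED)
		return true;
	return (t->flags & TIF_NEED_RESCHED_LAZY) &&
	       t->preempt_lazy_count == 0;
}

int main(void)
{
	struct task_model t = { TIF_NEED_RESCHED_LAZY, 1 };

	printf("%d\n", should_schedule(&t));      /* 0: lazy section active */
	t.preempt_lazy_count = 0;
	printf("%d\n", should_schedule(&t));      /* 1: lazy request honoured */
	return 0;
}

The entry_32.S and entry_64.S hunks below encode exactly this test in assembly.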
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/Kconfig linux-4.14/arch/powerpc/Kconfig
---- linux-4.14.orig/arch/powerpc/Kconfig       2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/powerpc/Kconfig    2018-09-05 11:05:07.000000000 +0200
-@@ -111,10 +111,11 @@
- 
- config RWSEM_GENERIC_SPINLOCK
-       bool
-+      default y if PREEMPT_RT_FULL
- 
- config RWSEM_XCHGADD_ALGORITHM
-       bool
--      default y
-+      default y if !PREEMPT_RT_FULL
- 
- config GENERIC_LOCKBREAK
-       bool
-@@ -215,6 +216,7 @@
-       select HAVE_HARDLOCKUP_DETECTOR_PERF    if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
-       select HAVE_PERF_REGS
-       select HAVE_PERF_USER_STACK_DUMP
-+      select HAVE_PREEMPT_LAZY
-       select HAVE_RCU_TABLE_FREE              if SMP
-       select HAVE_REGS_AND_STACK_ACCESS_API
-       select HAVE_SYSCALL_TRACEPOINTS
-@@ -390,7 +392,7 @@
- 
- config HIGHMEM
-       bool "High memory support"
--      depends on PPC32
-+      depends on PPC32 && !PREEMPT_RT_FULL
- 
- source kernel/Kconfig.hz
- source kernel/Kconfig.preempt
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kernel/asm-offsets.c linux-4.14/arch/powerpc/kernel/asm-offsets.c
---- linux-4.14.orig/arch/powerpc/kernel/asm-offsets.c  2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/powerpc/kernel/asm-offsets.c       2018-09-05 11:05:07.000000000 +0200
-@@ -156,6 +156,7 @@
-       OFFSET(TI_FLAGS, thread_info, flags);
-       OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
-       OFFSET(TI_PREEMPT, thread_info, preempt_count);
-+      OFFSET(TI_PREEMPT_LAZY, thread_info, preempt_lazy_count);
-       OFFSET(TI_TASK, thread_info, task);
-       OFFSET(TI_CPU, thread_info, cpu);
- 
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kernel/entry_32.S linux-4.14/arch/powerpc/kernel/entry_32.S
---- linux-4.14.orig/arch/powerpc/kernel/entry_32.S     2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/powerpc/kernel/entry_32.S  2018-09-05 11:05:07.000000000 +0200
-@@ -866,7 +866,14 @@
-       cmpwi   0,r0,0          /* if non-zero, just restore regs and return */
-       bne     restore
-       andi.   r8,r8,_TIF_NEED_RESCHED
-+      bne+    1f
-+      lwz     r0,TI_PREEMPT_LAZY(r9)
-+      cmpwi   0,r0,0          /* if non-zero, just restore regs and return */
-+      bne     restore
-+      lwz     r0,TI_FLAGS(r9)
-+      andi.   r0,r0,_TIF_NEED_RESCHED_LAZY
-       beq+    restore
-+1:
-       lwz     r3,_MSR(r1)
-       andi.   r0,r3,MSR_EE    /* interrupts off? */
-       beq     restore         /* don't schedule if so */
-@@ -877,11 +884,11 @@
-        */
-       bl      trace_hardirqs_off
- #endif
--1:    bl      preempt_schedule_irq
-+2:    bl      preempt_schedule_irq
-       CURRENT_THREAD_INFO(r9, r1)
-       lwz     r3,TI_FLAGS(r9)
--      andi.   r0,r3,_TIF_NEED_RESCHED
--      bne-    1b
-+      andi.   r0,r3,_TIF_NEED_RESCHED_MASK
-+      bne-    2b
- #ifdef CONFIG_TRACE_IRQFLAGS
-       /* And now, to properly rebalance the above, we tell lockdep they
-        * are being turned back on, which will happen when we return
-@@ -1204,7 +1211,7 @@
- #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
- 
- do_work:                      /* r10 contains MSR_KERNEL here */
--      andi.   r0,r9,_TIF_NEED_RESCHED
-+      andi.   r0,r9,_TIF_NEED_RESCHED_MASK
-       beq     do_user_signal
- 
- do_resched:                   /* r10 contains MSR_KERNEL here */
-@@ -1225,7 +1232,7 @@
-       MTMSRD(r10)             /* disable interrupts */
-       CURRENT_THREAD_INFO(r9, r1)
-       lwz     r9,TI_FLAGS(r9)
--      andi.   r0,r9,_TIF_NEED_RESCHED
-+      andi.   r0,r9,_TIF_NEED_RESCHED_MASK
-       bne-    do_resched
-       andi.   r0,r9,_TIF_USER_WORK_MASK
-       beq     restore_user
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kernel/entry_64.S linux-4.14/arch/powerpc/kernel/entry_64.S
---- linux-4.14.orig/arch/powerpc/kernel/entry_64.S     2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/powerpc/kernel/entry_64.S  2018-09-05 11:05:07.000000000 +0200
-@@ -690,7 +690,7 @@
-       bl      restore_math
-       b       restore
- #endif
--1:    andi.   r0,r4,_TIF_NEED_RESCHED
-+1:    andi.   r0,r4,_TIF_NEED_RESCHED_MASK
-       beq     2f
-       bl      restore_interrupts
-       SCHEDULE_USER
-@@ -752,10 +752,18 @@
- 
- #ifdef CONFIG_PREEMPT
-       /* Check if we need to preempt */
-+      lwz     r8,TI_PREEMPT(r9)
-+      cmpwi   0,r8,0          /* if non-zero, just restore regs and return */
-+      bne     restore
-       andi.   r0,r4,_TIF_NEED_RESCHED
-+      bne+    check_count
-+
-+      andi.   r0,r4,_TIF_NEED_RESCHED_LAZY
-       beq+    restore
-+      lwz     r8,TI_PREEMPT_LAZY(r9)
-+
-       /* Check that preempt_count() == 0 and interrupts are enabled */
--      lwz     r8,TI_PREEMPT(r9)
-+check_count:
-       cmpwi   cr1,r8,0
-       ld      r0,SOFTE(r1)
-       cmpdi   r0,0
-@@ -772,7 +780,7 @@
-       /* Re-test flags and eventually loop */
-       CURRENT_THREAD_INFO(r9, r1)
-       ld      r4,TI_FLAGS(r9)
--      andi.   r0,r4,_TIF_NEED_RESCHED
-+      andi.   r0,r4,_TIF_NEED_RESCHED_MASK
-       bne     1b
- 
-       /*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kernel/irq.c linux-4.14/arch/powerpc/kernel/irq.c
---- linux-4.14.orig/arch/powerpc/kernel/irq.c  2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/powerpc/kernel/irq.c       2018-09-05 11:05:07.000000000 +0200
-@@ -693,6 +693,7 @@
-       }
- }
- 
-+#ifndef CONFIG_PREEMPT_RT_FULL
- void do_softirq_own_stack(void)
- {
-       struct thread_info *curtp, *irqtp;
-@@ -710,6 +711,7 @@
-       if (irqtp->flags)
-               set_bits(irqtp->flags, &curtp->flags);
- }
-+#endif
- 
- irq_hw_number_t virq_to_hw(unsigned int virq)
- {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kernel/misc_32.S linux-4.14/arch/powerpc/kernel/misc_32.S
---- linux-4.14.orig/arch/powerpc/kernel/misc_32.S      2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/powerpc/kernel/misc_32.S   2018-09-05 11:05:07.000000000 +0200
-@@ -41,6 +41,7 @@
-  * We store the saved ksp_limit in the unused part
-  * of the STACK_FRAME_OVERHEAD
-  */
-+#ifndef CONFIG_PREEMPT_RT_FULL
- _GLOBAL(call_do_softirq)
-       mflr    r0
-       stw     r0,4(r1)
-@@ -57,6 +58,7 @@
-       stw     r10,THREAD+KSP_LIMIT(r2)
-       mtlr    r0
-       blr
-+#endif
- 
- /*
-  * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kernel/misc_64.S linux-4.14/arch/powerpc/kernel/misc_64.S
---- linux-4.14.orig/arch/powerpc/kernel/misc_64.S      2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/powerpc/kernel/misc_64.S   2018-09-05 11:05:07.000000000 +0200
-@@ -31,6 +31,7 @@
- 
-       .text
- 
-+#ifndef CONFIG_PREEMPT_RT_FULL
- _GLOBAL(call_do_softirq)
-       mflr    r0
-       std     r0,16(r1)
-@@ -41,6 +42,7 @@
-       ld      r0,16(r1)
-       mtlr    r0
-       blr
-+#endif
- 
- _GLOBAL(call_do_irq)
-       mflr    r0
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/kvm/Kconfig linux-4.14/arch/powerpc/kvm/Kconfig
---- linux-4.14.orig/arch/powerpc/kvm/Kconfig   2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/powerpc/kvm/Kconfig        2018-09-05 11:05:07.000000000 +0200
-@@ -177,6 +177,7 @@
- config KVM_MPIC
-       bool "KVM in-kernel MPIC emulation"
-       depends on KVM && E500
-+      depends on !PREEMPT_RT_FULL
-       select HAVE_KVM_IRQCHIP
-       select HAVE_KVM_IRQFD
-       select HAVE_KVM_IRQ_ROUTING
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/platforms/cell/spufs/sched.c linux-4.14/arch/powerpc/platforms/cell/spufs/sched.c
---- linux-4.14.orig/arch/powerpc/platforms/cell/spufs/sched.c  2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/powerpc/platforms/cell/spufs/sched.c       2018-09-05 11:05:07.000000000 +0200
-@@ -141,7 +141,7 @@
-        * runqueue. The context will be rescheduled on the proper node
-        * if it is timesliced or preempted.
-        */
--      cpumask_copy(&ctx->cpus_allowed, &current->cpus_allowed);
-+      cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr);
- 
-       /* Save the current cpu id for spu interrupt routing. */
-       ctx->last_ran = raw_smp_processor_id();
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/powerpc/platforms/ps3/device-init.c linux-4.14/arch/powerpc/platforms/ps3/device-init.c
---- linux-4.14.orig/arch/powerpc/platforms/ps3/device-init.c   2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/powerpc/platforms/ps3/device-init.c        2018-09-05 11:05:07.000000000 +0200
-@@ -752,7 +752,7 @@
-       }
-       pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);
- 
--      res = wait_event_interruptible(dev->done.wait,
-+      res = swait_event_interruptible(dev->done.wait,
-                                      dev->done.done || kthread_should_stop());
-       if (kthread_should_stop())
-               res = -EINTR;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/s390/include/asm/spinlock_types.h linux-4.14/arch/s390/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/s390/include/asm/spinlock_types.h     2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/s390/include/asm/spinlock_types.h  2018-09-05 11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef __ASM_SPINLOCK_TYPES_H
- #define __ASM_SPINLOCK_TYPES_H
- 
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
-       int lock;
- } __attribute__ ((aligned (4))) arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/sh/include/asm/spinlock_types.h linux-4.14/arch/sh/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/sh/include/asm/spinlock_types.h       2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/sh/include/asm/spinlock_types.h    2018-09-05 11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef __ASM_SH_SPINLOCK_TYPES_H
- #define __ASM_SH_SPINLOCK_TYPES_H
- 
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
-       volatile unsigned int lock;
- } arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/sh/kernel/irq.c linux-4.14/arch/sh/kernel/irq.c
---- linux-4.14.orig/arch/sh/kernel/irq.c       2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/sh/kernel/irq.c    2018-09-05 11:05:07.000000000 +0200
-@@ -148,6 +148,7 @@
-       hardirq_ctx[cpu] = NULL;
- }
- 
-+#ifndef CONFIG_PREEMPT_RT_FULL
- void do_softirq_own_stack(void)
- {
-       struct thread_info *curctx;
-@@ -175,6 +176,7 @@
-                 "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
-       );
- }
-+#endif
- #else
- static inline void handle_one_irq(unsigned int irq)
- {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/sparc/Kconfig linux-4.14/arch/sparc/Kconfig
---- linux-4.14.orig/arch/sparc/Kconfig 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/sparc/Kconfig      2018-09-05 11:05:07.000000000 +0200
-@@ -206,12 +206,10 @@
- source kernel/Kconfig.hz
- 
- config RWSEM_GENERIC_SPINLOCK
--      bool
--      default y if SPARC32
-+      def_bool PREEMPT_RT_FULL
- 
- config RWSEM_XCHGADD_ALGORITHM
--      bool
--      default y if SPARC64
-+      def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
- 
- config GENERIC_HWEIGHT
-       bool
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/sparc/kernel/irq_64.c linux-4.14/arch/sparc/kernel/irq_64.c
---- linux-4.14.orig/arch/sparc/kernel/irq_64.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/sparc/kernel/irq_64.c      2018-09-05 11:05:07.000000000 +0200
-@@ -855,6 +855,7 @@
-       set_irq_regs(old_regs);
- }
- 
-+#ifndef CONFIG_PREEMPT_RT_FULL
- void do_softirq_own_stack(void)
- {
-       void *orig_sp, *sp = softirq_stack[smp_processor_id()];
-@@ -869,6 +870,7 @@
-       __asm__ __volatile__("mov %0, %%sp"
-                            : : "r" (orig_sp));
- }
-+#endif
- 
- #ifdef CONFIG_HOTPLUG_CPU
- void fixup_irqs(void)
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/tile/include/asm/setup.h linux-4.14/arch/tile/include/asm/setup.h
---- linux-4.14.orig/arch/tile/include/asm/setup.h      2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/tile/include/asm/setup.h   2018-09-05 11:05:07.000000000 +0200
-@@ -49,7 +49,7 @@
- 
- /* Hook hardwall code into changes in affinity. */
- #define arch_set_cpus_allowed(p, new_mask) do { \
--      if (!cpumask_equal(&p->cpus_allowed, new_mask)) \
-+      if (!cpumask_equal(p->cpus_ptr, new_mask)) \
-               hardwall_deactivate_all(p); \
- } while (0)
- #endif
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/tile/include/asm/spinlock_types.h linux-4.14/arch/tile/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/tile/include/asm/spinlock_types.h     2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/tile/include/asm/spinlock_types.h  2018-09-05 11:05:07.000000000 +0200
-@@ -15,10 +15,6 @@
- #ifndef _ASM_TILE_SPINLOCK_TYPES_H
- #define _ASM_TILE_SPINLOCK_TYPES_H
- 
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- #ifdef __tilegx__
- 
- /* Low 15 bits are "next"; high 15 bits are "current". */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/tile/kernel/hardwall.c linux-4.14/arch/tile/kernel/hardwall.c
---- linux-4.14.orig/arch/tile/kernel/hardwall.c        2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/tile/kernel/hardwall.c     2018-09-05 11:05:07.000000000 +0200
-@@ -590,12 +590,12 @@
-        * Get our affinity; if we're not bound to this tile uniquely,
-        * we can't access the network registers.
-        */
--      if (cpumask_weight(&p->cpus_allowed) != 1)
-+      if (p->nr_cpus_allowed != 1)
-               return -EPERM;
- 
-       /* Make sure we are bound to a cpu assigned to this resource. */
-       cpu = smp_processor_id();
--      BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
-+      BUG_ON(cpumask_first(p->cpus_ptr) != cpu);
-       if (!cpumask_test_cpu(cpu, &info->cpumask))
-               return -EINVAL;
- 
-@@ -621,17 +621,17 @@
-  * Deactivate a task's hardwall.  Must hold lock for hardwall_type.
-  * This method may be called from exit_thread(), so we don't want to
-  * rely on too many fields of struct task_struct still being valid.
-- * We assume the cpus_allowed, pid, and comm fields are still valid.
-+ * We assume the nr_cpus_allowed, pid, and comm fields are still valid.
-  */
- static void _hardwall_deactivate(struct hardwall_type *hwt,
-                                struct task_struct *task)
- {
-       struct thread_struct *ts = &task->thread;
- 
--      if (cpumask_weight(&task->cpus_allowed) != 1) {
-+      if (task->nr_cpus_allowed != 1) {
-               pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n",
-                      task->pid, task->comm, hwt->name,
--                     cpumask_weight(&task->cpus_allowed));
-+                     task->nr_cpus_allowed);
-               BUG();
-       }
- 
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/aesni-intel_glue.c linux-4.14/arch/x86/crypto/aesni-intel_glue.c
---- linux-4.14.orig/arch/x86/crypto/aesni-intel_glue.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/crypto/aesni-intel_glue.c      2018-09-05 11:05:07.000000000 +0200
-@@ -387,14 +387,14 @@
- 
-       err = skcipher_walk_virt(&walk, req, true);
- 
--      kernel_fpu_begin();
-       while ((nbytes = walk.nbytes)) {
-+              kernel_fpu_begin();
-               aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-                             nbytes & AES_BLOCK_MASK);
-+              kernel_fpu_end();
-               nbytes &= AES_BLOCK_SIZE - 1;
-               err = skcipher_walk_done(&walk, nbytes);
-       }
--      kernel_fpu_end();
- 
-       return err;
- }
-@@ -409,14 +409,14 @@
- 
-       err = skcipher_walk_virt(&walk, req, true);
- 
--      kernel_fpu_begin();
-       while ((nbytes = walk.nbytes)) {
-+              kernel_fpu_begin();
-               aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-                             nbytes & AES_BLOCK_MASK);
-+              kernel_fpu_end();
-               nbytes &= AES_BLOCK_SIZE - 1;
-               err = skcipher_walk_done(&walk, nbytes);
-       }
--      kernel_fpu_end();
- 
-       return err;
- }
-@@ -431,14 +431,14 @@
- 
-       err = skcipher_walk_virt(&walk, req, true);
- 
--      kernel_fpu_begin();
-       while ((nbytes = walk.nbytes)) {
-+              kernel_fpu_begin();
-               aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-                             nbytes & AES_BLOCK_MASK, walk.iv);
-+              kernel_fpu_end();
-               nbytes &= AES_BLOCK_SIZE - 1;
-               err = skcipher_walk_done(&walk, nbytes);
-       }
--      kernel_fpu_end();
- 
-       return err;
- }
-@@ -453,14 +453,14 @@
- 
-       err = skcipher_walk_virt(&walk, req, true);
- 
--      kernel_fpu_begin();
-       while ((nbytes = walk.nbytes)) {
-+              kernel_fpu_begin();
-               aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-                             nbytes & AES_BLOCK_MASK, walk.iv);
-+              kernel_fpu_end();
-               nbytes &= AES_BLOCK_SIZE - 1;
-               err = skcipher_walk_done(&walk, nbytes);
-       }
--      kernel_fpu_end();
- 
-       return err;
- }
-@@ -510,18 +510,20 @@
- 
-       err = skcipher_walk_virt(&walk, req, true);
- 
--      kernel_fpu_begin();
-       while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
-+              kernel_fpu_begin();
-               aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-                                     nbytes & AES_BLOCK_MASK, walk.iv);
-+              kernel_fpu_end();
-               nbytes &= AES_BLOCK_SIZE - 1;
-               err = skcipher_walk_done(&walk, nbytes);
-       }
-       if (walk.nbytes) {
-+              kernel_fpu_begin();
-               ctr_crypt_final(ctx, &walk);
-+              kernel_fpu_end();
-               err = skcipher_walk_done(&walk, 0);
-       }
--      kernel_fpu_end();
- 
-       return err;
- }
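All of the aesni hunks above make the same change: kernel_fpu_begin() and
kernel_fpu_end() move from around the whole skcipher walk to around each block
batch, so that an RT kernel can reschedule between batches instead of holding
one long non-preemptible FPU section. A minimal userspace sketch of the shape
of that transformation (fpu_begin()/fpu_end()/crypt_chunk() are stand-ins for
the real x86 FPU API and cipher, not kernel functions):

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for kernel_fpu_begin()/kernel_fpu_end(): each pair brackets a
 * non-preemptible region, so the region must stay as short as one chunk. */
static void fpu_begin(void) { }
static void fpu_end(void)   { }

static void crypt_chunk(unsigned char *buf, size_t n)
{
	while (n--)
		*buf++ ^= 0xAA;         /* dummy per-byte "cipher" */
}

/* After the patch: one short FPU section per chunk, preemptible between
 * chunks, instead of one long section around the whole buffer. */
static void crypt_all(unsigned char *buf, size_t len, size_t chunk)
{
	while (len) {
		size_t n = len < chunk ? len : chunk;

		fpu_begin();
		crypt_chunk(buf, n);
		fpu_end();              /* reschedule may happen here */

		buf += n;
		len -= n;
	}
}

int main(void)
{
	unsigned char buf[64] = { 0 };

	crypt_all(buf, sizeof(buf), 16);
	printf("%02x\n", buf[0]);       /* aa */
	return 0;
}

The camellia, serpent and twofish hunks below reach the same goal a different
way, via kernel_fpu_resched() and the *_fpu_end_rt() helpers.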
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/camellia_aesni_avx2_glue.c linux-4.14/arch/x86/crypto/camellia_aesni_avx2_glue.c
---- linux-4.14.orig/arch/x86/crypto/camellia_aesni_avx2_glue.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/crypto/camellia_aesni_avx2_glue.c      2018-09-05 11:05:07.000000000 +0200
-@@ -206,6 +206,20 @@
-       bool fpu_enabled;
- };
- 
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void camellia_fpu_end_rt(struct crypt_priv *ctx)
-+{
-+       bool fpu_enabled = ctx->fpu_enabled;
-+
-+       if (!fpu_enabled)
-+               return;
-+       camellia_fpu_end(fpu_enabled);
-+       ctx->fpu_enabled = false;
-+}
-+#else
-+static void camellia_fpu_end_rt(struct crypt_priv *ctx) { }
-+#endif
-+
- static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
- {
-       const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
-@@ -221,16 +235,19 @@
-       }
- 
-       if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
-+              kernel_fpu_resched();
-               camellia_ecb_enc_16way(ctx->ctx, srcdst, srcdst);
-               srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
-               nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
-       }
- 
-       while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
-+              kernel_fpu_resched();
-               camellia_enc_blk_2way(ctx->ctx, srcdst, srcdst);
-               srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
-               nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
-       }
-+      camellia_fpu_end_rt(ctx);
- 
-       for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
-               camellia_enc_blk(ctx->ctx, srcdst, srcdst);
-@@ -251,16 +268,19 @@
-       }
- 
-       if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
-+              kernel_fpu_resched();
-               camellia_ecb_dec_16way(ctx->ctx, srcdst, srcdst);
-               srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
-               nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
-       }
- 
-       while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
-+              kernel_fpu_resched();
-               camellia_dec_blk_2way(ctx->ctx, srcdst, srcdst);
-               srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
-               nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
-       }
-+      camellia_fpu_end_rt(ctx);
- 
-       for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
-               camellia_dec_blk(ctx->ctx, srcdst, srcdst);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/camellia_aesni_avx_glue.c linux-4.14/arch/x86/crypto/camellia_aesni_avx_glue.c
---- linux-4.14.orig/arch/x86/crypto/camellia_aesni_avx_glue.c  2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/crypto/camellia_aesni_avx_glue.c       2018-09-05 11:05:07.000000000 +0200
-@@ -210,6 +210,21 @@
-       bool fpu_enabled;
- };
- 
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void camellia_fpu_end_rt(struct crypt_priv *ctx)
-+{
-+      bool fpu_enabled = ctx->fpu_enabled;
-+
-+      if (!fpu_enabled)
-+              return;
-+      camellia_fpu_end(fpu_enabled);
-+      ctx->fpu_enabled = false;
-+}
-+
-+#else
-+static void camellia_fpu_end_rt(struct crypt_priv *ctx) { }
-+#endif
-+
- static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
- {
-       const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
-@@ -225,10 +240,12 @@
-       }
- 
-       while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
-+              kernel_fpu_resched();
-               camellia_enc_blk_2way(ctx->ctx, srcdst, srcdst);
-               srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
-               nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
-       }
-+      camellia_fpu_end_rt(ctx);
- 
-       for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
-               camellia_enc_blk(ctx->ctx, srcdst, srcdst);
-@@ -249,10 +266,12 @@
-       }
- 
-       while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
-+              kernel_fpu_resched();
-               camellia_dec_blk_2way(ctx->ctx, srcdst, srcdst);
-               srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
-               nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
-       }
-+      camellia_fpu_end_rt(ctx);
- 
-       for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
-               camellia_dec_blk(ctx->ctx, srcdst, srcdst);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/cast5_avx_glue.c linux-4.14/arch/x86/crypto/cast5_avx_glue.c
---- linux-4.14.orig/arch/x86/crypto/cast5_avx_glue.c   2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/crypto/cast5_avx_glue.c        2018-09-05 11:05:07.000000000 +0200
-@@ -59,7 +59,7 @@
- static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
-                    bool enc)
- {
--      bool fpu_enabled = false;
-+      bool fpu_enabled;
-       struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-       const unsigned int bsize = CAST5_BLOCK_SIZE;
-       unsigned int nbytes;
-@@ -73,7 +73,7 @@
-               u8 *wsrc = walk->src.virt.addr;
-               u8 *wdst = walk->dst.virt.addr;
- 
--              fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
-+              fpu_enabled = cast5_fpu_begin(false, nbytes);
- 
-               /* Process multi-block batch */
-               if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
-@@ -102,10 +102,9 @@
-               } while (nbytes >= bsize);
- 
- done:
-+              cast5_fpu_end(fpu_enabled);
-               err = blkcipher_walk_done(desc, walk, nbytes);
-       }
--
--      cast5_fpu_end(fpu_enabled);
-       return err;
- }
- 
-@@ -226,7 +225,7 @@
- static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-                      struct scatterlist *src, unsigned int nbytes)
- {
--      bool fpu_enabled = false;
-+      bool fpu_enabled;
-       struct blkcipher_walk walk;
-       int err;
- 
-@@ -235,12 +234,11 @@
-       desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- 
-       while ((nbytes = walk.nbytes)) {
--              fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
-+              fpu_enabled = cast5_fpu_begin(false, nbytes);
-               nbytes = __cbc_decrypt(desc, &walk);
-+              cast5_fpu_end(fpu_enabled);
-               err = blkcipher_walk_done(desc, &walk, nbytes);
-       }
--
--      cast5_fpu_end(fpu_enabled);
-       return err;
- }
- 
-@@ -309,7 +307,7 @@
- static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-                    struct scatterlist *src, unsigned int nbytes)
- {
--      bool fpu_enabled = false;
-+      bool fpu_enabled;
-       struct blkcipher_walk walk;
-       int err;
- 
-@@ -318,13 +316,12 @@
-       desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- 
-       while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
--              fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
-+              fpu_enabled = cast5_fpu_begin(false, nbytes);
-               nbytes = __ctr_crypt(desc, &walk);
-+              cast5_fpu_end(fpu_enabled);
-               err = blkcipher_walk_done(desc, &walk, nbytes);
-       }
- 
--      cast5_fpu_end(fpu_enabled);
--
-       if (walk.nbytes) {
-               ctr_crypt_final(desc, &walk);
-               err = blkcipher_walk_done(desc, &walk, 0);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/cast6_avx_glue.c linux-4.14/arch/x86/crypto/cast6_avx_glue.c
---- linux-4.14.orig/arch/x86/crypto/cast6_avx_glue.c   2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/crypto/cast6_avx_glue.c        2018-09-05 11:05:07.000000000 +0200
-@@ -205,19 +205,33 @@
-       bool fpu_enabled;
- };
- 
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void cast6_fpu_end_rt(struct crypt_priv *ctx)
-+{
-+      bool fpu_enabled = ctx->fpu_enabled;
-+
-+      if (!fpu_enabled)
-+              return;
-+      cast6_fpu_end(fpu_enabled);
-+      ctx->fpu_enabled = false;
-+}
-+
-+#else
-+static void cast6_fpu_end_rt(struct crypt_priv *ctx) { }
-+#endif
-+
- static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
- {
-       const unsigned int bsize = CAST6_BLOCK_SIZE;
-       struct crypt_priv *ctx = priv;
-       int i;
- 
--      ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
--
-       if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
-+              ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
-               cast6_ecb_enc_8way(ctx->ctx, srcdst, srcdst);
-+              cast6_fpu_end_rt(ctx);
-               return;
-       }
--
-       for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
-               __cast6_encrypt(ctx->ctx, srcdst, srcdst);
- }
-@@ -228,10 +242,10 @@
-       struct crypt_priv *ctx = priv;
-       int i;
- 
--      ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
--
-       if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
-+              ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
-               cast6_ecb_dec_8way(ctx->ctx, srcdst, srcdst);
-+              cast6_fpu_end_rt(ctx);
-               return;
-       }
- 
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/chacha20_glue.c linux-4.14/arch/x86/crypto/chacha20_glue.c
---- linux-4.14.orig/arch/x86/crypto/chacha20_glue.c    2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/crypto/chacha20_glue.c 2018-09-05 11:05:07.000000000 +0200
-@@ -81,23 +81,24 @@
- 
-       crypto_chacha20_init(state, ctx, walk.iv);
- 
--      kernel_fpu_begin();
--
-       while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
-+              kernel_fpu_begin();
-+
-               chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
-                               rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
-+              kernel_fpu_end();
-               err = skcipher_walk_done(&walk,
-                                        walk.nbytes % CHACHA20_BLOCK_SIZE);
-       }
- 
-       if (walk.nbytes) {
-+              kernel_fpu_begin();
-               chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
-                               walk.nbytes);
-+              kernel_fpu_end();
-               err = skcipher_walk_done(&walk, 0);
-       }
- 
--      kernel_fpu_end();
--
-       return err;
- }
- 
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/glue_helper.c linux-4.14/arch/x86/crypto/glue_helper.c
---- linux-4.14.orig/arch/x86/crypto/glue_helper.c      2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/crypto/glue_helper.c   2018-09-05 11:05:07.000000000 +0200
-@@ -40,7 +40,7 @@
-       void *ctx = crypto_blkcipher_ctx(desc->tfm);
-       const unsigned int bsize = 128 / 8;
-       unsigned int nbytes, i, func_bytes;
--      bool fpu_enabled = false;
-+      bool fpu_enabled;
-       int err;
- 
-       err = blkcipher_walk_virt(desc, walk);
-@@ -50,7 +50,7 @@
-               u8 *wdst = walk->dst.virt.addr;
- 
-               fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
--                                           desc, fpu_enabled, nbytes);
-+                                           desc, false, nbytes);
- 
-               for (i = 0; i < gctx->num_funcs; i++) {
-                       func_bytes = bsize * gctx->funcs[i].num_blocks;
-@@ -72,10 +72,10 @@
-               }
- 
- done:
-+              glue_fpu_end(fpu_enabled);
-               err = blkcipher_walk_done(desc, walk, nbytes);
-       }
- 
--      glue_fpu_end(fpu_enabled);
-       return err;
- }
- 
-@@ -192,7 +192,7 @@
-                           struct scatterlist *src, unsigned int nbytes)
- {
-       const unsigned int bsize = 128 / 8;
--      bool fpu_enabled = false;
-+      bool fpu_enabled;
-       struct blkcipher_walk walk;
-       int err;
- 
-@@ -201,12 +201,12 @@
- 
-       while ((nbytes = walk.nbytes)) {
-               fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
--                                           desc, fpu_enabled, nbytes);
-+                                           desc, false, nbytes);
-               nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
-+              glue_fpu_end(fpu_enabled);
-               err = blkcipher_walk_done(desc, &walk, nbytes);
-       }
- 
--      glue_fpu_end(fpu_enabled);
-       return err;
- }
- EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
-@@ -275,7 +275,7 @@
-                         struct scatterlist *src, unsigned int nbytes)
- {
-       const unsigned int bsize = 128 / 8;
--      bool fpu_enabled = false;
-+      bool fpu_enabled;
-       struct blkcipher_walk walk;
-       int err;
- 
-@@ -284,13 +284,12 @@
- 
-       while ((nbytes = walk.nbytes) >= bsize) {
-               fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
--                                           desc, fpu_enabled, nbytes);
-+                                           desc, false, nbytes);
-               nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
-+              glue_fpu_end(fpu_enabled);
-               err = blkcipher_walk_done(desc, &walk, nbytes);
-       }
- 
--      glue_fpu_end(fpu_enabled);
--
-       if (walk.nbytes) {
-               glue_ctr_crypt_final_128bit(
-                       gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
-@@ -380,7 +379,7 @@
-                         void *tweak_ctx, void *crypt_ctx)
- {
-       const unsigned int bsize = 128 / 8;
--      bool fpu_enabled = false;
-+      bool fpu_enabled;
-       struct blkcipher_walk walk;
-       int err;
- 
-@@ -393,21 +392,21 @@
- 
-       /* set minimum length to bsize, for tweak_fn */
-       fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
--                                   desc, fpu_enabled,
-+                                   desc, false,
-                                    nbytes < bsize ? bsize : nbytes);
--
-       /* calculate first value of T */
-       tweak_fn(tweak_ctx, walk.iv, walk.iv);
-+      glue_fpu_end(fpu_enabled);
- 
-       while (nbytes) {
-+              fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-+                              desc, false, nbytes);
-               nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);
- 
-+              glue_fpu_end(fpu_enabled);
-               err = blkcipher_walk_done(desc, &walk, nbytes);
-               nbytes = walk.nbytes;
-       }
--
--      glue_fpu_end(fpu_enabled);
--
-       return err;
- }
- EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/serpent_avx2_glue.c linux-4.14/arch/x86/crypto/serpent_avx2_glue.c
---- linux-4.14.orig/arch/x86/crypto/serpent_avx2_glue.c        2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/crypto/serpent_avx2_glue.c     2018-09-05 11:05:07.000000000 +0200
-@@ -184,6 +184,21 @@
-       bool fpu_enabled;
- };
- 
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void serpent_fpu_end_rt(struct crypt_priv *ctx)
-+{
-+       bool fpu_enabled = ctx->fpu_enabled;
-+
-+       if (!fpu_enabled)
-+               return;
-+       serpent_fpu_end(fpu_enabled);
-+       ctx->fpu_enabled = false;
-+}
-+
-+#else
-+static void serpent_fpu_end_rt(struct crypt_priv *ctx) { }
-+#endif
-+
- static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
- {
-       const unsigned int bsize = SERPENT_BLOCK_SIZE;
-@@ -199,10 +214,12 @@
-       }
- 
-       while (nbytes >= SERPENT_PARALLEL_BLOCKS * bsize) {
-+              kernel_fpu_resched();
-               serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst);
-               srcdst += bsize * SERPENT_PARALLEL_BLOCKS;
-               nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
-       }
-+      serpent_fpu_end_rt(ctx);
- 
-       for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
-               __serpent_encrypt(ctx->ctx, srcdst, srcdst);
-@@ -223,10 +240,12 @@
-       }
- 
-       while (nbytes >= SERPENT_PARALLEL_BLOCKS * bsize) {
-+              kernel_fpu_resched();
-               serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst);
-               srcdst += bsize * SERPENT_PARALLEL_BLOCKS;
-               nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
-       }
-+      serpent_fpu_end_rt(ctx);
- 
-       for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
-               __serpent_decrypt(ctx->ctx, srcdst, srcdst);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/serpent_avx_glue.c linux-4.14/arch/x86/crypto/serpent_avx_glue.c
---- linux-4.14.orig/arch/x86/crypto/serpent_avx_glue.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/crypto/serpent_avx_glue.c      2018-09-05 11:05:07.000000000 +0200
-@@ -218,16 +218,31 @@
-       bool fpu_enabled;
- };
- 
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void serpent_fpu_end_rt(struct crypt_priv *ctx)
-+{
-+      bool fpu_enabled = ctx->fpu_enabled;
-+
-+      if (!fpu_enabled)
-+              return;
-+      serpent_fpu_end(fpu_enabled);
-+      ctx->fpu_enabled = false;
-+}
-+
-+#else
-+static void serpent_fpu_end_rt(struct crypt_priv *ctx) { }
-+#endif
-+
- static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
- {
-       const unsigned int bsize = SERPENT_BLOCK_SIZE;
-       struct crypt_priv *ctx = priv;
-       int i;
- 
--      ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
--
-       if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
-+              ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
-               serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst);
-+              serpent_fpu_end_rt(ctx);
-               return;
-       }
- 
-@@ -241,10 +256,10 @@
-       struct crypt_priv *ctx = priv;
-       int i;
- 
--      ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
--
-       if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
-+              ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
-               serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst);
-+              serpent_fpu_end_rt(ctx);
-               return;
-       }
- 
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/serpent_sse2_glue.c linux-4.14/arch/x86/crypto/serpent_sse2_glue.c
---- linux-4.14.orig/arch/x86/crypto/serpent_sse2_glue.c        2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/crypto/serpent_sse2_glue.c     2018-09-05 11:05:07.000000000 +0200
-@@ -187,16 +187,31 @@
-       bool fpu_enabled;
- };
- 
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void serpent_fpu_end_rt(struct crypt_priv *ctx)
-+{
-+      bool fpu_enabled = ctx->fpu_enabled;
-+
-+      if (!fpu_enabled)
-+              return;
-+      serpent_fpu_end(fpu_enabled);
-+      ctx->fpu_enabled = false;
-+}
-+
-+#else
-+static void serpent_fpu_end_rt(struct crypt_priv *ctx) { }
-+#endif
-+
- static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
- {
-       const unsigned int bsize = SERPENT_BLOCK_SIZE;
-       struct crypt_priv *ctx = priv;
-       int i;
- 
--      ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
--
-       if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
-+              ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
-               serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
-+              serpent_fpu_end_rt(ctx);
-               return;
-       }
- 
-@@ -210,10 +225,10 @@
-       struct crypt_priv *ctx = priv;
-       int i;
- 
--      ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
--
-       if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
-+              ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
-               serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
-+              serpent_fpu_end_rt(ctx);
-               return;
-       }
- 
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/crypto/twofish_avx_glue.c linux-4.14/arch/x86/crypto/twofish_avx_glue.c
---- linux-4.14.orig/arch/x86/crypto/twofish_avx_glue.c 2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/crypto/twofish_avx_glue.c      2018-09-05 11:05:07.000000000 +0200
-@@ -218,6 +218,21 @@
-       bool fpu_enabled;
- };
- 
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void twofish_fpu_end_rt(struct crypt_priv *ctx)
-+{
-+      bool fpu_enabled = ctx->fpu_enabled;
-+
-+      if (!fpu_enabled)
-+              return;
-+      twofish_fpu_end(fpu_enabled);
-+      ctx->fpu_enabled = false;
-+}
-+
-+#else
-+static void twofish_fpu_end_rt(struct crypt_priv *ctx) { }
-+#endif
-+
- static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
- {
-       const unsigned int bsize = TF_BLOCK_SIZE;
-@@ -228,12 +243,16 @@
- 
-       if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
-               twofish_ecb_enc_8way(ctx->ctx, srcdst, srcdst);
-+              twofish_fpu_end_rt(ctx);
-               return;
-       }
- 
--      for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
-+      for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3) {
-+              kernel_fpu_resched();
-               twofish_enc_blk_3way(ctx->ctx, srcdst, srcdst);
-+      }
- 
-+      twofish_fpu_end_rt(ctx);
-       nbytes %= bsize * 3;
- 
-       for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
-@@ -250,11 +269,15 @@
- 
-       if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
-               twofish_ecb_dec_8way(ctx->ctx, srcdst, srcdst);
-+              twofish_fpu_end_rt(ctx);
-               return;
-       }
- 
--      for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
-+      for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3) {
-+              kernel_fpu_resched();
-               twofish_dec_blk_3way(ctx->ctx, srcdst, srcdst);
-+      }
-+      twofish_fpu_end_rt(ctx);
- 
-       nbytes %= bsize * 3;
- 
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/entry/common.c linux-4.14/arch/x86/entry/common.c
---- linux-4.14.orig/arch/x86/entry/common.c    2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/entry/common.c 2018-09-05 11:05:07.000000000 +0200
-@@ -133,7 +133,7 @@
- 
- #define EXIT_TO_USERMODE_LOOP_FLAGS                           \
-       (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |   \
--       _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
-+       _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
- 
- static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
- {
-@@ -148,9 +148,16 @@
-               /* We have work to do. */
-               local_irq_enable();
- 
--              if (cached_flags & _TIF_NEED_RESCHED)
-+              if (cached_flags & _TIF_NEED_RESCHED_MASK)
-                       schedule();
- 
-+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
-+              if (unlikely(current->forced_info.si_signo)) {
-+                      struct task_struct *t = current;
-+                      force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
-+                      t->forced_info.si_signo = 0;
-+              }
-+#endif
-               if (cached_flags & _TIF_UPROBE)
-                       uprobe_notify_resume(regs);
- 
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/entry/entry_32.S linux-4.14/arch/x86/entry/entry_32.S
---- linux-4.14.orig/arch/x86/entry/entry_32.S  2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/entry/entry_32.S       2018-09-05 11:05:07.000000000 +0200
-@@ -350,8 +350,25 @@
- ENTRY(resume_kernel)
-       DISABLE_INTERRUPTS(CLBR_ANY)
- .Lneed_resched:
-+      # preempt count == 0 + NEED_RS set?
-       cmpl    $0, PER_CPU_VAR(__preempt_count)
-+#ifndef CONFIG_PREEMPT_LAZY
-       jnz     restore_all
-+#else
-+      jz test_int_off
-+
-+      # atleast preempt count == 0 ?
-+      cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
-+      jne restore_all
-+
-+      movl    PER_CPU_VAR(current_task), %ebp
-+      cmpl    $0,TASK_TI_preempt_lazy_count(%ebp)     # non-zero preempt_lazy_count ?
-+      jnz     restore_all
-+
-+      testl   $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
-+      jz      restore_all
-+test_int_off:
-+#endif
-       testl   $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
-       jz      restore_all
-       call    preempt_schedule_irq
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/entry/entry_64.S linux-4.14/arch/x86/entry/entry_64.S
---- linux-4.14.orig/arch/x86/entry/entry_64.S  2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/entry/entry_64.S       2018-09-05 11:05:07.000000000 +0200
-@@ -633,7 +633,23 @@
-       bt      $9, EFLAGS(%rsp)                /* were interrupts off? */
-       jnc     1f
- 0:    cmpl    $0, PER_CPU_VAR(__preempt_count)
-+#ifndef CONFIG_PREEMPT_LAZY
-+      jnz     1f
-+#else
-+      jz      do_preempt_schedule_irq
-+
-+      # atleast preempt count == 0 ?
-+      cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
-+      jnz     1f
-+
-+      movq    PER_CPU_VAR(current_task), %rcx
-+      cmpl    $0, TASK_TI_preempt_lazy_count(%rcx)
-       jnz     1f
-+
-+      bt      $TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx)
-+      jnc     1f
-+do_preempt_schedule_irq:
-+#endif
-       call    preempt_schedule_irq
-       jmp     0b
- 1:
-@@ -988,6 +1004,7 @@
-       jmp     2b
-       .previous
- 
-+#ifndef CONFIG_PREEMPT_RT_FULL
- /* Call softirq on interrupt stack. Interrupts are off. */
- ENTRY(do_softirq_own_stack)
-       pushq   %rbp
-@@ -998,6 +1015,7 @@
-       leaveq
-       ret
- ENDPROC(do_softirq_own_stack)
-+#endif
- 
- #ifdef CONFIG_XEN
- idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/include/asm/fpu/api.h linux-4.14/arch/x86/include/asm/fpu/api.h
---- linux-4.14.orig/arch/x86/include/asm/fpu/api.h     2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/include/asm/fpu/api.h  2018-09-05 11:05:07.000000000 +0200
-@@ -25,6 +25,7 @@
- extern void __kernel_fpu_end(void);
- extern void kernel_fpu_begin(void);
- extern void kernel_fpu_end(void);
-+extern void kernel_fpu_resched(void);
- extern bool irq_fpu_usable(void);
- 
- /*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/include/asm/preempt.h linux-4.14/arch/x86/include/asm/preempt.h
---- linux-4.14.orig/arch/x86/include/asm/preempt.h     2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/include/asm/preempt.h  2018-09-05 11:05:07.000000000 +0200
-@@ -86,17 +86,46 @@
-  * a decrement which hits zero means we have no preempt_count and should
-  * reschedule.
-  */
--static __always_inline bool __preempt_count_dec_and_test(void)
-+static __always_inline bool ____preempt_count_dec_and_test(void)
- {
-       GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
- }
- 
-+static __always_inline bool __preempt_count_dec_and_test(void)
-+{
-+      if (____preempt_count_dec_and_test())
-+              return true;
-+#ifdef CONFIG_PREEMPT_LAZY
-+      if (current_thread_info()->preempt_lazy_count)
-+              return false;
-+      return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-+#else
-+      return false;
-+#endif
-+}
-+
- /*
-  * Returns true when we need to resched and can (barring IRQ state).
-  */
- static __always_inline bool should_resched(int preempt_offset)
- {
-+#ifdef CONFIG_PREEMPT_LAZY
-+      u32 tmp;
-+
-+      tmp = raw_cpu_read_4(__preempt_count);
-+      if (tmp == preempt_offset)
-+              return true;
-+
-+      /* preempt count == 0 ? */
-+      tmp &= ~PREEMPT_NEED_RESCHED;
-+      if (tmp)
-+              return false;
-+      if (current_thread_info()->preempt_lazy_count)
-+              return false;
-+      return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-+#else
-       return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
-+#endif
- }
- 
- #ifdef CONFIG_PREEMPT
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/include/asm/signal.h linux-4.14/arch/x86/include/asm/signal.h
---- linux-4.14.orig/arch/x86/include/asm/signal.h      2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/include/asm/signal.h   2018-09-05 11:05:07.000000000 +0200
-@@ -28,6 +28,19 @@
- #define SA_IA32_ABI   0x02000000u
- #define SA_X32_ABI    0x01000000u
- 
-+/*
-+ * Because some traps use the IST stack, we must keep preemption
-+ * disabled while calling do_trap(), but do_trap() may call
-+ * force_sig_info() which will grab the signal spin_locks for the
-+ * task, which in PREEMPT_RT_FULL are mutexes.  By defining
-+ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set
-+ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the
-+ * trap.
-+ */
-+#if defined(CONFIG_PREEMPT_RT_FULL)
-+#define ARCH_RT_DELAYS_SIGNAL_SEND
-+#endif
-+
- #ifndef CONFIG_COMPAT
- typedef sigset_t compat_sigset_t;
- #endif
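The comment block added above states the constraint; the matching delivery
side is the ARCH_RT_DELAYS_SIGNAL_SEND hunk in arch/x86/entry/common.c earlier
in this patch. A compact C model of the two halves follows (struct
siginfo_model, struct task_model and the two functions are illustrative
types/names, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct siginfo_model { int si_signo; };        /* 0 means "none pending" */
struct task_model    { struct siginfo_model forced_info; };

/* Trap side: may not take sleeping locks on RT, so only record the signal. */
static void force_sig_delayed(struct task_model *t, int signo)
{
	t->forced_info.si_signo = signo;       /* delivered on exit to user */
}

/* Exit-to-user side: sleeping locks are fine here, deliver for real. */
static bool flush_delayed_signal(struct task_model *t)
{
	if (!t->forced_info.si_signo)
		return false;
	/* the kernel calls force_sig_info(si_signo, &forced_info, t) here */
	t->forced_info.si_signo = 0;
	return true;
}

int main(void)
{
	struct task_model t = { { 0 } };

	force_sig_delayed(&t, 11);
	printf("delivered: %d\n", flush_delayed_signal(&t));   /* 1 */
	return 0;
}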
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/include/asm/stackprotector.h linux-4.14/arch/x86/include/asm/stackprotector.h
---- linux-4.14.orig/arch/x86/include/asm/stackprotector.h      2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/include/asm/stackprotector.h   2018-09-05 11:05:07.000000000 +0200
-@@ -60,7 +60,7 @@
-  */
- static __always_inline void boot_init_stack_canary(void)
- {
--      u64 canary;
-+      u64 uninitialized_var(canary);
-       u64 tsc;
- 
- #ifdef CONFIG_X86_64
-@@ -71,8 +71,14 @@
-        * of randomness. The TSC only matters for very early init,
-        * there it already has some randomness on most systems. Later
-        * on during the bootup the random pool has true entropy too.
-+       * For preempt-rt we need to weaken the randomness a bit, as
-+       * we can't call into the random generator from atomic context
-+       * due to locking constraints. We just leave canary
-+       * uninitialized and use the TSC based randomness on top of it.
-        */
-+#ifndef CONFIG_PREEMPT_RT_FULL
-       get_random_bytes(&canary, sizeof(canary));
-+#endif
-       tsc = rdtsc();
-       canary += tsc + (tsc << 32UL);
-       canary &= CANARY_MASK;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/include/asm/thread_info.h linux-4.14/arch/x86/include/asm/thread_info.h
---- linux-4.14.orig/arch/x86/include/asm/thread_info.h 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/include/asm/thread_info.h      2018-09-05 11:05:07.000000000 +0200
-@@ -56,11 +56,14 @@
- struct thread_info {
-       unsigned long           flags;          /* low level flags */
-       u32                     status;         /* thread synchronous flags */
-+      int                     preempt_lazy_count;     /* 0 => lazy preemptable
-+                                                        <0 => BUG */
- };
- 
- #define INIT_THREAD_INFO(tsk)                 \
- {                                             \
-       .flags          = 0,                    \
-+      .preempt_lazy_count = 0,                \
- }
- 
- #define init_stack            (init_thread_union.stack)
-@@ -69,6 +72,10 @@
- 
- #include <asm/asm-offsets.h>
- 
-+#define GET_THREAD_INFO(reg) \
-+      _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
-+      _ASM_SUB $(THREAD_SIZE),reg ;
-+
- #endif
- 
- /*
-@@ -85,6 +92,7 @@
- #define TIF_SYSCALL_EMU               6       /* syscall emulation active */
- #define TIF_SYSCALL_AUDIT     7       /* syscall auditing active */
- #define TIF_SECCOMP           8       /* secure computing */
-+#define TIF_NEED_RESCHED_LAZY 9       /* lazy rescheduling necessary */
- #define TIF_USER_RETURN_NOTIFY        11      /* notify kernel of userspace return */
- #define TIF_UPROBE            12      /* breakpointed or singlestepping */
- #define TIF_PATCH_PENDING     13      /* pending live patching update */
-@@ -112,6 +120,7 @@
- #define _TIF_SYSCALL_EMU      (1 << TIF_SYSCALL_EMU)
- #define _TIF_SYSCALL_AUDIT    (1 << TIF_SYSCALL_AUDIT)
- #define _TIF_SECCOMP          (1 << TIF_SECCOMP)
-+#define _TIF_NEED_RESCHED_LAZY        (1 << TIF_NEED_RESCHED_LAZY)
- #define _TIF_USER_RETURN_NOTIFY       (1 << TIF_USER_RETURN_NOTIFY)
- #define _TIF_UPROBE           (1 << TIF_UPROBE)
- #define _TIF_PATCH_PENDING    (1 << TIF_PATCH_PENDING)
-@@ -153,6 +162,8 @@
- #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
- #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
- 
-+#define _TIF_NEED_RESCHED_MASK        (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
-+
- #define STACK_WARN            (THREAD_SIZE/8)
- 
- /*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/include/asm/uv/uv_bau.h linux-4.14/arch/x86/include/asm/uv/uv_bau.h
---- linux-4.14.orig/arch/x86/include/asm/uv/uv_bau.h   2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/include/asm/uv/uv_bau.h        2018-09-05 11:05:07.000000000 +0200
-@@ -643,9 +643,9 @@
-       cycles_t                send_message;
-       cycles_t                period_end;
-       cycles_t                period_time;
--      spinlock_t              uvhub_lock;
--      spinlock_t              queue_lock;
--      spinlock_t              disable_lock;
-+      raw_spinlock_t          uvhub_lock;
-+      raw_spinlock_t          queue_lock;
-+      raw_spinlock_t          disable_lock;
-       /* tunables */
-       int                     max_concurr;
-       int                     max_concurr_const;
-@@ -847,15 +847,15 @@
-  * to be lowered below the current 'v'.  atomic_add_unless can only stop
-  * on equal.
-  */
--static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
-+static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u)
- {
--      spin_lock(lock);
-+      raw_spin_lock(lock);
-       if (atomic_read(v) >= u) {
--              spin_unlock(lock);
-+              raw_spin_unlock(lock);
-               return 0;
-       }
-       atomic_inc(v);
--      spin_unlock(lock);
-+      raw_spin_unlock(lock);
-       return 1;
- }
- 
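The uv_bau.h conversion works because spinlock_t becomes a sleeping rtmutex
under PREEMPT_RT_FULL, while raw_spinlock_t keeps busy-wait semantics for
contexts that must not sleep. A userspace analogue of atomic_inc_unless_ge()
follows, with a pthread spinlock standing in for raw_spinlock_t (the names
here are illustrative, not kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_spinlock_t lock;        /* stand-in for raw_spinlock_t */

/* Increment *v, but only while it stays below u; returns 1 on success.
 * The lock makes the read-compare-increment sequence atomic as a whole. */
static int inc_unless_ge(atomic_int *v, int u)
{
	int ret = 0;

	pthread_spin_lock(&lock);
	if (atomic_load(v) < u) {
		atomic_fetch_add(v, 1);
		ret = 1;
	}
	pthread_spin_unlock(&lock);
	return ret;
}

int main(void)
{
	atomic_int v = 1;
	int a, b;

	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	a = inc_unless_ge(&v, 2);      /* 1: v goes 1 -> 2 */
	b = inc_unless_ge(&v, 2);      /* 0: v already >= 2 */
	printf("%d %d\n", a, b);
	pthread_spin_destroy(&lock);
	return 0;
}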
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/Kconfig linux-4.14/arch/x86/Kconfig
---- linux-4.14.orig/arch/x86/Kconfig   2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/Kconfig        2018-09-05 11:05:07.000000000 +0200
-@@ -169,6 +169,7 @@
-       select HAVE_HARDLOCKUP_DETECTOR_PERF    if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
-       select HAVE_PERF_REGS
-       select HAVE_PERF_USER_STACK_DUMP
-+      select HAVE_PREEMPT_LAZY
-       select HAVE_RCU_TABLE_FREE
-       select HAVE_REGS_AND_STACK_ACCESS_API
-       select HAVE_RELIABLE_STACKTRACE         if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION
-@@ -256,8 +257,11 @@
-       def_bool y
-       depends on ISA_DMA_API
- 
-+config RWSEM_GENERIC_SPINLOCK
-+      def_bool PREEMPT_RT_FULL
-+
- config RWSEM_XCHGADD_ALGORITHM
--      def_bool y
-+      def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
- 
- config GENERIC_CALIBRATE_DELAY
-       def_bool y
-@@ -932,7 +936,7 @@
- config MAXSMP
-       bool "Enable Maximum number of SMP Processors and NUMA Nodes"
-       depends on X86_64 && SMP && DEBUG_KERNEL
--      select CPUMASK_OFFSTACK
-+      select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
-       ---help---
-         Enable maximum number of CPUS and NUMA Nodes for this architecture.
-         If unsure, say N.
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/apic/io_apic.c linux-4.14/arch/x86/kernel/apic/io_apic.c
---- linux-4.14.orig/arch/x86/kernel/apic/io_apic.c     2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/kernel/apic/io_apic.c  2018-09-05 11:05:07.000000000 +0200
-@@ -1691,7 +1691,8 @@
- static inline bool ioapic_irqd_mask(struct irq_data *data)
- {
-       /* If we are moving the irq we need to mask it */
--      if (unlikely(irqd_is_setaffinity_pending(data))) {
-+      if (unlikely(irqd_is_setaffinity_pending(data) &&
-+                   !irqd_irq_inprogress(data))) {
-               mask_ioapic_irq(data);
-               return true;
-       }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/asm-offsets.c linux-4.14/arch/x86/kernel/asm-offsets.c
---- linux-4.14.orig/arch/x86/kernel/asm-offsets.c      2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/kernel/asm-offsets.c   2018-09-05 11:05:07.000000000 +0200
-@@ -38,6 +38,7 @@
- 
-       BLANK();
-       OFFSET(TASK_TI_flags, task_struct, thread_info.flags);
-+      OFFSET(TASK_TI_preempt_lazy_count, task_struct, thread_info.preempt_lazy_count);
-       OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
- 
-       BLANK();
-@@ -94,6 +95,7 @@
- 
-       BLANK();
-       DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
-+      DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
- 
-       /* TLB state for the entry code */
-       OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask);
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/cpu/mcheck/dev-mcelog.c linux-4.14/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
---- linux-4.14.orig/arch/x86/kernel/cpu/mcheck/dev-mcelog.c    2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/kernel/cpu/mcheck/dev-mcelog.c 2018-09-05 11:05:07.000000000 +0200
-@@ -14,6 +14,7 @@
- #include <linux/slab.h>
- #include <linux/kmod.h>
- #include <linux/poll.h>
-+#include <linux/swork.h>
- 
- #include "mce-internal.h"
- 
-@@ -86,13 +87,43 @@
- 
- static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
- 
--
--void mce_work_trigger(void)
-+static void __mce_work_trigger(struct swork_event *event)
- {
-       if (mce_helper[0])
-               schedule_work(&mce_trigger_work);
- }
- 
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static bool notify_work_ready __read_mostly;
-+static struct swork_event notify_work;
-+
-+static int mce_notify_work_init(void)
-+{
-+      int err;
-+
-+      err = swork_get();
-+      if (err)
-+              return err;
-+
-+      INIT_SWORK(&notify_work, __mce_work_trigger);
-+      notify_work_ready = true;
-+      return 0;
-+}
-+
-+void mce_work_trigger(void)
-+{
-+      if (notify_work_ready)
-+              swork_queue(&notify_work);
-+}
-+
-+#else
-+void mce_work_trigger(void)
-+{
-+      __mce_work_trigger(NULL);
-+}
-+static inline int mce_notify_work_init(void) { return 0; }
-+#endif
-+
- static ssize_t
- show_trigger(struct device *s, struct device_attribute *attr, char *buf)
- {
-@@ -356,7 +387,7 @@
- 
-               return err;
-       }
--
-+      mce_notify_work_init();
-       mce_register_decode_chain(&dev_mcelog_nb);
-       return 0;
- }
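The dev-mcelog hunks above route the /dev/mcelog trigger through the RT
"simple work" (swork) layer, because on PREEMPT_RT_FULL the MCE notification
can arrive from a context where schedule_work() must not be called. A
stripped-down model of that guard-then-queue shape follows (all names here
are illustrative; the real API is swork_get()/INIT_SWORK()/swork_queue()):

#include <stdbool.h>
#include <stdio.h>

static bool notify_ready;      /* set once the worker infrastructure exists */
static int  queued;            /* stand-in for the swork queue depth */

/* May run very early or from a restricted context: only enqueue if the
 * worker was brought up, otherwise drop the event, as the patch does. */
static void work_trigger(void)
{
	if (notify_ready)
		queued++;
}

static int work_init(void)
{
	/* real code: swork_get() + INIT_SWORK(&notify_work, handler) */
	notify_ready = true;
	return 0;
}

int main(void)
{
	work_trigger();                 /* before init: ignored */
	work_init();
	work_trigger();
	printf("queued=%d\n", queued);  /* 1 */
	return 0;
}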
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.14/arch/x86/kernel/cpu/mcheck/mce.c
---- linux-4.14.orig/arch/x86/kernel/cpu/mcheck/mce.c   2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/kernel/cpu/mcheck/mce.c        2018-09-05 11:05:07.000000000 +0200
-@@ -42,6 +42,7 @@
- #include <linux/debugfs.h>
- #include <linux/irq_work.h>
- #include <linux/export.h>
-+#include <linux/jiffies.h>
- #include <linux/jump_label.h>
- 
- #include <asm/intel-family.h>
-@@ -1365,7 +1366,7 @@
- static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
- 
- static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
--static DEFINE_PER_CPU(struct timer_list, mce_timer);
-+static DEFINE_PER_CPU(struct hrtimer, mce_timer);
- 
- static unsigned long mce_adjust_timer_default(unsigned long interval)
- {
-@@ -1374,27 +1375,19 @@
- 
- static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
- 
--static void __start_timer(struct timer_list *t, unsigned long interval)
-+static void __start_timer(struct hrtimer *t, unsigned long iv)
- {
--      unsigned long when = jiffies + interval;
--      unsigned long flags;
--
--      local_irq_save(flags);
--
--      if (!timer_pending(t) || time_before(when, t->expires))
--              mod_timer(t, round_jiffies(when));
-+      if (!iv)
-+              return;
- 
--      local_irq_restore(flags);
-+      hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
-+                             0, HRTIMER_MODE_REL_PINNED);
- }
- 
--static void mce_timer_fn(unsigned long data)
-+static  enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
- {
--      struct timer_list *t = this_cpu_ptr(&mce_timer);
--      int cpu = smp_processor_id();
-       unsigned long iv;
- 
--      WARN_ON(cpu != data);
--
-       iv = __this_cpu_read(mce_next_interval);
- 
-       if (mce_available(this_cpu_ptr(&cpu_info))) {
-@@ -1417,7 +1410,11 @@
- 
- done:
-       __this_cpu_write(mce_next_interval, iv);
--      __start_timer(t, iv);
-+      if (!iv)
-+              return HRTIMER_NORESTART;
-+
-+      hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(iv)));
-+      return HRTIMER_RESTART;
- }
- 
- /*
-@@ -1425,7 +1422,7 @@
-  */
- void mce_timer_kick(unsigned long interval)
- {
--      struct timer_list *t = this_cpu_ptr(&mce_timer);
-+      struct hrtimer *t = this_cpu_ptr(&mce_timer);
-       unsigned long iv = __this_cpu_read(mce_next_interval);
- 
-       __start_timer(t, interval);
-@@ -1440,7 +1437,7 @@
-       int cpu;
- 
-       for_each_online_cpu(cpu)
--              del_timer_sync(&per_cpu(mce_timer, cpu));
-+              hrtimer_cancel(&per_cpu(mce_timer, cpu));
- }
- 
- /*
-@@ -1769,7 +1766,7 @@
-       }
- }
- 
--static void mce_start_timer(struct timer_list *t)
-+static void mce_start_timer(struct hrtimer *t)
- {
-       unsigned long iv = check_interval * HZ;
- 
-@@ -1782,18 +1779,19 @@
- 
- static void __mcheck_cpu_setup_timer(void)
- {
--      struct timer_list *t = this_cpu_ptr(&mce_timer);
--      unsigned int cpu = smp_processor_id();
-+      struct hrtimer *t = this_cpu_ptr(&mce_timer);
- 
--      setup_pinned_timer(t, mce_timer_fn, cpu);
-+      hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+      t->function = mce_timer_fn;
- }
- 
- static void __mcheck_cpu_init_timer(void)
- {
--      struct timer_list *t = this_cpu_ptr(&mce_timer);
--      unsigned int cpu = smp_processor_id();
-+      struct hrtimer *t = this_cpu_ptr(&mce_timer);
-+
-+      hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+      t->function = mce_timer_fn;
- 
--      setup_pinned_timer(t, mce_timer_fn, cpu);
-       mce_start_timer(t);
- }
- 
-@@ -2309,7 +2307,7 @@
- 
- static int mce_cpu_online(unsigned int cpu)
- {
--      struct timer_list *t = this_cpu_ptr(&mce_timer);
-+      struct hrtimer *t = this_cpu_ptr(&mce_timer);
-       int ret;
- 
-       mce_device_create(cpu);
-@@ -2326,10 +2324,10 @@
- 
- static int mce_cpu_pre_down(unsigned int cpu)
- {
--      struct timer_list *t = this_cpu_ptr(&mce_timer);
-+      struct hrtimer *t = this_cpu_ptr(&mce_timer);
- 
-       mce_disable_cpu();
--      del_timer_sync(t);
-+      hrtimer_cancel(t);
-       mce_threshold_remove_device(cpu);
-       mce_device_remove(cpu);
-       return 0;
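
The mce.c hunks above replace the per-CPU mce_timer timer_list with an hrtimer. For reference, the resulting periodic-callback pattern looks roughly like the sketch below (illustrative names and a fixed 1 s period; the real code derives its interval from mce_next_interval):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;

static enum hrtimer_restart example_timer_fn(struct hrtimer *timer)
{
        /* ... periodic work, e.g. polling machine-check banks ... */

        /* Re-arm relative to now, as mce_timer_fn does. */
        hrtimer_forward_now(timer, ms_to_ktime(1000));
        return HRTIMER_RESTART; /* HRTIMER_NORESTART stops it, as when iv == 0 */
}

static void example_timer_setup(void)
{
        hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        example_timer.function = example_timer_fn;
        hrtimer_start(&example_timer, ms_to_ktime(1000), HRTIMER_MODE_REL_PINNED);
}
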
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/fpu/core.c linux-4.14/arch/x86/kernel/fpu/core.c
---- linux-4.14.orig/arch/x86/kernel/fpu/core.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/kernel/fpu/core.c      2018-09-05 11:05:07.000000000 +0200
-@@ -138,6 +138,18 @@
- }
- EXPORT_SYMBOL_GPL(kernel_fpu_end);
- 
-+void kernel_fpu_resched(void)
-+{
-+      WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
-+
-+      if (should_resched(PREEMPT_OFFSET)) {
-+              kernel_fpu_end();
-+              cond_resched();
-+              kernel_fpu_begin();
-+      }
-+}
-+EXPORT_SYMBOL_GPL(kernel_fpu_resched);
-+
- /*
-  * Save the FPU state (mark it for reload if necessary):
-  *
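
The fpu/core.c hunk adds kernel_fpu_resched(), which briefly leaves the kernel-FPU region to honour a pending reschedule. A minimal usage sketch (the loop body is hypothetical) might look like:

#include <asm/fpu/api.h>

static void example_fpu_loop(int nblocks)
{
        int i;

        kernel_fpu_begin();
        for (i = 0; i < nblocks; i++) {
                /* ... process one block with SIMD instructions ... */

                /* Bounds preempt-off time: end/begin around cond_resched(). */
                kernel_fpu_resched();
        }
        kernel_fpu_end();
}
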
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/irq_32.c linux-4.14/arch/x86/kernel/irq_32.c
---- linux-4.14.orig/arch/x86/kernel/irq_32.c   2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/kernel/irq_32.c        2018-09-05 11:05:07.000000000 +0200
-@@ -130,6 +130,7 @@
-              cpu, per_cpu(hardirq_stack, cpu),  per_cpu(softirq_stack, cpu));
- }
- 
-+#ifndef CONFIG_PREEMPT_RT_FULL
- void do_softirq_own_stack(void)
- {
-       struct irq_stack *irqstk;
-@@ -146,6 +147,7 @@
- 
-       call_on_stack(__do_softirq, isp);
- }
-+#endif
- 
- bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
- {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kernel/process_32.c linux-4.14/arch/x86/kernel/process_32.c
---- linux-4.14.orig/arch/x86/kernel/process_32.c       2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/kernel/process_32.c    2018-09-05 11:05:07.000000000 +0200
-@@ -38,6 +38,7 @@
- #include <linux/io.h>
- #include <linux/kdebug.h>
- #include <linux/syscalls.h>
-+#include <linux/highmem.h>
- 
- #include <asm/pgtable.h>
- #include <asm/ldt.h>
-@@ -198,6 +199,35 @@
- }
- EXPORT_SYMBOL_GPL(start_thread);
- 
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
-+{
-+      int i;
-+
-+      /*
-+       * Clear @prev's kmap_atomic mappings
-+       */
-+      for (i = 0; i < prev_p->kmap_idx; i++) {
-+              int idx = i + KM_TYPE_NR * smp_processor_id();
-+              pte_t *ptep = kmap_pte - idx;
-+
-+              kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
-+      }
-+      /*
-+       * Restore @next_p's kmap_atomic mappings
-+       */
-+      for (i = 0; i < next_p->kmap_idx; i++) {
-+              int idx = i + KM_TYPE_NR * smp_processor_id();
-+
-+              if (!pte_none(next_p->kmap_pte[i]))
-+                      set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
-+      }
-+}
-+#else
-+static inline void
-+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
-+#endif
-+
- 
- /*
-  *    switch_to(x,y) should switch tasks from x to y.
-@@ -273,6 +303,8 @@
-                    task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
-               __switch_to_xtra(prev_p, next_p, tss);
- 
-+      switch_kmaps(prev_p, next_p);
-+
-       /*
-        * Leave lazy mode, flushing any hypercalls made here.
-        * This must be done before restoring TLS segments so
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kvm/lapic.c linux-4.14/arch/x86/kvm/lapic.c
---- linux-4.14.orig/arch/x86/kvm/lapic.c       2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/kvm/lapic.c    2018-09-05 11:05:07.000000000 +0200
-@@ -2120,7 +2120,7 @@
-       apic->vcpu = vcpu;
- 
-       hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
--                   HRTIMER_MODE_ABS_PINNED);
-+                   HRTIMER_MODE_ABS_PINNED_HARD);
-       apic->lapic_timer.timer.function = apic_timer_fn;
- 
-       /*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/kvm/x86.c linux-4.14/arch/x86/kvm/x86.c
---- linux-4.14.orig/arch/x86/kvm/x86.c 2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/kvm/x86.c      2018-09-05 11:05:07.000000000 +0200
-@@ -6285,6 +6285,13 @@
-               goto out;
-       }
- 
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+      if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
-+              printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
-+              return -EOPNOTSUPP;
-+      }
-+#endif
-+
-       r = kvm_mmu_module_init();
-       if (r)
-               goto out_free_percpu;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/mm/highmem_32.c linux-4.14/arch/x86/mm/highmem_32.c
---- linux-4.14.orig/arch/x86/mm/highmem_32.c   2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/mm/highmem_32.c        2018-09-05 11:05:07.000000000 +0200
-@@ -32,10 +32,11 @@
-  */
- void *kmap_atomic_prot(struct page *page, pgprot_t prot)
- {
-+      pte_t pte = mk_pte(page, prot);
-       unsigned long vaddr;
-       int idx, type;
- 
--      preempt_disable();
-+      preempt_disable_nort();
-       pagefault_disable();
- 
-       if (!PageHighMem(page))
-@@ -45,7 +46,10 @@
-       idx = type + KM_TYPE_NR*smp_processor_id();
-       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-       BUG_ON(!pte_none(*(kmap_pte-idx)));
--      set_pte(kmap_pte-idx, mk_pte(page, prot));
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+      current->kmap_pte[type] = pte;
-+#endif
-+      set_pte(kmap_pte-idx, pte);
-       arch_flush_lazy_mmu_mode();
- 
-       return (void *)vaddr;
-@@ -88,6 +92,9 @@
-                * is a bad idea also, in case the page changes cacheability
-                * attributes or becomes a protected page in a hypervisor.
-                */
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+              current->kmap_pte[type] = __pte(0);
-+#endif
-               kpte_clear_flush(kmap_pte-idx, vaddr);
-               kmap_atomic_idx_pop();
-               arch_flush_lazy_mmu_mode();
-@@ -100,7 +107,7 @@
- #endif
- 
-       pagefault_enable();
--      preempt_enable();
-+      preempt_enable_nort();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
- 
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/mm/iomap_32.c linux-4.14/arch/x86/mm/iomap_32.c
---- linux-4.14.orig/arch/x86/mm/iomap_32.c     2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/mm/iomap_32.c  2018-09-05 11:05:07.000000000 +0200
-@@ -56,6 +56,7 @@
- 
- void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
- {
-+      pte_t pte = pfn_pte(pfn, prot);
-       unsigned long vaddr;
-       int idx, type;
- 
-@@ -65,7 +66,12 @@
-       type = kmap_atomic_idx_push();
-       idx = type + KM_TYPE_NR * smp_processor_id();
-       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
--      set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
-+      WARN_ON(!pte_none(*(kmap_pte - idx)));
-+
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+      current->kmap_pte[type] = pte;
-+#endif
-+      set_pte(kmap_pte - idx, pte);
-       arch_flush_lazy_mmu_mode();
- 
-       return (void *)vaddr;
-@@ -113,6 +119,9 @@
-                * is a bad idea also, in case the page changes cacheability
-                * attributes or becomes a protected page in a hypervisor.
-                */
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+              current->kmap_pte[type] = __pte(0);
-+#endif
-               kpte_clear_flush(kmap_pte-idx, vaddr);
-               kmap_atomic_idx_pop();
-       }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/platform/uv/tlb_uv.c linux-4.14/arch/x86/platform/uv/tlb_uv.c
---- linux-4.14.orig/arch/x86/platform/uv/tlb_uv.c      2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/arch/x86/platform/uv/tlb_uv.c   2018-09-05 11:05:07.000000000 +0200
-@@ -740,9 +740,9 @@
- 
-               quiesce_local_uvhub(hmaster);
- 
--              spin_lock(&hmaster->queue_lock);
-+              raw_spin_lock(&hmaster->queue_lock);
-               reset_with_ipi(&bau_desc->distribution, bcp);
--              spin_unlock(&hmaster->queue_lock);
-+              raw_spin_unlock(&hmaster->queue_lock);
- 
-               end_uvhub_quiesce(hmaster);
- 
-@@ -762,9 +762,9 @@
- 
-               quiesce_local_uvhub(hmaster);
- 
--              spin_lock(&hmaster->queue_lock);
-+              raw_spin_lock(&hmaster->queue_lock);
-               reset_with_ipi(&bau_desc->distribution, bcp);
--              spin_unlock(&hmaster->queue_lock);
-+              raw_spin_unlock(&hmaster->queue_lock);
- 
-               end_uvhub_quiesce(hmaster);
- 
-@@ -785,7 +785,7 @@
-       cycles_t tm1;
- 
-       hmaster = bcp->uvhub_master;
--      spin_lock(&hmaster->disable_lock);
-+      raw_spin_lock(&hmaster->disable_lock);
-       if (!bcp->baudisabled) {
-               stat->s_bau_disabled++;
-               tm1 = get_cycles();
-@@ -798,7 +798,7 @@
-                       }
-               }
-       }
--      spin_unlock(&hmaster->disable_lock);
-+      raw_spin_unlock(&hmaster->disable_lock);
- }
- 
- static void count_max_concurr(int stat, struct bau_control *bcp,
-@@ -861,7 +861,7 @@
-  */
- static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
- {
--      spinlock_t *lock = &hmaster->uvhub_lock;
-+      raw_spinlock_t *lock = &hmaster->uvhub_lock;
-       atomic_t *v;
- 
-       v = &hmaster->active_descriptor_count;
-@@ -995,7 +995,7 @@
-       struct bau_control *hmaster;
- 
-       hmaster = bcp->uvhub_master;
--      spin_lock(&hmaster->disable_lock);
-+      raw_spin_lock(&hmaster->disable_lock);
-       if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
-               stat->s_bau_reenabled++;
-               for_each_present_cpu(tcpu) {
-@@ -1007,10 +1007,10 @@
-                               tbcp->period_giveups = 0;
-                       }
-               }
--              spin_unlock(&hmaster->disable_lock);
-+              raw_spin_unlock(&hmaster->disable_lock);
-               return 0;
-       }
--      spin_unlock(&hmaster->disable_lock);
-+      raw_spin_unlock(&hmaster->disable_lock);
-       return -1;
- }
- 
-@@ -1942,9 +1942,9 @@
-               bcp->cong_reps                  = congested_reps;
-               bcp->disabled_period            = sec_2_cycles(disabled_period);
-               bcp->giveup_limit               = giveup_limit;
--              spin_lock_init(&bcp->queue_lock);
--              spin_lock_init(&bcp->uvhub_lock);
--              spin_lock_init(&bcp->disable_lock);
-+              raw_spin_lock_init(&bcp->queue_lock);
-+              raw_spin_lock_init(&bcp->uvhub_lock);
-+              raw_spin_lock_init(&bcp->disable_lock);
-       }
- }
- 
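
The tlb_uv.c hunks above switch the BAU locks from spinlock_t to raw_spinlock_t, which keeps truly spinning (non-sleeping) semantics under PREEMPT_RT_FULL. The general pattern, with a made-up lock name, is roughly:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);

static void example_critical_section(void)
{
        unsigned long flags;

        /* raw_spin_lock_irqsave() never sleeps, even on PREEMPT_RT_FULL. */
        raw_spin_lock_irqsave(&example_lock, flags);
        /* ... touch hardware / per-hub state ... */
        raw_spin_unlock_irqrestore(&example_lock, flags);
}
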
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/x86/platform/uv/uv_time.c linux-4.14/arch/x86/platform/uv/uv_time.c
---- linux-4.14.orig/arch/x86/platform/uv/uv_time.c     2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/x86/platform/uv/uv_time.c  2018-09-05 11:05:07.000000000 +0200
-@@ -57,7 +57,7 @@
- 
- /* There is one of these allocated per node */
- struct uv_rtc_timer_head {
--      spinlock_t      lock;
-+      raw_spinlock_t  lock;
-       /* next cpu waiting for timer, local node relative: */
-       int             next_cpu;
-       /* number of cpus on this node: */
-@@ -177,7 +177,7 @@
-                               uv_rtc_deallocate_timers();
-                               return -ENOMEM;
-                       }
--                      spin_lock_init(&head->lock);
-+                      raw_spin_lock_init(&head->lock);
-                       head->ncpus = uv_blade_nr_possible_cpus(bid);
-                       head->next_cpu = -1;
-                       blade_info[bid] = head;
-@@ -231,7 +231,7 @@
-       unsigned long flags;
-       int next_cpu;
- 
--      spin_lock_irqsave(&head->lock, flags);
-+      raw_spin_lock_irqsave(&head->lock, flags);
- 
-       next_cpu = head->next_cpu;
-       *t = expires;
-@@ -243,12 +243,12 @@
-               if (uv_setup_intr(cpu, expires)) {
-                       *t = ULLONG_MAX;
-                       uv_rtc_find_next_timer(head, pnode);
--                      spin_unlock_irqrestore(&head->lock, flags);
-+                      raw_spin_unlock_irqrestore(&head->lock, flags);
-                       return -ETIME;
-               }
-       }
- 
--      spin_unlock_irqrestore(&head->lock, flags);
-+      raw_spin_unlock_irqrestore(&head->lock, flags);
-       return 0;
- }
- 
-@@ -267,7 +267,7 @@
-       unsigned long flags;
-       int rc = 0;
- 
--      spin_lock_irqsave(&head->lock, flags);
-+      raw_spin_lock_irqsave(&head->lock, flags);
- 
-       if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
-               rc = 1;
-@@ -279,7 +279,7 @@
-                       uv_rtc_find_next_timer(head, pnode);
-       }
- 
--      spin_unlock_irqrestore(&head->lock, flags);
-+      raw_spin_unlock_irqrestore(&head->lock, flags);
- 
-       return rc;
- }
-@@ -299,13 +299,17 @@
- static u64 uv_read_rtc(struct clocksource *cs)
- {
-       unsigned long offset;
-+      u64 cycles;
- 
-+      preempt_disable();
-       if (uv_get_min_hub_revision_id() == 1)
-               offset = 0;
-       else
-               offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
- 
--      return (u64)uv_read_local_mmr(UVH_RTC | offset);
-+      cycles = (u64)uv_read_local_mmr(UVH_RTC | offset);
-+      preempt_enable();
-+      return cycles;
- }
- 
- /*
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/arch/xtensa/include/asm/spinlock_types.h linux-4.14/arch/xtensa/include/asm/spinlock_types.h
---- linux-4.14.orig/arch/xtensa/include/asm/spinlock_types.h   2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/arch/xtensa/include/asm/spinlock_types.h        2018-09-05 11:05:07.000000000 +0200
-@@ -2,10 +2,6 @@
- #ifndef __ASM_SPINLOCK_TYPES_H
- #define __ASM_SPINLOCK_TYPES_H
- 
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
-       volatile unsigned int slock;
- } arch_spinlock_t;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/block/blk-core.c linux-4.14/block/blk-core.c
---- linux-4.14.orig/block/blk-core.c   2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/block/blk-core.c        2018-09-05 11:05:07.000000000 +0200
-@@ -116,6 +116,9 @@
- 
-       INIT_LIST_HEAD(&rq->queuelist);
-       INIT_LIST_HEAD(&rq->timeout_list);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+      INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
-+#endif
-       rq->cpu = -1;
-       rq->q = q;
-       rq->__sector = (sector_t) -1;
-@@ -280,7 +283,7 @@
- void blk_start_queue(struct request_queue *q)
- {
-       lockdep_assert_held(q->queue_lock);
--      WARN_ON(!in_interrupt() && !irqs_disabled());
-+      WARN_ON_NONRT(!in_interrupt() && !irqs_disabled());
-       WARN_ON_ONCE(q->mq_ops);
- 
-       queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-@@ -808,12 +811,21 @@
-       percpu_ref_put(&q->q_usage_counter);
- }
- 
-+static void blk_queue_usage_counter_release_swork(struct swork_event *sev)
-+{
-+      struct request_queue *q =
-+              container_of(sev, struct request_queue, mq_pcpu_wake);
-+
-+      wake_up_all(&q->mq_freeze_wq);
-+}
-+
- static void blk_queue_usage_counter_release(struct percpu_ref *ref)
- {
-       struct request_queue *q =
-               container_of(ref, struct request_queue, q_usage_counter);
- 
--      wake_up_all(&q->mq_freeze_wq);
-+      if (wq_has_sleeper(&q->mq_freeze_wq))
-+              swork_queue(&q->mq_pcpu_wake);
- }
- 
- static void blk_rq_timed_out_timer(unsigned long data)
-@@ -890,6 +902,7 @@
-       __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
- 
-       init_waitqueue_head(&q->mq_freeze_wq);
-+      INIT_SWORK(&q->mq_pcpu_wake, blk_queue_usage_counter_release_swork);
- 
-       /*
-        * Init percpu_ref in atomic mode so that it's faster to shutdown.
-@@ -3308,7 +3321,7 @@
-               blk_run_queue_async(q);
-       else
-               __blk_run_queue(q);
--      spin_unlock(q->queue_lock);
-+      spin_unlock_irq(q->queue_lock);
- }
- 
- static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
-@@ -3356,7 +3369,6 @@
- void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
- {
-       struct request_queue *q;
--      unsigned long flags;
-       struct request *rq;
-       LIST_HEAD(list);
-       unsigned int depth;
-@@ -3376,11 +3388,6 @@
-       q = NULL;
-       depth = 0;
- 
--      /*
--       * Save and disable interrupts here, to avoid doing it for every
--       * queue lock we have to take.
--       */
--      local_irq_save(flags);
-       while (!list_empty(&list)) {
-               rq = list_entry_rq(list.next);
-               list_del_init(&rq->queuelist);
-@@ -3393,7 +3400,7 @@
-                               queue_unplugged(q, depth, from_schedule);
-                       q = rq->q;
-                       depth = 0;
--                      spin_lock(q->queue_lock);
-+                      spin_lock_irq(q->queue_lock);
-               }
- 
-               /*
-@@ -3420,8 +3427,6 @@
-        */
-       if (q)
-               queue_unplugged(q, depth, from_schedule);
--
--      local_irq_restore(flags);
- }
- 
- void blk_finish_plug(struct blk_plug *plug)
-@@ -3631,6 +3636,8 @@
-       if (!kblockd_workqueue)
-               panic("Failed to create kblockd\n");
- 
-+      BUG_ON(swork_get());
-+
-       request_cachep = kmem_cache_create("blkdev_requests",
-                       sizeof(struct request), 0, SLAB_PANIC, NULL);
- 
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/block/blk-ioc.c linux-4.14/block/blk-ioc.c
---- linux-4.14.orig/block/blk-ioc.c    2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/block/blk-ioc.c 2018-09-05 11:05:07.000000000 +0200
-@@ -9,6 +9,7 @@
- #include <linux/blkdev.h>
- #include <linux/slab.h>
- #include <linux/sched/task.h>
-+#include <linux/delay.h>
- 
- #include "blk.h"
- 
-@@ -118,7 +119,7 @@
-                       spin_unlock(q->queue_lock);
-               } else {
-                       spin_unlock_irqrestore(&ioc->lock, flags);
--                      cpu_relax();
-+                      cpu_chill();
-                       spin_lock_irqsave_nested(&ioc->lock, flags, 1);
-               }
-       }
-@@ -202,7 +203,7 @@
-                               spin_unlock(icq->q->queue_lock);
-                       } else {
-                               spin_unlock_irqrestore(&ioc->lock, flags);
--                              cpu_relax();
-+                              cpu_chill();
-                               goto retry;
-                       }
-               }
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/block/blk-mq.c linux-4.14/block/blk-mq.c
---- linux-4.14.orig/block/blk-mq.c     2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/block/blk-mq.c  2018-09-05 11:05:07.000000000 +0200
-@@ -339,6 +339,9 @@
-       /* tag was already set */
-       rq->extra_len = 0;
- 
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+      INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
-+#endif
-       INIT_LIST_HEAD(&rq->timeout_list);
-       rq->timeout = 0;
- 
-@@ -533,12 +536,24 @@
- }
- EXPORT_SYMBOL(blk_mq_end_request);
- 
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+
-+void __blk_mq_complete_request_remote_work(struct work_struct *work)
-+{
-+      struct request *rq = container_of(work, struct request, work);
-+
-+      rq->q->softirq_done_fn(rq);
-+}
-+
-+#else
-+
- static void __blk_mq_complete_request_remote(void *data)
- {
-       struct request *rq = data;
- 
-       rq->q->softirq_done_fn(rq);
- }
-+#endif
- 
- static void __blk_mq_complete_request(struct request *rq)
- {
-@@ -558,19 +573,27 @@
-               return;
-       }
- 
--      cpu = get_cpu();
-+      cpu = get_cpu_light();
-       if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
-               shared = cpus_share_cache(cpu, ctx->cpu);
- 
-       if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+              /*
-+               * We could force QUEUE_FLAG_SAME_FORCE then we would not get in
-+               * here. But we could try to invoke it one the CPU like this.
-+               */
-+              schedule_work_on(ctx->cpu, &rq->work);
-+#else
-               rq->csd.func = __blk_mq_complete_request_remote;
-               rq->csd.info = rq;
-               rq->csd.flags = 0;
-               smp_call_function_single_async(ctx->cpu, &rq->csd);
-+#endif
-       } else {
-               rq->q->softirq_done_fn(rq);
-       }
--      put_cpu();
-+      put_cpu_light();
- }
- 
- /**
-@@ -1238,14 +1261,14 @@
-               return;
- 
-       if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
--              int cpu = get_cpu();
-+              int cpu = get_cpu_light();
-               if (cpumask_test_cpu(cpu, hctx->cpumask)) {
-                       __blk_mq_run_hw_queue(hctx);
--                      put_cpu();
-+                      put_cpu_light();
-                       return;
-               }
- 
--              put_cpu();
-+              put_cpu_light();
-       }
- 
-       kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
-@@ -2863,10 +2886,9 @@
-       kt = nsecs;
- 
-       mode = HRTIMER_MODE_REL;
--      hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
-+      hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode, current);
-       hrtimer_set_expires(&hs.timer, kt);
- 
--      hrtimer_init_sleeper(&hs, current);
-       do {
-               if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
-                       break;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/block/blk-mq.h linux-4.14/block/blk-mq.h
---- linux-4.14.orig/block/blk-mq.h     2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/block/blk-mq.h  2018-09-05 11:05:07.000000000 +0200
-@@ -98,12 +98,12 @@
-  */
- static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
- {
--      return __blk_mq_get_ctx(q, get_cpu());
-+      return __blk_mq_get_ctx(q, get_cpu_light());
- }
- 
- static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
- {
--      put_cpu();
-+      put_cpu_light();
- }
- 
- struct blk_mq_alloc_data {
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/block/blk-softirq.c linux-4.14/block/blk-softirq.c
---- linux-4.14.orig/block/blk-softirq.c        2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/block/blk-softirq.c     2018-09-05 11:05:07.000000000 +0200
-@@ -53,6 +53,7 @@
-               raise_softirq_irqoff(BLOCK_SOFTIRQ);
- 
-       local_irq_restore(flags);
-+      preempt_check_resched_rt();
- }
- 
- /*
-@@ -91,6 +92,7 @@
-                        this_cpu_ptr(&blk_cpu_done));
-       raise_softirq_irqoff(BLOCK_SOFTIRQ);
-       local_irq_enable();
-+      preempt_check_resched_rt();
- 
-       return 0;
- }
-@@ -143,6 +145,7 @@
-               goto do_local;
- 
-       local_irq_restore(flags);
-+      preempt_check_resched_rt();
- }
- 
- /**
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/block/bounce.c linux-4.14/block/bounce.c
---- linux-4.14.orig/block/bounce.c     2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/block/bounce.c  2018-09-05 11:05:07.000000000 +0200
-@@ -66,11 +66,11 @@
-       unsigned long flags;
-       unsigned char *vto;
- 
--      local_irq_save(flags);
-+      local_irq_save_nort(flags);
-       vto = kmap_atomic(to->bv_page);
-       memcpy(vto + to->bv_offset, vfrom, to->bv_len);
-       kunmap_atomic(vto);
--      local_irq_restore(flags);
-+      local_irq_restore_nort(flags);
- }
- 
- #else /* CONFIG_HIGHMEM */
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/crypto/algapi.c linux-4.14/crypto/algapi.c
---- linux-4.14.orig/crypto/algapi.c    2018-09-05 11:03:20.000000000 +0200
-+++ linux-4.14/crypto/algapi.c 2018-09-05 11:05:07.000000000 +0200
-@@ -731,13 +731,13 @@
- 
- int crypto_register_notifier(struct notifier_block *nb)
- {
--      return blocking_notifier_chain_register(&crypto_chain, nb);
-+      return srcu_notifier_chain_register(&crypto_chain, nb);
- }
- EXPORT_SYMBOL_GPL(crypto_register_notifier);
- 
- int crypto_unregister_notifier(struct notifier_block *nb)
- {
--      return blocking_notifier_chain_unregister(&crypto_chain, nb);
-+      return srcu_notifier_chain_unregister(&crypto_chain, nb);
- }
- EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
- 
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/crypto/api.c linux-4.14/crypto/api.c
---- linux-4.14.orig/crypto/api.c       2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/crypto/api.c    2018-09-05 11:05:07.000000000 +0200
-@@ -31,7 +31,7 @@
- DECLARE_RWSEM(crypto_alg_sem);
- EXPORT_SYMBOL_GPL(crypto_alg_sem);
- 
--BLOCKING_NOTIFIER_HEAD(crypto_chain);
-+SRCU_NOTIFIER_HEAD(crypto_chain);
- EXPORT_SYMBOL_GPL(crypto_chain);
- 
- static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
-@@ -236,10 +236,10 @@
- {
-       int ok;
- 
--      ok = blocking_notifier_call_chain(&crypto_chain, val, v);
-+      ok = srcu_notifier_call_chain(&crypto_chain, val, v);
-       if (ok == NOTIFY_DONE) {
-               request_module("cryptomgr");
--              ok = blocking_notifier_call_chain(&crypto_chain, val, v);
-+              ok = srcu_notifier_call_chain(&crypto_chain, val, v);
-       }
- 
-       return ok;
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/crypto/internal.h linux-4.14/crypto/internal.h
---- linux-4.14.orig/crypto/internal.h  2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/crypto/internal.h       2018-09-05 11:05:07.000000000 +0200
-@@ -47,7 +47,7 @@
- 
- extern struct list_head crypto_alg_list;
- extern struct rw_semaphore crypto_alg_sem;
--extern struct blocking_notifier_head crypto_chain;
-+extern struct srcu_notifier_head crypto_chain;
- 
- #ifdef CONFIG_PROC_FS
- void __init crypto_init_proc(void);
-@@ -143,7 +143,7 @@
- 
- static inline void crypto_notify(unsigned long val, void *v)
- {
--      blocking_notifier_call_chain(&crypto_chain, val, v);
-+      srcu_notifier_call_chain(&crypto_chain, val, v);
- }
- 
- #endif        /* _CRYPTO_INTERNAL_H */
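
The crypto hunks above convert crypto_chain from a blocking notifier to an SRCU notifier, so notification can run with only SRCU read-side protection. The general SRCU notifier pattern, with illustrative names, is roughly:

#include <linux/notifier.h>

SRCU_NOTIFIER_HEAD(example_chain);

static int example_cb(struct notifier_block *nb, unsigned long val, void *v)
{
        /* ... react to the event ... */
        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_cb,
};

static void example_use(void)
{
        srcu_notifier_chain_register(&example_chain, &example_nb);
        srcu_notifier_call_chain(&example_chain, 0, NULL);
        srcu_notifier_chain_unregister(&example_chain, &example_nb);
}
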
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/Documentation/trace/events.txt linux-4.14/Documentation/trace/events.txt
---- linux-4.14.orig/Documentation/trace/events.txt     2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/Documentation/trace/events.txt  2018-09-05 11:05:07.000000000 +0200
-@@ -517,1550 +517,4 @@
-   totals derived from one or more trace event format fields and/or
-   event counts (hitcount).
- 
--  The format of a hist trigger is as follows:
--
--        hist:keys=<field1[,field2,...]>[:values=<field1[,field2,...]>]
--          [:sort=<field1[,field2,...]>][:size=#entries][:pause][:continue]
--          [:clear][:name=histname1] [if <filter>]
--
--  When a matching event is hit, an entry is added to a hash table
--  using the key(s) and value(s) named.  Keys and values correspond to
--  fields in the event's format description.  Values must correspond to
--  numeric fields - on an event hit, the value(s) will be added to a
--  sum kept for that field.  The special string 'hitcount' can be used
--  in place of an explicit value field - this is simply a count of
--  event hits.  If 'values' isn't specified, an implicit 'hitcount'
--  value will be automatically created and used as the only value.
--  Keys can be any field, or the special string 'stacktrace', which
--  will use the event's kernel stacktrace as the key.  The keywords
--  'keys' or 'key' can be used to specify keys, and the keywords
--  'values', 'vals', or 'val' can be used to specify values.  Compound
--  keys consisting of up to two fields can be specified by the 'keys'
--  keyword.  Hashing a compound key produces a unique entry in the
--  table for each unique combination of component keys, and can be
--  useful for providing more fine-grained summaries of event data.
--  Additionally, sort keys consisting of up to two fields can be
--  specified by the 'sort' keyword.  If more than one field is
--  specified, the result will be a 'sort within a sort': the first key
--  is taken to be the primary sort key and the second the secondary
--  key.  If a hist trigger is given a name using the 'name' parameter,
--  its histogram data will be shared with other triggers of the same
--  name, and trigger hits will update this common data.  Only triggers
--  with 'compatible' fields can be combined in this way; triggers are
--  'compatible' if the fields named in the trigger share the same
--  number and type of fields and those fields also have the same names.
--  Note that any two events always share the compatible 'hitcount' and
--  'stacktrace' fields and can therefore be combined using those
--  fields, however pointless that may be.
+-  When a matching event is hit, an entry is added to a hash table
+-  using the key(s) and value(s) named.  Keys and values correspond to
+-  fields in the event's format description.  Values must correspond to
+-  numeric fields - on an event hit, the value(s) will be added to a
+-  sum kept for that field.  The special string 'hitcount' can be used
+-  in place of an explicit value field - this is simply a count of
+-  event hits.  If 'values' isn't specified, an implicit 'hitcount'
+-  value will be automatically created and used as the only value.
+-  Keys can be any field, or the special string 'stacktrace', which
+-  will use the event's kernel stacktrace as the key.  The keywords
+-  'keys' or 'key' can be used to specify keys, and the keywords
+-  'values', 'vals', or 'val' can be used to specify values.  Compound
+-  keys consisting of up to two fields can be specified by the 'keys'
+-  keyword.  Hashing a compound key produces a unique entry in the
+-  table for each unique combination of component keys, and can be
+-  useful for providing more fine-grained summaries of event data.
+-  Additionally, sort keys consisting of up to two fields can be
+-  specified by the 'sort' keyword.  If more than one field is
+-  specified, the result will be a 'sort within a sort': the first key
+-  is taken to be the primary sort key and the second the secondary
+-  key.  If a hist trigger is given a name using the 'name' parameter,
+-  its histogram data will be shared with other triggers of the same
+-  name, and trigger hits will update this common data.  Only triggers
+-  with 'compatible' fields can be combined in this way; triggers are
+-  'compatible' if the fields named in the trigger share the same
+-  number and type of fields and those fields also have the same names.
+-  Note that any two events always share the compatible 'hitcount' and
+-  'stacktrace' fields and can therefore be combined using those
+-  fields, however pointless that may be.
 -
 -  'hist' triggers add a 'hist' file to each event's subdirectory.
 -  Reading the 'hist' file for the event will dump the hash table in
@@ -5328,10 +1554,11 @@ diff -durN -x '*~' -x '*.orig' linux-4.14.orig/Documentation/trace/events.txt li
 -        Entries: 7
 -        Dropped: 0
 +  See Documentation/trace/histogram.txt for details and examples.
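
The hist-trigger syntax removed from events.txt above (hist:keys=...[:values=...]...) is armed by writing it into an event's trigger file. A minimal user-space sketch, assuming tracefs is mounted at the usual debugfs path and using the kmem:kmalloc event's call_site/bytes_req fields as an example:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* keys/vals follow the format quoted above. */
        const char *trigger = "hist:keys=call_site:vals=bytes_req";
        int fd = open("/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger",
                      O_WRONLY);

        if (fd < 0)
                return 1;
        if (write(fd, trigger, strlen(trigger)) != (ssize_t)strlen(trigger)) {
                close(fd);
                return 1;
        }
        return close(fd) != 0;
}
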
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/Documentation/trace/ftrace.txt linux-4.14/Documentation/trace/ftrace.txt
---- linux-4.14.orig/Documentation/trace/ftrace.txt     2017-11-12 19:46:13.000000000 +0100
-+++ linux-4.14/Documentation/trace/ftrace.txt  2018-09-05 11:05:07.000000000 +0200
-@@ -539,6 +539,30 @@
+diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
+index d4601df6e72e..54213e5c23f6 100644
+--- a/Documentation/trace/ftrace.txt
++++ b/Documentation/trace/ftrace.txt
+@@ -539,6 +539,30 @@ of ftrace. Here is a list of some of the key files:
  
        See events.txt for more information.
  
@@ -5362,9 +1589,11 @@ diff -durN -x '*~' -x '*.orig' linux-4.14.orig/Documentation/trace/ftrace.txt li
    hwlat_detector:
  
        Directory for the Hardware Latency Detector.
-diff -durN -x '*~' -x '*.orig' linux-4.14.orig/Documentation/trace/histogram.txt linux-4.14/Documentation/trace/histogram.txt
---- linux-4.14.orig/Documentation/trace/histogram.txt  1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.14/Documentation/trace/histogram.txt       2018-09-05 11:05:07.000000000 +0200
+diff --git a/Documentation/trace/histogram.txt b/Documentation/trace/histogram.txt
+new file mode 100644
+index 000000000000..6e05510afc28
+--- /dev/null
++++ b/Documentation/trace/histogram.txt
 @@ -0,0 +1,1995 @@
 +                           Event Histograms
 +
@@ -6967,404 +3196,4227 @@ diff -durN -x '*~' -x '*.orig' linux-4.14.orig/Documentation/trace/histogram.txt
 +             entry_SYSCALL_64_fastpath+0x12/0x6a
 +    } hitcount:        244
 +
-+    Totals:
-+        Hits: 489
-+        Entries: 7
-+        Dropped: 0
++    Totals:
++        Hits: 489
++        Entries: 7
++        Dropped: 0
++
++
++2.2 Inter-event hist triggers
++-----------------------------
++
++Inter-event hist triggers are hist triggers that combine values from
++one or more other events and create a histogram using that data.  Data
++from an inter-event histogram can in turn become the source for
++further combined histograms, thus providing a chain of related
++histograms, which is important for some applications.
++
++The most important example of an inter-event quantity that can be used
++in this manner is latency, which is simply a difference in timestamps
++between two events.  Although latency is the most important
++inter-event quantity, note that because the support is completely
++general across the trace event subsystem, any event field can be used
++in an inter-event quantity.
++
++An example of a histogram that combines data from other histograms
++into a useful chain would be a 'wakeupswitch latency' histogram that
++combines a 'wakeup latency' histogram and a 'switch latency'
++histogram.
++
++Normally, a hist trigger specification consists of a (possibly
++compound) key along with one or more numeric values, which are
++continually updated sums associated with that key.  A histogram
++specification in this case consists of individual key and value
++specifications that refer to trace event fields associated with a
++single event type.
++
++The inter-event hist trigger extension allows fields from multiple
++events to be referenced and combined into a multi-event histogram
++specification.  In support of this overall goal, a few enabling
++features have been added to the hist trigger support:
++
++  - In order to compute an inter-event quantity, a value from one
++    event needs to saved and then referenced from another event.  This
++    requires the introduction of support for histogram 'variables'.
++
++  - The computation of inter-event quantities and their combination
++    require some minimal amount of support for applying simple
++    expressions to variables (+ and -).
++
++  - A histogram consisting of inter-event quantities isn't logically a
++    histogram on either event (so having the 'hist' file for either
++    event host the histogram output doesn't really make sense).  To
++    address the idea that the histogram is associated with a
++    combination of events, support is added allowing the creation of
++    'synthetic' events that are events derived from other events.
++    These synthetic events are full-fledged events just like any other
++    and can be used as such, as for instance to create the
++    'combination' histograms mentioned previously.
++
++  - A set of 'actions' can be associated with histogram entries -
<Skipped 23575 lines>
================================================================

---- gitweb:

http://git.pld-linux.org/gitweb.cgi/packages/kernel.git/commitdiff/b3bbd4851db6ea6934e82dbbf902bf9239601eae
