commit b4de310e3b27a2231954d63e30c8ee4e9b24d8ba
Author: Jacek Konieczny <[email protected]>
Date:   Fri Oct 21 14:45:12 2016 +0200

    RT variant added
    
    CONFIG_PREEMPT_RT patch for a fully preemptible kernel, required for
    low latencies.
    
    Note: some patches are disabled when building the rt kernel, as they
    fail to compile with -Werror=incompatible-pointer-types. Those
    failures are probably bugs in the patches themselves and should be
    fixed for the regular kernel too.
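    
    To enable the new rt bcond at build time, a sketch (the exact
    rpmbuild invocation depends on the local build setup):
    
        rpmbuild -bb kernel.spec --with rt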

 kernel-rt.config |    27 +
 kernel-rt.patch  | 31316 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 kernel.spec      |    28 +
 3 files changed, 31371 insertions(+)
---
diff --git a/kernel.spec b/kernel.spec
index 0161654..e45d327 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -32,6 +32,8 @@
 
 %bcond_with    vserver         # support for VServer
 
+%bcond_with    rt              # real-time kernel (CONFIG_PREEMPT_RT) for low latencies
+
 %bcond_with    vanilla         # don't include any patches
 %bcond_with    rescuecd        # build kernel for our rescue
 %bcond_with    myown           # build with your own config (kernel-myown.config)
@@ -97,6 +99,9 @@
 %if %{without pae}
 %define                alt_kernel      nopae
 %endif
+%if %{with rt}
+%define                alt_kernel      rt
+%endif
 
 # kernel release (used in filesystem and eventually in uname -r)
 # modules will be looked from /lib/modules/%{kernel_release}
@@ -141,6 +146,7 @@ Source25:   kernel-ia64.config
 
 Source41:      kernel-patches.config
 Source43:      kernel-vserver.config
+Source44:      kernel-rt.config
 
 Source55:      kernel-imq.config
 
@@ -215,6 +221,10 @@ Patch250:  kernel-fix_256colors_menuconfig.patch
 # https://patchwork.kernel.org/patch/236261/
 Patch400:      kernel-virtio-gl-accel.patch
 
+# https://rt.wiki.kernel.org/
+# https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patch-4.4.23-rt33.patch.xz
+Patch500:      kernel-rt.patch
+
 Patch2000:     kernel-small_fixes.patch
 Patch2001:     kernel-pwc-uncompress.patch
 Patch2003:     kernel-regressions.patch
@@ -390,6 +400,7 @@ BuildRoot:  %{tmpdir}/%{name}-%{version}-root-%(id -u -n)
 %{?with_fbcondecor:Fbsplash/fbcondecor - enabled }\
 %{?with_nfsroot:Root on NFS - enabled}\
 %{?with_vserver:Linux-VServer - %{vserver_patch}}\
+%{?with_rt:CONFIG_PREEMPT_RT - enabled}\
 
 %define Features %(echo "%{__features}" | sed '/^$/d')
 
@@ -656,7 +667,10 @@ cd linux-%{basever}
 #
 
 # kernel-pom-ng-IPV4OPTSSTRIP.patch
+%if %{without rt}
+# fails on -Werror=incompatible-pointer-types
 %patch10 -p1
+%endif
 
 # kernel-owner-xid.patch
 %if %{with vserver}
@@ -681,8 +695,11 @@ cd linux-%{basever}
 %patch53 -p1
 %endif
 
+%if %{without rt}
+# fails on -Werror=incompatible-pointer-types
 %patch55 -p1
 %patch56 -p1
+%endif
 
 # kernel-rndis_host-wm5.patch
 %patch59 -p1
@@ -706,13 +723,21 @@ cd linux-%{basever}
 %patch7000 -p1
 %endif
 
+%if %{with rt}
+%patch500 -p1
+rm -f localversion-rt
+%endif
+
 # apparmor
 %patch5000 -p1
 
 %patch250 -p1
 
 # virtio-gl
+%if %{without rt}
+# fails on -Werror=incompatible-pointer-types
 %patch400 -p1
+%endif
 
 %endif # vanilla
 
@@ -913,6 +938,9 @@ EOCONFIG
 %if %{with vserver}
                %{SOURCE43} \
 %endif
+%if %{with rt}
+               %{SOURCE44} \
+%endif
                %{SOURCE41} %{?0:patches} \
 %endif
                %{SOURCE20} \
diff --git a/kernel-rt.config b/kernel-rt.config
new file mode 100644
index 0000000..98633d5
--- /dev/null
+++ b/kernel-rt.config
@@ -0,0 +1,27 @@
+AUFS_FS all=n
+DEBUG_PREEMPT all=y
+HWLAT_DETECTOR all=m
+HZ_1000 all=y
+HZ_100 all=n
+HZ_250 all=n
+HZ_300 all=n
+HZ all=1000
+MISSED_TIMER_OFFSETS_HIST all=y
+PREEMPT all=y
+PREEMPT_COUNT all=y
+PREEMPT_LAZY all=y
+PREEMPT__LL all=n
+PREEMPT_NONE all=n
+PREEMPT_NOTIFIERS all=y
+PREEMPT_OFF_HIST all=y
+PREEMPT_RCU all=y
+PREEMPT_RTB all=n
+PREEMPT_RT_BASE all=y
+PREEMPT_RT_FULL all=y
+PREEMPT_TRACER all=y
+PREEMPT_VOLUNTARY all=n
+TASKS_RCU all=y
+TRACER_SNAPSHOT_PER_CPU_SWAP all=y
+TRANSPARENT_HUGEPAGE_ALWAYS all=n
+TREE_RCU all=n
+WAKEUP_LATENCY_HIST all=y
diff --git a/kernel-rt.patch b/kernel-rt.patch
new file mode 100644
index 0000000..0ce35b8
--- /dev/null
+++ b/kernel-rt.patch
@@ -0,0 +1,31316 @@
+diff --git a/Documentation/hwlat_detector.txt b/Documentation/hwlat_detector.txt
+new file mode 100644
+index 000000000000..cb61516483d3
+--- /dev/null
++++ b/Documentation/hwlat_detector.txt
+@@ -0,0 +1,64 @@
++Introduction:
++-------------
++
++The module hwlat_detector is a special purpose kernel module that is used to
++detect large system latencies induced by the behavior of certain underlying
++hardware or firmware, independent of Linux itself. The code was developed
++originally to detect SMIs (System Management Interrupts) on x86 systems,
++however there is nothing x86 specific about this patchset. It was
++originally written for use by the "RT" patch since the Real Time
++kernel is highly latency sensitive.
++
++SMIs are usually not serviced by the Linux kernel, which typically does not
++even know that they are occurring. SMIs are instead set up by BIOS code
++and are serviced by BIOS code, usually for "critical" events such as
++management of thermal sensors and fans. Sometimes though, SMIs are used for
++other tasks and those tasks can spend an inordinate amount of time in the
++handler (sometimes measured in milliseconds). Obviously this is a problem if
++you are trying to keep event service latencies down in the microsecond range.
++
++The hardware latency detector works by hogging all of the cpus for configurable
++amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter
++for some period, then looking for gaps in the TSC data. Any gap indicates a
++time when the polling was interrupted and since the machine is stopped and
++interrupts turned off the only thing that could do that would be an SMI.
++
++Note that the SMI detector should *NEVER* be used in a production environment.
++It is intended to be run manually to determine if the hardware platform has a
++problem with long system firmware service routines.
++
++Usage:
++------
++
++Loading the module hwlat_detector passing the parameter "enabled=1" (or by
++setting the "enable" entry in "hwlat_detector" debugfs toggled on) is the only
++step required to start the hwlat_detector. It is possible to redefine the
++threshold in microseconds (us) above which latency spikes will be taken
++into account (parameter "threshold=").
++
++Example:
++
++      # modprobe hwlat_detector enabled=1 threshold=100
++
++After the module is loaded, it creates a directory named "hwlat_detector" under
++the debugfs mountpoint, "/debug/hwlat_detector" for this text. It is necessary
++to have debugfs mounted, which might be on /sys/debug on your system.
++
++The /debug/hwlat_detector interface contains the following files:
++
++count                 - number of latency spikes observed since last reset
++enable                        - a global enable/disable toggle (0/1), resets count
++max                   - maximum hardware latency actually observed (usecs)
++sample                        - a pipe from which to read current raw sample data
++                        in the format <timestamp> <latency observed usecs>
++                        (can be opened O_NONBLOCK for a single sample)
++threshold             - minimum latency value to be considered (usecs)
++width                 - time period to sample with CPUs held (usecs)
++                        must be less than the total window size (enforced)
++window                        - total period of sampling, width being inside (usecs)
++
++By default we will set width to 500,000 and window to 1,000,000, meaning that
++we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we
++observe any latencies that exceed the threshold (initially 100 usecs),
++then we write to a global sample ring buffer of 8K samples, which is
++consumed by reading from the "sample" (pipe) debugfs file interface.
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 0e4102ae1a61..26b5f39d57a8 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1629,6 +1629,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+       ip=             [IP_PNP]
+                       See Documentation/filesystems/nfs/nfsroot.txt.
+ 
++      irqaffinity=    [SMP] Set the default irq affinity mask
++                      Format:
++                      <cpu number>,...,<cpu number>
++                      or
++                      <cpu number>-<cpu number>
++                      (must be a positive range in ascending order)
++                      or a mixture
++                      <cpu number>,...,<cpu number>-<cpu number>
++
+       irqfixup        [HW]
+                       When an interrupt is not handled search all handlers
+                       for it. Intended to get systems with badly broken
+diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
+index 13f5619b2203..f64d075ba647 100644
+--- a/Documentation/sysrq.txt
++++ b/Documentation/sysrq.txt
+@@ -59,10 +59,17 @@ On PowerPC - Press 'ALT - Print Screen (or F13) - <command key>,
+ On other - If you know of the key combos for other architectures, please
+            let me know so I can add them to this section.
+ 
+-On all -  write a character to /proc/sysrq-trigger.  e.g.:
+-
++On all -  write a character to /proc/sysrq-trigger, e.g.:
+               echo t > /proc/sysrq-trigger
+ 
++On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
++              echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
++       Send an ICMP echo request with this pattern plus the particular
++       SysRq command key. Example:
++              # ping -c1 -s57 -p0102030468
++       will trigger the SysRq-H (help) command.
++
++
+ *  What are the 'command' keys?
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 'b'     - Will immediately reboot the system without syncing or unmounting
+diff --git a/Documentation/trace/histograms.txt b/Documentation/trace/histograms.txt
+new file mode 100644
+index 000000000000..6f2aeabf7faa
+--- /dev/null
++++ b/Documentation/trace/histograms.txt
+@@ -0,0 +1,186 @@
++              Using the Linux Kernel Latency Histograms
++
++
++This document gives a short explanation of how to enable, configure and use
++latency histograms. Latency histograms are primarily relevant in the
++context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
++and are used in the quality management of the Linux real-time
++capabilities.
++
++
++* Purpose of latency histograms
++
++A latency histogram continuously accumulates the frequencies of latency
++data. There are two types of histograms
++- potential sources of latencies
++- effective latencies
++
++
++* Potential sources of latencies
++
++Potential sources of latencies are code segments where interrupts,
++preemption or both are disabled (aka critical sections). To create
++histograms of potential sources of latency, the kernel stores the time
++stamp at the start of a critical section, determines the time elapsed
++when the end of the section is reached, and increments the frequency
++counter of that latency value - irrespective of whether any concurrently
++running process is affected by latency or not.
++- Configuration items (in the Kernel hacking/Tracers submenu)
++  CONFIG_INTERRUPT_OFF_LATENCY
++  CONFIG_PREEMPT_OFF_LATENCY
++
++
++* Effective latencies
++
++Effective latencies are those actually occurring during wakeup of a process. To
++determine effective latencies, the kernel stores the time stamp when a
++process is scheduled to be woken up, and determines the duration of the
++wakeup time shortly before control is passed over to this process. Note
++that the apparent latency in user space may be somewhat longer, since the
++process may be interrupted after control is passed over to it but before
++the execution in user space takes place. Simply measuring the interval
++between enqueuing and wakeup may also not be appropriate in cases when a
++process is scheduled as a result of a timer expiration. The timer may have
++missed its deadline, e.g. due to disabled interrupts, but this latency
++would not be registered. Therefore, the offsets of missed timers are
++recorded in a separate histogram. If both wakeup latency and missed timer
++offsets are configured and enabled, a third histogram may be enabled that
++records the overall latency as a sum of the timer latency, if any, and the
++wakeup latency. This histogram is called "timerandwakeup".
++- Configuration items (in the Kernel hacking/Tracers submenu)
++  CONFIG_WAKEUP_LATENCY
++  CONFIG_MISSED_TIMER_OFFSETS
++
++
++* Usage
++
++The interface to the administration of the latency histograms is located
++in the debugfs file system. To mount it, either enter
++
++mount -t sysfs nodev /sys
++mount -t debugfs nodev /sys/kernel/debug
++
++from shell command line level, or add
++
++nodev /sys                    sysfs   defaults        0 0
++nodev /sys/kernel/debug       debugfs defaults        0 0
++
++to the file /etc/fstab. All latency histogram related files are then
++available in the directory /sys/kernel/debug/tracing/latency_hist. A
++particular histogram type is enabled by writing non-zero to the related
++variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
++Select "preemptirqsoff" for the histograms of potential sources of
++latencies and "wakeup" for histograms of effective latencies etc. The
++histogram data - one per CPU - are available in the files
++
++/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
++/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
++/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
++/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
++
++The histograms are reset by writing non-zero to the file "reset" in a
++particular latency directory. To reset all latency data, use
++
++#!/bin/sh
++
++TRACINGDIR=/sys/kernel/debug/tracing
++HISTDIR=$TRACINGDIR/latency_hist
++
++if test -d $HISTDIR
++then
++  cd $HISTDIR
++  for i in `find . | grep /reset$`
++  do
++    echo 1 >$i
++  done
++fi
++
++
++* Data format
++
++Latency data are stored with a resolution of one microsecond. The
++maximum latency is 10,240 microseconds. The data are only valid, if the
++overflow register is empty. Every output line contains the latency in
++microseconds in the first row and the number of samples in the second
++row. To display only lines with a positive latency count, use, for
++example,
++
++grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
++
++#Minimum latency: 0 microseconds.
++#Average latency: 0 microseconds.
++#Maximum latency: 25 microseconds.
++#Total samples: 3104770694
++#There are 0 samples greater or equal than 10240 microseconds
++#usecs                 samples
++    0       2984486876
++    1         49843506
++    2         58219047
++    3          5348126
++    4          2187960
++    5          3388262
++    6           959289
++    7           208294
++    8            40420
++    9             4485
++   10            14918
++   11            18340
++   12            25052
++   13            19455
++   14             5602
++   15              969
++   16               47
++   17               18
++   18               14
++   19                1
++   20                3
++   21                2
++   22                5
++   23                2
++   25                1
++
++
++* Wakeup latency of a selected process
++
++To only collect wakeup latency data of a particular process, write the
++PID of the requested process to
++
++/sys/kernel/debug/tracing/latency_hist/wakeup/pid
++
++PIDs are not considered, if this variable is set to 0.
++
++
++* Details of the process with the highest wakeup latency so far
++
++Selected data of the process that suffered from the highest wakeup
++latency that occurred in a particular CPU are available in the file
++
++/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
++
++In addition, other relevant system data at the time when the
++latency occurred are given.
++
++The format of the data is (all in one line):
++<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
++<- <PID> <Priority> <Command> <Timestamp>
++
++The value of <Timeroffset> is only relevant in the combined timer
++and wakeup latency recording. In the wakeup recording, it is
++always 0, in the missed_timer_offsets recording, it is the same
++as <Latency>.
++
++When retrospectively searching for the origin of a latency and
++tracing was not enabled, it may be helpful to know the name and
++some basic data of the task that (finally) was switching to the
++late real-time task. In addition to the victim's data, also the
++data of the possible culprit are therefore displayed after the
++"<-" symbol.
++
++Finally, the timestamp of the time when the latency occurred
++in <seconds>.<microseconds> after the most recent system boot
++is provided.
++
++These data are also reset when the wakeup histogram is reset.
+diff --git a/Makefile b/Makefile
+index 95421b688f23..336590d1c969 100644
+--- a/Makefile
++++ b/Makefile
+@@ -783,6 +783,9 @@ KBUILD_CFLAGS   += $(call cc-option,-Werror=strict-prototypes)
+ # Prohibit date/time macros, which would make the build non-deterministic
+ KBUILD_CFLAGS   += $(call cc-option,-Werror=date-time)
+ 
++# enforce correct pointer usage
++KBUILD_CFLAGS   += $(call cc-option,-Werror=incompatible-pointer-types)
++
+ # use the deterministic mode of AR if available
+ KBUILD_ARFLAGS := $(call ar-option,D)
+ 
+diff --git a/arch/Kconfig b/arch/Kconfig
+index 4e949e58b192..3b26d76933fb 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -9,6 +9,7 @@ config OPROFILE
+       tristate "OProfile system profiling"
+       depends on PROFILING
+       depends on HAVE_OPROFILE
++      depends on !PREEMPT_RT_FULL
+       select RING_BUFFER
+       select RING_BUFFER_ALLOW_SWAP
+       help
+@@ -52,6 +53,7 @@ config KPROBES
+ config JUMP_LABEL
+        bool "Optimize very unlikely/likely branches"
+        depends on HAVE_ARCH_JUMP_LABEL
++       depends on (!INTERRUPT_OFF_HIST && !PREEMPT_OFF_HIST && !WAKEUP_LATENCY_HIST && !MISSED_TIMER_OFFSETS_HIST)
+        help
+          This option enables a transparent branch optimization that
+        makes certain almost-always-true or almost-always-false branch
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 34e1569a11ee..79c4603e9453 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -33,7 +33,7 @@ config ARM
+       select HARDIRQS_SW_RESEND
+       select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
+       select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+-      select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32
++      select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !PREEMPT_RT_BASE
+       select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32
+       select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
+       select HAVE_ARCH_TRACEHOOK
+@@ -68,6 +68,7 @@ config ARM
+       select HAVE_PERF_EVENTS
+       select HAVE_PERF_REGS
+       select HAVE_PERF_USER_STACK_DUMP
++      select HAVE_PREEMPT_LAZY
+       select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
+       select HAVE_REGS_AND_STACK_ACCESS_API
+       select HAVE_SYSCALL_TRACEPOINTS
+diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
+index 12ebfcc1d539..c962084605bc 100644
+--- a/arch/arm/include/asm/switch_to.h
++++ b/arch/arm/include/asm/switch_to.h
+@@ -3,6 +3,13 @@
+ 
+ #include <linux/thread_info.h>
+ 
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
++#else
++static inline void
++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
++#endif
++
+ /*
+  * For v7 SMP cores running a preemptible kernel we may be pre-empted
+  * during a TLB maintenance operation, so execute an inner-shareable dsb
+@@ -25,6 +32,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
+ #define switch_to(prev,next,last)                                     \
+ do {                                                                  \
+       __complete_pending_tlbi();                                      \
++      switch_kmaps(prev, next);                                       \
+       last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));        \
+ } while (0)
+ 
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index 776757d1604a..1f36a4eccc72 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -49,6 +49,7 @@ struct cpu_context_save {
+ struct thread_info {
+       unsigned long           flags;          /* low level flags */
+       int                     preempt_count;  /* 0 => preemptable, <0 => bug */
++      int                     preempt_lazy_count; /* 0 => preemptable, <0 => bug */
+       mm_segment_t            addr_limit;     /* address limit */
+       struct task_struct      *task;          /* main task structure */
+       __u32                   cpu;            /* cpu */
+@@ -142,7 +143,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ #define TIF_SYSCALL_TRACE     4       /* syscall trace active */
+ #define TIF_SYSCALL_AUDIT     5       /* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT        6       /* syscall tracepoint instrumentation */
+-#define TIF_SECCOMP           7       /* seccomp syscall filtering active */
++#define TIF_SECCOMP           8       /* seccomp syscall filtering active */
++#define TIF_NEED_RESCHED_LAZY 7
+ 
+ #define TIF_NOHZ              12      /* in adaptive nohz mode */
+ #define TIF_USING_IWMMXT      17
+@@ -152,6 +154,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ #define _TIF_SIGPENDING               (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED     (1 << TIF_NEED_RESCHED)
+ #define _TIF_NOTIFY_RESUME    (1 << TIF_NOTIFY_RESUME)
++#define _TIF_NEED_RESCHED_LAZY        (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_UPROBE           (1 << TIF_UPROBE)
+ #define _TIF_SYSCALL_TRACE    (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_SYSCALL_AUDIT    (1 << TIF_SYSCALL_AUDIT)
+@@ -167,7 +170,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+  * Change these and you break ASM code in entry-common.S
+  */
+ #define _TIF_WORK_MASK                (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+-                               _TIF_NOTIFY_RESUME | _TIF_UPROBE)
++                               _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
++                               _TIF_NEED_RESCHED_LAZY)
+ 
+ #endif /* __KERNEL__ */
+ #endif /* __ASM_ARM_THREAD_INFO_H */
+diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
+index 871b8267d211..4dbe70de7318 100644
+--- a/arch/arm/kernel/asm-offsets.c
++++ b/arch/arm/kernel/asm-offsets.c
+@@ -65,6 +65,7 @@ int main(void)
+   BLANK();
+   DEFINE(TI_FLAGS,            offsetof(struct thread_info, flags));
+   DEFINE(TI_PREEMPT,          offsetof(struct thread_info, preempt_count));
++  DEFINE(TI_PREEMPT_LAZY,     offsetof(struct thread_info, preempt_lazy_count));
+   DEFINE(TI_ADDR_LIMIT,               offsetof(struct thread_info, addr_limit));
+   DEFINE(TI_TASK,             offsetof(struct thread_info, task));
+   DEFINE(TI_CPU,              offsetof(struct thread_info, cpu));
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index 3ce377f7251f..d044cea59f54 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -215,11 +215,18 @@ __irq_svc:
+ #ifdef CONFIG_PREEMPT
+       get_thread_info tsk
+       ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
+-      ldr     r0, [tsk, #TI_FLAGS]            @ get flags
+       teq     r8, #0                          @ if preempt count != 0
++      bne     1f                              @ return from exception
++      ldr     r0, [tsk, #TI_FLAGS]            @ get flags
++      tst     r0, #_TIF_NEED_RESCHED          @ if NEED_RESCHED is set
++      blne    svc_preempt                     @ preempt!
++
++      ldr     r8, [tsk, #TI_PREEMPT_LAZY]     @ get preempt lazy count
++      teq     r8, #0                          @ if preempt lazy count != 0
+       movne   r0, #0                          @ force flags to 0
+-      tst     r0, #_TIF_NEED_RESCHED
++      tst     r0, #_TIF_NEED_RESCHED_LAZY
+       blne    svc_preempt
++1:
+ #endif
+ 
+       svc_exit r5, irq = 1                    @ return from exception
+@@ -234,8 +241,14 @@ svc_preempt:
+ 1:    bl      preempt_schedule_irq            @ irq en/disable is done inside
+       ldr     r0, [tsk, #TI_FLAGS]            @ get new tasks TI_FLAGS
+       tst     r0, #_TIF_NEED_RESCHED
++      bne     1b
++      tst     r0, #_TIF_NEED_RESCHED_LAZY
+       reteq   r8                              @ go again
+-      b       1b
++      ldr     r0, [tsk, #TI_PREEMPT_LAZY]     @ get preempt lazy count
++      teq     r0, #0                          @ if preempt lazy count != 0
++      beq     1b
++      ret     r8                              @ go again
++
+ #endif
+ 
+ __und_fault:
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index 30a7228eaceb..c3bd6cbfce4b 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -36,7 +36,9 @@ ret_fast_syscall:
+  UNWIND(.cantunwind   )
+       disable_irq_notrace                     @ disable interrupts
+       ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
+-      tst     r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++      tst     r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
++      bne     fast_work_pending
++      tst     r1, #_TIF_SECCOMP
+       bne     fast_work_pending
+ 
+       /* perform architecture specific actions before user return */
+@@ -62,8 +64,11 @@ ret_fast_syscall:
+       str     r0, [sp, #S_R0 + S_OFF]!        @ save returned r0
+       disable_irq_notrace                     @ disable interrupts
+       ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
+-      tst     r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++      tst     r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
++      bne     do_slower_path
++      tst     r1, #_TIF_SECCOMP
+       beq     no_work_pending
++do_slower_path:
+  UNWIND(.fnend                )
+ ENDPROC(ret_fast_syscall)
+ 
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index 4adfb46e3ee9..15f1d94b47c5 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -319,6 +319,30 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
+ }
+ 
+ #ifdef CONFIG_MMU
++/*
++ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock.  If the lock is not
++ * initialized by pgtable_page_ctor() then a coredump of the vector page will
++ * fail.
++ */
++static int __init vectors_user_mapping_init_page(void)
++{
++      struct page *page;
++      unsigned long addr = 0xffff0000;
++      pgd_t *pgd;
++      pud_t *pud;
++      pmd_t *pmd;
++
++      pgd = pgd_offset_k(addr);
++      pud = pud_offset(pgd, addr);
++      pmd = pmd_offset(pud, addr);
++      page = pmd_page(*(pmd));
++
++      pgtable_page_ctor(page);
++
++      return 0;
++}
++late_initcall(vectors_user_mapping_init_page);
++
+ #ifdef CONFIG_KUSER_HELPERS
+ /*
+  * The vectors page is always readable from user space for the
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index 7b8f2141427b..96541e00b74a 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -572,7 +572,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+        */
+       trace_hardirqs_off();
+       do {
+-              if (likely(thread_flags & _TIF_NEED_RESCHED)) {
++              if (likely(thread_flags & (_TIF_NEED_RESCHED |
++                                         _TIF_NEED_RESCHED_LAZY))) {
+                       schedule();
+               } else {
+                       if (unlikely(!user_mode(regs)))
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index b26361355dae..e5754e3b03c4 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -230,8 +230,6 @@ int __cpu_disable(void)
+       flush_cache_louis();
+       local_flush_tlb_all();
+ 
+-      clear_tasks_mm_cpumask(cpu);
+-
+       return 0;
+ }
+ 
+@@ -247,6 +245,9 @@ void __cpu_die(unsigned int cpu)
+               pr_err("CPU%u: cpu didn't die\n", cpu);
+               return;
+       }
++
++      clear_tasks_mm_cpumask(cpu);
++
+       pr_notice("CPU%u: shutdown\n", cpu);
+ 
+       /*
+diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
+index 0bee233fef9a..314cfb232a63 100644
+--- a/arch/arm/kernel/unwind.c
++++ b/arch/arm/kernel/unwind.c
+@@ -93,7 +93,7 @@ extern const struct unwind_idx __start_unwind_idx[];
+ static const struct unwind_idx *__origin_unwind_idx;
+ extern const struct unwind_idx __stop_unwind_idx[];
+ 
+-static DEFINE_SPINLOCK(unwind_lock);
++static DEFINE_RAW_SPINLOCK(unwind_lock);
+ static LIST_HEAD(unwind_tables);
+ 
+ /* Convert a prel31 symbol to an absolute address */
+@@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
+               /* module unwind tables */
+               struct unwind_table *table;
+ 
+-              spin_lock_irqsave(&unwind_lock, flags);
++              raw_spin_lock_irqsave(&unwind_lock, flags);
+               list_for_each_entry(table, &unwind_tables, list) {
+                       if (addr >= table->begin_addr &&
+                           addr < table->end_addr) {
+@@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
+                               break;
+                       }
+               }
+-              spin_unlock_irqrestore(&unwind_lock, flags);
++              raw_spin_unlock_irqrestore(&unwind_lock, flags);
+       }
+ 
+       pr_debug("%s: idx = %p\n", __func__, idx);
+@@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
+       tab->begin_addr = text_addr;
+       tab->end_addr = text_addr + text_size;
+ 
+-      spin_lock_irqsave(&unwind_lock, flags);
++      raw_spin_lock_irqsave(&unwind_lock, flags);
+       list_add_tail(&tab->list, &unwind_tables);
+-      spin_unlock_irqrestore(&unwind_lock, flags);
++      raw_spin_unlock_irqrestore(&unwind_lock, flags);
+ 
+       return tab;
+ }
+@@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_table *tab)
+       if (!tab)
+               return;
+ 
+-      spin_lock_irqsave(&unwind_lock, flags);
++      raw_spin_lock_irqsave(&unwind_lock, flags);
+       list_del(&tab->list);
+-      spin_unlock_irqrestore(&unwind_lock, flags);
++      raw_spin_unlock_irqrestore(&unwind_lock, flags);
+ 
+       kfree(tab);
+ }
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index d7bef2144760..36a3e51492f7 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -496,18 +496,18 @@ static void kvm_arm_resume_guest(struct kvm *kvm)
+       struct kvm_vcpu *vcpu;
+ 
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+-              wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
++              struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
+ 
+               vcpu->arch.pause = false;
+-              wake_up_interruptible(wq);
++              swake_up(wq);
+       }
+ }
+ 
+ static void vcpu_sleep(struct kvm_vcpu *vcpu)
+ {
+-      wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
++      struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
+ 
+-      wait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
++      swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+                                      (!vcpu->arch.pause)));
+ }
+ 
+@@ -566,7 +566,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+                * involves poking the GIC, which must be done in a
+                * non-preemptible context.
+                */
+-              preempt_disable();
++              migrate_disable();
+               kvm_timer_flush_hwstate(vcpu);
+               kvm_vgic_flush_hwstate(vcpu);
+ 
+@@ -585,7 +585,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+                       local_irq_enable();
+                       kvm_timer_sync_hwstate(vcpu);
+                       kvm_vgic_sync_hwstate(vcpu);
+-                      preempt_enable();
++                      migrate_enable();
+                       continue;
+               }
+ 
+@@ -639,7 +639,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 
+               kvm_vgic_sync_hwstate(vcpu);
+ 
+-              preempt_enable();
++              migrate_enable();
+ 
+               ret = handle_exit(vcpu, run, ret);
+       }
+diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
+index a9b3b905e661..c2b131527a64 100644
+--- a/arch/arm/kvm/psci.c
++++ b/arch/arm/kvm/psci.c
+@@ -70,7 +70,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+ {
+       struct kvm *kvm = source_vcpu->kvm;
+       struct kvm_vcpu *vcpu = NULL;
+-      wait_queue_head_t *wq;
++      struct swait_queue_head *wq;
+       unsigned long cpu_id;
+       unsigned long context_id;
+       phys_addr_t target_pc;
+@@ -119,7 +119,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+       smp_mb();               /* Make sure the above is visible */
+ 
+       wq = kvm_arch_vcpu_wq(vcpu);
+-      wake_up_interruptible(wq);
++      swake_up(wq);
+ 
+       return PSCI_RET_SUCCESS;
+ }
+diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
+index 28656c2b54a0..3f501305ca26 100644
+--- a/arch/arm/mach-at91/Kconfig
++++ b/arch/arm/mach-at91/Kconfig
+@@ -99,6 +99,7 @@ config HAVE_AT91_USB_CLK
+ config COMMON_CLK_AT91
+       bool
+       select COMMON_CLK
++      select MFD_SYSCON
+ 
+ config HAVE_AT91_SMD
+       bool
+diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
+index c1a7c6cc00e1..63b4fa25b48a 100644
+--- a/arch/arm/mach-at91/at91rm9200.c
++++ b/arch/arm/mach-at91/at91rm9200.c
+@@ -12,7 +12,6 @@
+ #include <linux/of_platform.h>
+ 
+ #include <asm/mach/arch.h>
+-#include <asm/system_misc.h>
+ 
+ #include "generic.h"
+ #include "soc.h"
+@@ -33,7 +32,6 @@ static void __init at91rm9200_dt_device_init(void)
+ 
+       of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev);
+ 
+-      arm_pm_idle = at91rm9200_idle;
+       at91rm9200_pm_init();
+ }
+ 
+diff --git a/arch/arm/mach-at91/at91sam9.c b/arch/arm/mach-at91/at91sam9.c
+index 7eb64f763034..cada2a6412b3 100644
+--- a/arch/arm/mach-at91/at91sam9.c
++++ b/arch/arm/mach-at91/at91sam9.c
+@@ -62,8 +62,6 @@ static void __init at91sam9_common_init(void)
+               soc_dev = soc_device_to_device(soc);
+ 
+       of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev);
+-
+-      arm_pm_idle = at91sam9_idle;
+ }
+ 
+ static void __init at91sam9_dt_device_init(void)
+diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
+index b0fa7dc7286d..28ca57a2060f 100644
+--- a/arch/arm/mach-at91/generic.h
++++ b/arch/arm/mach-at91/generic.h
+@@ -11,27 +11,18 @@
+ #ifndef _AT91_GENERIC_H
+ #define _AT91_GENERIC_H
+ 
+-#include <linux/of.h>
+-#include <linux/reboot.h>
+-
+- /* Map io */
+-extern void __init at91_map_io(void);
+-extern void __init at91_alt_map_io(void);
+-
+-/* idle */
+-extern void at91rm9200_idle(void);
+-extern void at91sam9_idle(void);
+-
+ #ifdef CONFIG_PM
+ extern void __init at91rm9200_pm_init(void);
+ extern void __init at91sam9260_pm_init(void);
+ extern void __init at91sam9g45_pm_init(void);
+ extern void __init at91sam9x5_pm_init(void);
++extern void __init sama5_pm_init(void);
+ #else
+ static inline void __init at91rm9200_pm_init(void) { }
+ static inline void __init at91sam9260_pm_init(void) { }
+ static inline void __init at91sam9g45_pm_init(void) { }
+ static inline void __init at91sam9x5_pm_init(void) { }
++static inline void __init sama5_pm_init(void) { }
+ #endif
+ 
+ #endif /* _AT91_GENERIC_H */
+diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
+index 23726fb31741..f06270198bf1 100644
+--- a/arch/arm/mach-at91/pm.c
++++ b/arch/arm/mach-at91/pm.c
+@@ -31,10 +31,13 @@
+ #include <asm/mach/irq.h>
+ #include <asm/fncpy.h>
+ #include <asm/cacheflush.h>
++#include <asm/system_misc.h>
+ 
+ #include "generic.h"
+ #include "pm.h"
+ 
++static void __iomem *pmc;
++
+ /*
+  * FIXME: this is needed to communicate between the pinctrl driver and
+  * the PM implementation in the machine. Possibly part of the PM
+@@ -87,7 +90,7 @@ static int at91_pm_verify_clocks(void)
+       unsigned long scsr;
+       int i;
+ 
+-      scsr = at91_pmc_read(AT91_PMC_SCSR);
++      scsr = readl(pmc + AT91_PMC_SCSR);
+ 
+       /* USB must not be using PLLB */
+       if ((scsr & at91_pm_data.uhp_udp_mask) != 0) {
+@@ -101,8 +104,7 @@ static int at91_pm_verify_clocks(void)
+ 
+               if ((scsr & (AT91_PMC_PCK0 << i)) == 0)
+                       continue;
+-
+-              css = at91_pmc_read(AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
++              css = readl(pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
+               if (css != AT91_PMC_CSS_SLOW) {
+                       pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css);
+                       return 0;
+@@ -145,8 +147,8 @@ static void at91_pm_suspend(suspend_state_t state)
+       flush_cache_all();
+       outer_disable();
+ 
+-      at91_suspend_sram_fn(at91_pmc_base, at91_ramc_base[0],
+-                              at91_ramc_base[1], pm_data);
++      at91_suspend_sram_fn(pmc, at91_ramc_base[0],
++                           at91_ramc_base[1], pm_data);
+ 
+       outer_resume();
+ }
+@@ -353,6 +355,21 @@ static __init void at91_dt_ramc(void)
+       at91_pm_set_standby(standby);
+ }
+ 
++void at91rm9200_idle(void)
++{
++      /*
++       * Disable the processor clock.  The processor will be automatically
++       * re-enabled by an interrupt or by a reset.
++       */
++      writel(AT91_PMC_PCK, pmc + AT91_PMC_SCDR);
++}
++
++void at91sam9_idle(void)
++{
++      writel(AT91_PMC_PCK, pmc + AT91_PMC_SCDR);
++      cpu_do_idle();
++}
++
+ static void __init at91_pm_sram_init(void)
+ {
+       struct gen_pool *sram_pool;
+@@ -399,13 +416,36 @@ static void __init at91_pm_sram_init(void)
+                       &at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
+ }
+ 
+-static void __init at91_pm_init(void)
++static const struct of_device_id atmel_pmc_ids[] __initconst = {
++      { .compatible = "atmel,at91rm9200-pmc"  },
++      { .compatible = "atmel,at91sam9260-pmc" },
++      { .compatible = "atmel,at91sam9g45-pmc" },
++      { .compatible = "atmel,at91sam9n12-pmc" },
++      { .compatible = "atmel,at91sam9x5-pmc" },
++      { .compatible = "atmel,sama5d3-pmc" },
++      { .compatible = "atmel,sama5d2-pmc" },
++      { /* sentinel */ },
++};
++
++static void __init at91_pm_init(void (*pm_idle)(void))
+ {
+-      at91_pm_sram_init();
++      struct device_node *pmc_np;
+ 
+       if (at91_cpuidle_device.dev.platform_data)
+               platform_device_register(&at91_cpuidle_device);
+ 
++      pmc_np = of_find_matching_node(NULL, atmel_pmc_ids);
++      pmc = of_iomap(pmc_np, 0);
++      if (!pmc) {
++              pr_err("AT91: PM not supported, PMC not found\n");
++              return;
++      }
++
++      if (pm_idle)
++              arm_pm_idle = pm_idle;
++
++      at91_pm_sram_init();
++
+       if (at91_suspend_sram_fn)
+               suspend_set_ops(&at91_pm_ops);
+       else
+@@ -424,7 +464,7 @@ void __init at91rm9200_pm_init(void)
+       at91_pm_data.uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP;
+       at91_pm_data.memctrl = AT91_MEMCTRL_MC;
+ 
+-      at91_pm_init();
++      at91_pm_init(at91rm9200_idle);
+ }
+ 
+ void __init at91sam9260_pm_init(void)
+@@ -432,7 +472,7 @@ void __init at91sam9260_pm_init(void)
+       at91_dt_ramc();
+       at91_pm_data.memctrl = AT91_MEMCTRL_SDRAMC;
+       at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP;
+-      return at91_pm_init();
++      at91_pm_init(at91sam9_idle);
+ }
+ 
+ void __init at91sam9g45_pm_init(void)
+@@ -440,7 +480,7 @@ void __init at91sam9g45_pm_init(void)
+       at91_dt_ramc();
+       at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP;
+       at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR;
+-      return at91_pm_init();
++      at91_pm_init(at91sam9_idle);
+ }
+ 
+ void __init at91sam9x5_pm_init(void)
+@@ -448,5 +488,13 @@ void __init at91sam9x5_pm_init(void)
+       at91_dt_ramc();
+       at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP;
+       at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR;
+-      return at91_pm_init();
++      at91_pm_init(at91sam9_idle);
++}
++
++void __init sama5_pm_init(void)
++{
++      at91_dt_ramc();
++      at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP;
++      at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR;
++      at91_pm_init(NULL);
+ }
+diff --git a/arch/arm/mach-at91/sama5.c b/arch/arm/mach-at91/sama5.c
+index d9cf6799aec0..df8fdf1cf66d 100644
+--- a/arch/arm/mach-at91/sama5.c
++++ b/arch/arm/mach-at91/sama5.c
+@@ -51,7 +51,7 @@ static void __init sama5_dt_device_init(void)
+               soc_dev = soc_device_to_device(soc);
+ 
+       of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev);
+-      at91sam9x5_pm_init();
++      sama5_pm_init();
+ }
+ 
+ static const char *const sama5_dt_board_compat[] __initconst = {
+diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
+index 98a2c0cbb833..310dce500d3e 100644
+--- a/arch/arm/mach-exynos/platsmp.c
++++ b/arch/arm/mach-exynos/platsmp.c
+@@ -230,7 +230,7 @@ static void __iomem *scu_base_addr(void)
+       return (void __iomem *)(S5P_VA_SCU);
+ }
+ 
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ 
+ static void exynos_secondary_init(unsigned int cpu)
+ {
+@@ -243,8 +243,8 @@ static void exynos_secondary_init(unsigned int cpu)
+       /*
+        * Synchronise with the boot thread.
+        */
+-      spin_lock(&boot_lock);
+-      spin_unlock(&boot_lock);
++      raw_spin_lock(&boot_lock);
++      raw_spin_unlock(&boot_lock);
+ }
+ 
+ int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
+@@ -308,7 +308,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
+        * Set synchronisation state between this boot processor
+        * and the secondary one
+        */
+-      spin_lock(&boot_lock);
++      raw_spin_lock(&boot_lock);
+ 
+       /*
+        * The secondary processor is waiting to be released from
+@@ -335,7 +335,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ 
+               if (timeout == 0) {
+                       printk(KERN_ERR "cpu1 power enable failed");
+-                      spin_unlock(&boot_lock);
++                      raw_spin_unlock(&boot_lock);
+                       return -ETIMEDOUT;
+               }
+       }
+@@ -381,7 +381,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
+        * calibrations, then wait for it to finish
+        */
+ fail:
+-      spin_unlock(&boot_lock);
++      raw_spin_unlock(&boot_lock);
+ 
+       return pen_release != -1 ? ret : 0;
+ }
+diff --git a/arch/arm/mach-hisi/platmcpm.c b/arch/arm/mach-hisi/platmcpm.c
+index b5f8f5ffda79..9753a84df9c4 100644
+--- a/arch/arm/mach-hisi/platmcpm.c
++++ b/arch/arm/mach-hisi/platmcpm.c
+@@ -61,7 +61,7 @@
+ 
+ static void __iomem *sysctrl, *fabric;
+ static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ static u32 fabric_phys_addr;
+ /*
+  * [0]: bootwrapper physical address
+@@ -113,7 +113,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
+       if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
+               return -EINVAL;
+ 
+-      spin_lock_irq(&boot_lock);
++      raw_spin_lock_irq(&boot_lock);
+ 
+       if (hip04_cpu_table[cluster][cpu])
+               goto out;
+@@ -147,7 +147,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
+ 
+ out:
+       hip04_cpu_table[cluster][cpu]++;
+-      spin_unlock_irq(&boot_lock);
++      raw_spin_unlock_irq(&boot_lock);
+ 
+       return 0;
+ }
+@@ -162,11 +162,11 @@ static void hip04_cpu_die(unsigned int l_cpu)
+       cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+       cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ 
+-      spin_lock(&boot_lock);
++      raw_spin_lock(&boot_lock);
+       hip04_cpu_table[cluster][cpu]--;
+       if (hip04_cpu_table[cluster][cpu] == 1) {
+               /* A power_up request went ahead of us. */
+-              spin_unlock(&boot_lock);
++              raw_spin_unlock(&boot_lock);
+               return;
+       } else if (hip04_cpu_table[cluster][cpu] > 1) {
+               pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
+@@ -174,7 +174,7 @@ static void hip04_cpu_die(unsigned int l_cpu)
+       }
+ 
+       last_man = hip04_cluster_is_down(cluster);
+-      spin_unlock(&boot_lock);
++      raw_spin_unlock(&boot_lock);
+       if (last_man) {
+               /* Since it's Cortex A15, disable L2 prefetching. */
+               asm volatile(
+@@ -203,7 +203,7 @@ static int hip04_cpu_kill(unsigned int l_cpu)
+              cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
+ 
+       count = TIMEOUT_MSEC / POLL_MSEC;
+-      spin_lock_irq(&boot_lock);
++      raw_spin_lock_irq(&boot_lock);
+       for (tries = 0; tries < count; tries++) {
+               if (hip04_cpu_table[cluster][cpu])
+                       goto err;
+@@ -211,10 +211,10 @@ static int hip04_cpu_kill(unsigned int l_cpu)
+               data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
+               if (data & CORE_WFI_STATUS(cpu))
+                       break;
+-              spin_unlock_irq(&boot_lock);
++              raw_spin_unlock_irq(&boot_lock);
+               /* Wait for clean L2 when the whole cluster is down. */
+               msleep(POLL_MSEC);
+-              spin_lock_irq(&boot_lock);
++              raw_spin_lock_irq(&boot_lock);
+       }
+       if (tries >= count)
+               goto err;
+@@ -231,10 +231,10 @@ static int hip04_cpu_kill(unsigned int l_cpu)
+               goto err;
+       if (hip04_cluster_is_down(cluster))
+               hip04_set_snoop_filter(cluster, 0);
+-      spin_unlock_irq(&boot_lock);
++      raw_spin_unlock_irq(&boot_lock);
+       return 1;
+ err:
+-      spin_unlock_irq(&boot_lock);
++      raw_spin_unlock_irq(&boot_lock);
+       return 0;
+ }
+ #endif
+diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
+index 8ceda2844c4f..08bcf8fb76f2 100644
+--- a/arch/arm/mach-imx/Kconfig
++++ b/arch/arm/mach-imx/Kconfig
+@@ -524,7 +524,7 @@ config SOC_IMX6Q
+       bool "i.MX6 Quad/DualLite support"
+       select ARM_ERRATA_764369 if SMP
+       select HAVE_ARM_SCU if SMP
+-      select HAVE_ARM_TWD if SMP
++      select HAVE_ARM_TWD
+       select PCI_DOMAINS if PCI
+       select PINCTRL_IMX6Q
+       select SOC_IMX6
+diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
+index 79e1f876d1c9..7e625c17f78e 100644
+--- a/arch/arm/mach-omap2/omap-smp.c
++++ b/arch/arm/mach-omap2/omap-smp.c
+@@ -43,7 +43,7 @@
+ /* SCU base address */
+ static void __iomem *scu_base;
+ 
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ 
+ void __iomem *omap4_get_scu_base(void)
+ {
+@@ -74,8 +74,8 @@ static void omap4_secondary_init(unsigned int cpu)
+       /*
+        * Synchronise with the boot thread.
+        */
+-      spin_lock(&boot_lock);
+-      spin_unlock(&boot_lock);
++      raw_spin_lock(&boot_lock);
++      raw_spin_unlock(&boot_lock);
+ }
+ 
+ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -89,7 +89,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
+        * Set synchronisation state between this boot processor
+        * and the secondary one
+        */
+-      spin_lock(&boot_lock);
++      raw_spin_lock(&boot_lock);
+ 
+       /*
+        * Update the AuxCoreBoot0 with boot state for secondary core.
+@@ -166,7 +166,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
+        * Now the secondary core is starting up let it run its
+        * calibrations, then wait for it to finish
+        */
+-      spin_unlock(&boot_lock);
++      raw_spin_unlock(&boot_lock);
+ 
+       return 0;
+ }
+diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c
+index e46c91094dde..dcb3ed0c26da 100644
+--- a/arch/arm/mach-prima2/platsmp.c
++++ b/arch/arm/mach-prima2/platsmp.c
+@@ -22,7 +22,7 @@
+ 
+ static void __iomem *clk_base;
+ 
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ 
+ static void sirfsoc_secondary_init(unsigned int cpu)
+ {
+@@ -36,8 +36,8 @@ static void sirfsoc_secondary_init(unsigned int cpu)
+       /*
+        * Synchronise with the boot thread.
+        */
+-      spin_lock(&boot_lock);
+-      spin_unlock(&boot_lock);
++      raw_spin_lock(&boot_lock);
++      raw_spin_unlock(&boot_lock);
+ }
+ 
+ static const struct of_device_id clk_ids[]  = {
+@@ -75,7 +75,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
+       /* make sure write buffer is drained */
+       mb();
+ 
+-      spin_lock(&boot_lock);
++      raw_spin_lock(&boot_lock);
+ 
+       /*
+        * The secondary processor is waiting to be released from
+@@ -107,7 +107,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
+        * now the secondary core is starting up let it run its
+        * calibrations, then wait for it to finish
+        */
+-      spin_unlock(&boot_lock);
++      raw_spin_unlock(&boot_lock);
+ 
+       return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c
+index 9b00123a315d..0a49fe1bc8cf 100644
+--- a/arch/arm/mach-qcom/platsmp.c
++++ b/arch/arm/mach-qcom/platsmp.c
+@@ -46,7 +46,7 @@
+ 
+ extern void secondary_startup_arm(void);
+ 
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
+ static void qcom_cpu_die(unsigned int cpu)
+@@ -60,8 +60,8 @@ static void qcom_secondary_init(unsigned int cpu)
+       /*
+        * Synchronise with the boot thread.
+        */
+-      spin_lock(&boot_lock);
+-      spin_unlock(&boot_lock);
++      raw_spin_lock(&boot_lock);
++      raw_spin_unlock(&boot_lock);
+ }
+ 
+ static int scss_release_secondary(unsigned int cpu)
+@@ -284,7 +284,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
+        * set synchronisation state between this boot processor
+        * and the secondary one
+        */
+-      spin_lock(&boot_lock);
++      raw_spin_lock(&boot_lock);
+ 
+       /*
+        * Send the secondary CPU a soft interrupt, thereby causing
+@@ -297,7 +297,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
+        * now the secondary core is starting up let it run its
+        * calibrations, then wait for it to finish
+        */
+-      spin_unlock(&boot_lock);
++      raw_spin_unlock(&boot_lock);
+ 
+       return ret;
+ }
+diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c
+index fd4297713d67..b0553b2c2d53 100644
+--- a/arch/arm/mach-spear/platsmp.c
++++ b/arch/arm/mach-spear/platsmp.c
+@@ -32,7 +32,7 @@ static void write_pen_release(int val)
+       sync_cache_w(&pen_release);
+ }
+ 
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ 
+ static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
+ 
+@@ -47,8 +47,8 @@ static void spear13xx_secondary_init(unsigned int cpu)
+       /*
+        * Synchronise with the boot thread.
+        */
+-      spin_lock(&boot_lock);
+-      spin_unlock(&boot_lock);
++      raw_spin_lock(&boot_lock);
++      raw_spin_unlock(&boot_lock);
+ }
+ 
+static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -59,7 +59,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+        * set synchronisation state between this boot processor
+        * and the secondary one
+        */
+-      spin_lock(&boot_lock);
++      raw_spin_lock(&boot_lock);
+ 
+       /*
+        * The secondary processor is waiting to be released from
+@@ -84,7 +84,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+        * now the secondary core is starting up let it run its
+        * calibrations, then wait for it to finish
+        */
+-      spin_unlock(&boot_lock);
++      raw_spin_unlock(&boot_lock);
+ 
+       return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c
+index c4ad6eae67fa..e830b20b212f 100644
+--- a/arch/arm/mach-sti/platsmp.c
++++ b/arch/arm/mach-sti/platsmp.c
+@@ -35,7 +35,7 @@ static void write_pen_release(int val)
+       sync_cache_w(&pen_release);
+ }
+ 
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ 
+ static void sti_secondary_init(unsigned int cpu)
+ {
+@@ -48,8 +48,8 @@ static void sti_secondary_init(unsigned int cpu)
+       /*
+        * Synchronise with the boot thread.
+        */
+-      spin_lock(&boot_lock);
+-      spin_unlock(&boot_lock);
++      raw_spin_lock(&boot_lock);
++      raw_spin_unlock(&boot_lock);
+ }
+ 
+ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -60,7 +60,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
+        * set synchronisation state between this boot processor
+        * and the secondary one
+        */
+-      spin_lock(&boot_lock);
++      raw_spin_lock(&boot_lock);
+ 
+       /*
+        * The secondary processor is waiting to be released from
+@@ -91,7 +91,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
+        * now the secondary core is starting up let it run its
+        * calibrations, then wait for it to finish
+        */
+-      spin_unlock(&boot_lock);
++      raw_spin_unlock(&boot_lock);
+ 
+       return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index daafcf121ce0..b8aa1e9ee8ee 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -430,6 +430,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
+       if (addr < TASK_SIZE)
+               return do_page_fault(addr, fsr, regs);
+ 
++      if (interrupts_enabled(regs))
++              local_irq_enable();
++
+       if (user_mode(regs))
+               goto bad_area;
+ 
+@@ -497,6 +500,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
+ static int
+ do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ {
++      if (interrupts_enabled(regs))
++              local_irq_enable();
++
+       do_bad_area(addr, fsr, regs);
+       return 0;
+ }
+diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
+index d02f8187b1cc..542692dbd40a 100644
+--- a/arch/arm/mm/highmem.c
++++ b/arch/arm/mm/highmem.c
+@@ -34,6 +34,11 @@ static inline pte_t get_fixmap_pte(unsigned long vaddr)
+       return *ptep;
+ }
+ 
++static unsigned int fixmap_idx(int type)
++{
++      return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++}
++
+ void *kmap(struct page *page)
+ {
+       might_sleep();
+@@ -54,12 +59,13 @@ EXPORT_SYMBOL(kunmap);
+ 
+ void *kmap_atomic(struct page *page)
+ {
++      pte_t pte = mk_pte(page, kmap_prot);
+       unsigned int idx;
+       unsigned long vaddr;
+       void *kmap;
+       int type;
+ 
+-      preempt_disable();
++      preempt_disable_nort();
+       pagefault_disable();
+       if (!PageHighMem(page))
+               return page_address(page);
+@@ -79,7 +85,7 @@ void *kmap_atomic(struct page *page)
+ 
+       type = kmap_atomic_idx_push();
+ 
+-      idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++      idx = fixmap_idx(type);
+       vaddr = __fix_to_virt(idx);
+ #ifdef CONFIG_DEBUG_HIGHMEM
+       /*
+@@ -93,7 +99,10 @@ void *kmap_atomic(struct page *page)
+        * in place, so the contained TLB flush ensures the TLB is updated
+        * with the new mapping.
+        */
+-      set_fixmap_pte(idx, mk_pte(page, kmap_prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++      current->kmap_pte[type] = pte;
++#endif
++      set_fixmap_pte(idx, pte);
+ 
+       return (void *)vaddr;
+ }
+@@ -106,44 +115,75 @@ void __kunmap_atomic(void *kvaddr)
+ 
+       if (kvaddr >= (void *)FIXADDR_START) {
+               type = kmap_atomic_idx();
+-              idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++              idx = fixmap_idx(type);
+ 
+               if (cache_is_vivt())
+                       __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
++#ifdef CONFIG_PREEMPT_RT_FULL
++              current->kmap_pte[type] = __pte(0);
++#endif
+ #ifdef CONFIG_DEBUG_HIGHMEM
+               BUG_ON(vaddr != __fix_to_virt(idx));
+-              set_fixmap_pte(idx, __pte(0));
+ #else
+               (void) idx;  /* to kill a warning */
+ #endif
++              set_fixmap_pte(idx, __pte(0));
+               kmap_atomic_idx_pop();
+       } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+               /* this address was obtained through kmap_high_get() */
+               kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
+       }
+       pagefault_enable();
+-      preempt_enable();
++      preempt_enable_nort();
+ }
+ EXPORT_SYMBOL(__kunmap_atomic);
+ 
+ void *kmap_atomic_pfn(unsigned long pfn)
+ {
++      pte_t pte = pfn_pte(pfn, kmap_prot);
+       unsigned long vaddr;
+       int idx, type;
+       struct page *page = pfn_to_page(pfn);
+ 
+-      preempt_disable();
++      preempt_disable_nort();
+       pagefault_disable();
+       if (!PageHighMem(page))
+               return page_address(page);
+ 
+       type = kmap_atomic_idx_push();
+-      idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
++      idx = fixmap_idx(type);
+       vaddr = __fix_to_virt(idx);
+ #ifdef CONFIG_DEBUG_HIGHMEM
+       BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
+ #endif
+-      set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++      current->kmap_pte[type] = pte;
++#endif
++      set_fixmap_pte(idx, pte);
+ 
+       return (void *)vaddr;
+ }
++#if defined CONFIG_PREEMPT_RT_FULL
++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
++{
++      int i;
++
++      /*
++       * Clear @prev's kmap_atomic mappings
++       */
++      for (i = 0; i < prev_p->kmap_idx; i++) {
++              int idx = fixmap_idx(i);
++
++              set_fixmap_pte(idx, __pte(0));
++      }
++      /*
++       * Restore @next_p's kmap_atomic mappings
++       */
++      for (i = 0; i < next_p->kmap_idx; i++) {
++              int idx = fixmap_idx(i);
++
++              if (!pte_none(next_p->kmap_pte[i]))
++                      set_fixmap_pte(idx, next_p->kmap_pte[i]);
++      }
++}
++#endif
+diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
+index 53feb90c840c..b4a8d54fc3f3 100644
+--- a/arch/arm/plat-versatile/platsmp.c
++++ b/arch/arm/plat-versatile/platsmp.c
+@@ -30,7 +30,7 @@ static void write_pen_release(int val)
+       sync_cache_w(&pen_release);
+ }
+ 
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ 
+ void versatile_secondary_init(unsigned int cpu)
+ {
+@@ -43,8 +43,8 @@ void versatile_secondary_init(unsigned int cpu)
+       /*
+        * Synchronise with the boot thread.
+        */
+-      spin_lock(&boot_lock);
+-      spin_unlock(&boot_lock);
++      raw_spin_lock(&boot_lock);
++      raw_spin_unlock(&boot_lock);
+ }
+ 
+ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -55,7 +55,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
+        * Set synchronisation state between this boot processor
+        * and the secondary one
+        */
+-      spin_lock(&boot_lock);
++      raw_spin_lock(&boot_lock);
+ 
+       /*
+        * This is really belt and braces; we hold unintended secondary
+@@ -85,7 +85,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
+        * now the secondary core is starting up let it run its
+        * calibrations, then wait for it to finish
+        */
+-      spin_unlock(&boot_lock);
++      raw_spin_unlock(&boot_lock);
+ 
+       return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 14cdc6dea493..9196cf82f7be 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -76,6 +76,7 @@ config ARM64
+       select HAVE_PERF_REGS
+       select HAVE_PERF_USER_STACK_DUMP
+       select HAVE_RCU_TABLE_FREE
++      select HAVE_PREEMPT_LAZY
+       select HAVE_SYSCALL_TRACEPOINTS
+       select IOMMU_DMA if IOMMU_SUPPORT
+       select IRQ_DOMAIN
+@@ -582,7 +583,7 @@ config XEN_DOM0
+ 
+ config XEN
+       bool "Xen guest support on ARM64"
+-      depends on ARM64 && OF
++      depends on ARM64 && OF && !PREEMPT_RT_FULL
+       select SWIOTLB_XEN
+       help
+         Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
+diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
+index 90c7ff233735..5f4e89fbc290 100644
+--- a/arch/arm64/include/asm/thread_info.h
++++ b/arch/arm64/include/asm/thread_info.h
+@@ -49,6 +49,7 @@ struct thread_info {
+       mm_segment_t            addr_limit;     /* address limit */
+       struct task_struct      *task;          /* main task structure */
+       int                     preempt_count;  /* 0 => preemptable, <0 => bug */
++      int                     preempt_lazy_count; /* 0 => preemptable, <0 => bug */
+       int                     cpu;            /* cpu */
+ };
+ 
+@@ -103,6 +104,7 @@ static inline struct thread_info *current_thread_info(void)
+ #define TIF_NEED_RESCHED      1
+ #define TIF_NOTIFY_RESUME     2       /* callback before returning to user */
+ #define TIF_FOREIGN_FPSTATE   3       /* CPU's FP state is not current's */
++#define TIF_NEED_RESCHED_LAZY 4
+ #define TIF_NOHZ              7
+ #define TIF_SYSCALL_TRACE     8
+ #define TIF_SYSCALL_AUDIT     9
+@@ -118,6 +120,7 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_NEED_RESCHED     (1 << TIF_NEED_RESCHED)
+ #define _TIF_NOTIFY_RESUME    (1 << TIF_NOTIFY_RESUME)
+ #define _TIF_FOREIGN_FPSTATE  (1 << TIF_FOREIGN_FPSTATE)
++#define _TIF_NEED_RESCHED_LAZY        (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_NOHZ             (1 << TIF_NOHZ)
+ #define _TIF_SYSCALL_TRACE    (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_SYSCALL_AUDIT    (1 << TIF_SYSCALL_AUDIT)
+@@ -126,7 +129,8 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_32BIT            (1 << TIF_32BIT)
+ 
+ #define _TIF_WORK_MASK                (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+-                               _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
++                               _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
++                               _TIF_NEED_RESCHED_LAZY)
+ 
+ #define _TIF_SYSCALL_WORK     (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+                                _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
+diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
+index 087cf9a65359..d74475928399 100644
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -35,6 +35,7 @@ int main(void)
+   BLANK();
+   DEFINE(TI_FLAGS,            offsetof(struct thread_info, flags));
+   DEFINE(TI_PREEMPT,          offsetof(struct thread_info, preempt_count));
++  DEFINE(TI_PREEMPT_LAZY,     offsetof(struct thread_info, preempt_lazy_count));
+  DEFINE(TI_ADDR_LIMIT,               offsetof(struct thread_info, addr_limit));
+   DEFINE(TI_TASK,             offsetof(struct thread_info, task));
+   DEFINE(TI_CPU,              offsetof(struct thread_info, cpu));
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 5a3753d09e20..05d73c4c03f6 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -376,11 +376,16 @@ el1_irq:
+ #ifdef CONFIG_PREEMPT
+       get_thread_info tsk
+       ldr     w24, [tsk, #TI_PREEMPT]         // get preempt count
+-      cbnz    w24, 1f                         // preempt count != 0
++      cbnz    w24, 2f                         // preempt count != 0
+       ldr     x0, [tsk, #TI_FLAGS]            // get flags
+-      tbz     x0, #TIF_NEED_RESCHED, 1f       // needs rescheduling?
+-      bl      el1_preempt
++      tbnz    x0, #TIF_NEED_RESCHED, 1f       // needs rescheduling?
++
++      ldr     w24, [tsk, #TI_PREEMPT_LAZY]    // get preempt lazy count
++      cbnz    w24, 2f                         // preempt lazy count != 0
++      tbz     x0, #TIF_NEED_RESCHED_LAZY, 2f  // needs rescheduling?
+ 1:
++      bl      el1_preempt
++2:
+ #endif
+ #ifdef CONFIG_TRACE_IRQFLAGS
+       bl      trace_hardirqs_on
+@@ -394,6 +399,7 @@ el1_preempt:
+ 1:    bl      preempt_schedule_irq            // irq en/disable is done inside
+       ldr     x0, [tsk, #TI_FLAGS]            // get new tasks TI_FLAGS
+       tbnz    x0, #TIF_NEED_RESCHED, 1b       // needs rescheduling?
++      tbnz    x0, #TIF_NEED_RESCHED_LAZY, 1b  // needs rescheduling?
+       ret     x24
+ #endif
+ 
+@@ -638,6 +644,7 @@ ret_fast_syscall_trace:
+  */
+ work_pending:
+       tbnz    x1, #TIF_NEED_RESCHED, work_resched
++      tbnz    x1, #TIF_NEED_RESCHED_LAZY, work_resched
+       /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
+       ldr     x2, [sp, #S_PSTATE]
+       mov     x0, sp                          // 'regs'
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index db459612de44..bd8be6a0e745 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -2410,7 +2410,7 @@ config CPU_R4400_WORKAROUNDS
+ #
+ config HIGHMEM
+       bool "High Memory Support"
+-      depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
++      depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL
+ 
+ config CPU_SUPPORTS_HIGHMEM
+       bool
+diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
+index e86b7499921a..b2a2f678c5dc 100644
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -454,8 +454,8 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+ 
+       dvcpu->arch.wait = 0;
+ 
+-      if (waitqueue_active(&dvcpu->wq))
+-              wake_up_interruptible(&dvcpu->wq);
++      if (swait_active(&dvcpu->wq))
++              swake_up(&dvcpu->wq);
+ 
+       return 0;
+ }
+@@ -1183,8 +1183,8 @@ static void kvm_mips_comparecount_func(unsigned long data)
+       kvm_mips_callbacks->queue_timer_int(vcpu);
+ 
+       vcpu->arch.wait = 0;
+-      if (waitqueue_active(&vcpu->wq))
+-              wake_up_interruptible(&vcpu->wq);
++      if (swait_active(&vcpu->wq))
++              swake_up(&vcpu->wq);
+ }
+ 
+ /* low level hrtimer wake routine */
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index db49e0d796b1..1d2be228661c 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -60,10 +60,11 @@ config LOCKDEP_SUPPORT
+ 
+ config RWSEM_GENERIC_SPINLOCK
+       bool
++      default y if PREEMPT_RT_FULL
+ 
+ config RWSEM_XCHGADD_ALGORITHM
+       bool
+-      default y
++      default y if !PREEMPT_RT_FULL
+ 
+ config GENERIC_LOCKBREAK
+       bool
+@@ -141,6 +142,7 @@ config PPC
+       select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+       select GENERIC_STRNCPY_FROM_USER
+       select GENERIC_STRNLEN_USER
++      select HAVE_PREEMPT_LAZY
+       select HAVE_MOD_ARCH_SPECIFIC
+       select MODULES_USE_ELF_RELA
+       select CLONE_BACKWARDS
+@@ -319,7 +321,7 @@ menu "Kernel options"
+ 
+ config HIGHMEM
+       bool "High memory support"
+-      depends on PPC32
++      depends on PPC32 && !PREEMPT_RT_FULL
+ 
+ source kernel/Kconfig.hz
+ source kernel/Kconfig.preempt
+diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
+index cfa758c6b4f6..f8673ff84b31 100644
+--- a/arch/powerpc/include/asm/kvm_host.h
++++ b/arch/powerpc/include/asm/kvm_host.h
+@@ -286,7 +286,7 @@ struct kvmppc_vcore {
+       struct list_head runnable_threads;
+       struct list_head preempt_list;
+       spinlock_t lock;
+-      wait_queue_head_t wq;
++      struct swait_queue_head wq;
+       spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
+       u64 stolen_tb;
+       u64 preempt_tb;
+@@ -626,7 +626,7 @@ struct kvm_vcpu_arch {
+       u8 prodded;
+       u32 last_inst;
+ 
+-      wait_queue_head_t *wqp;
++      struct swait_queue_head *wqp;
+       struct kvmppc_vcore *vcore;
+       int ret;
+       int trap;
+diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
+index 7efee4a3240b..40e6fa1b85b2 100644
+--- a/arch/powerpc/include/asm/thread_info.h
++++ b/arch/powerpc/include/asm/thread_info.h
+@@ -42,6 +42,8 @@ struct thread_info {
+       int             cpu;                    /* cpu we're on */
+       int             preempt_count;          /* 0 => preemptable,
+                                                  <0 => BUG */
++      int             preempt_lazy_count;      /* 0 => preemptable,
++                                                 <0 => BUG */
+       unsigned long   local_flags;            /* private flags for thread */
+ 
+       /* low level flags - has atomic operations done on it */
+@@ -82,8 +84,7 @@ static inline struct thread_info *current_thread_info(void)
+ #define TIF_SYSCALL_TRACE     0       /* syscall trace active */
+ #define TIF_SIGPENDING                1       /* signal pending */
+ #define TIF_NEED_RESCHED      2       /* rescheduling necessary */
+-#define TIF_POLLING_NRFLAG    3       /* true if poll_idle() is polling
+-                                         TIF_NEED_RESCHED */
++#define TIF_NEED_RESCHED_LAZY 3       /* lazy rescheduling necessary */
+ #define TIF_32BIT             4       /* 32 bit binary */
+ #define TIF_RESTORE_TM                5       /* need to restore TM FP/VEC/VSX */
+ #define TIF_SYSCALL_AUDIT     7       /* syscall auditing active */
+@@ -101,6 +102,8 @@ static inline struct thread_info *current_thread_info(void)
+ #if defined(CONFIG_PPC64)
+ #define TIF_ELF2ABI           18      /* function descriptors must die! */
+ #endif
++#define TIF_POLLING_NRFLAG    19      /* true if poll_idle() is polling
++                                         TIF_NEED_RESCHED */
+ 
+ /* as above, but as bit values */
+ #define _TIF_SYSCALL_TRACE    (1<<TIF_SYSCALL_TRACE)
+@@ -119,14 +122,16 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_SYSCALL_TRACEPOINT       (1<<TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_EMULATE_STACK_STORE      (1<<TIF_EMULATE_STACK_STORE)
+ #define _TIF_NOHZ             (1<<TIF_NOHZ)
++#define _TIF_NEED_RESCHED_LAZY        (1<<TIF_NEED_RESCHED_LAZY)
+ #define _TIF_SYSCALL_DOTRACE  (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+                                _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+                                _TIF_NOHZ)
+ 
+ #define _TIF_USER_WORK_MASK   (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+                                _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+-                               _TIF_RESTORE_TM)
++                               _TIF_RESTORE_TM | _TIF_NEED_RESCHED_LAZY)
+ #define _TIF_PERSYSCALL_MASK  (_TIF_RESTOREALL|_TIF_NOERROR)
++#define _TIF_NEED_RESCHED_MASK        (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+ 
+ /* Bits in local_flags */
+ /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index 221d584d089f..d6d0c59ef8ae 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -160,6 +160,7 @@ int main(void)
+       DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+       DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
+       DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
++      DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
+       DEFINE(TI_TASK, offsetof(struct thread_info, task));
+       DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+ 
+diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
+index 2405631e91a2..c21b4b42eaa0 100644
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -818,7 +818,14 @@ resume_kernel:
+       cmpwi   0,r0,0          /* if non-zero, just restore regs and return */
+       bne     restore
+       andi.   r8,r8,_TIF_NEED_RESCHED
++      bne+    1f
++      lwz     r0,TI_PREEMPT_LAZY(r9)
++      cmpwi   0,r0,0          /* if non-zero, just restore regs and return */
++      bne     restore
++      lwz     r0,TI_FLAGS(r9)
++      andi.   r0,r0,_TIF_NEED_RESCHED_LAZY
+       beq+    restore
++1:
+       lwz     r3,_MSR(r1)
+       andi.   r0,r3,MSR_EE    /* interrupts off? */
+       beq     restore         /* don't schedule if so */
+@@ -829,11 +836,11 @@ resume_kernel:
+        */
+       bl      trace_hardirqs_off
+ #endif
+-1:    bl      preempt_schedule_irq
++2:    bl      preempt_schedule_irq
+       CURRENT_THREAD_INFO(r9, r1)
+       lwz     r3,TI_FLAGS(r9)
+-      andi.   r0,r3,_TIF_NEED_RESCHED
+-      bne-    1b
++      andi.   r0,r3,_TIF_NEED_RESCHED_MASK
++      bne-    2b
+ #ifdef CONFIG_TRACE_IRQFLAGS
+       /* And now, to properly rebalance the above, we tell lockdep they
+        * are being turned back on, which will happen when we return
+@@ -1154,7 +1161,7 @@ global_dbcr0:
+ #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
+ 
+ do_work:                      /* r10 contains MSR_KERNEL here */
+-      andi.   r0,r9,_TIF_NEED_RESCHED
++      andi.   r0,r9,_TIF_NEED_RESCHED_MASK
+       beq     do_user_signal
+ 
+ do_resched:                   /* r10 contains MSR_KERNEL here */
+@@ -1175,7 +1182,7 @@ recheck:
+       MTMSRD(r10)             /* disable interrupts */
+       CURRENT_THREAD_INFO(r9, r1)
+       lwz     r9,TI_FLAGS(r9)
+-      andi.   r0,r9,_TIF_NEED_RESCHED
++      andi.   r0,r9,_TIF_NEED_RESCHED_MASK
+       bne-    do_resched
+       andi.   r0,r9,_TIF_USER_WORK_MASK
+       beq     restore_user
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index edba294620db..1aae3fdb0c2a 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -683,7 +683,7 @@ _GLOBAL(ret_from_except_lite)
+ #else
+       beq     restore
+ #endif
+-1:    andi.   r0,r4,_TIF_NEED_RESCHED
++1:    andi.   r0,r4,_TIF_NEED_RESCHED_MASK
+       beq     2f
+       bl      restore_interrupts
+       SCHEDULE_USER
+@@ -745,10 +745,18 @@ resume_kernel:
+ 
+ #ifdef CONFIG_PREEMPT
+       /* Check if we need to preempt */
++      lwz     r8,TI_PREEMPT(r9)
++      cmpwi   0,r8,0          /* if non-zero, just restore regs and return */
++      bne     restore
+       andi.   r0,r4,_TIF_NEED_RESCHED
++      bne+    check_count
++
++      andi.   r0,r4,_TIF_NEED_RESCHED_LAZY
+       beq+    restore
++      lwz     r8,TI_PREEMPT_LAZY(r9)
++
+       /* Check that preempt_count() == 0 and interrupts are enabled */
+-      lwz     r8,TI_PREEMPT(r9)
++check_count:
+       cmpwi   cr1,r8,0
+       ld      r0,SOFTE(r1)
+       cmpdi   r0,0
+@@ -765,7 +773,7 @@ resume_kernel:
+       /* Re-test flags and eventually loop */
+       CURRENT_THREAD_INFO(r9, r1)
+       ld      r4,TI_FLAGS(r9)
+-      andi.   r0,r4,_TIF_NEED_RESCHED
++      andi.   r0,r4,_TIF_NEED_RESCHED_MASK
+       bne     1b
+ 
+       /*
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 290559df1e8b..070afa6da35d 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -614,6 +614,7 @@ void irq_ctx_init(void)
+       }
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq_own_stack(void)
+ {
+       struct thread_info *curtp, *irqtp;
+@@ -631,6 +632,7 @@ void do_softirq_own_stack(void)
+       if (irqtp->flags)
+               set_bits(irqtp->flags, &curtp->flags);
+ }
++#endif
+ 
+ irq_hw_number_t virq_to_hw(unsigned int virq)
+ {
+diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
+index ed3ab509faca..8b261416c070 100644
+--- a/arch/powerpc/kernel/misc_32.S
++++ b/arch/powerpc/kernel/misc_32.S
+@@ -40,6 +40,7 @@
+  * We store the saved ksp_limit in the unused part
+  * of the STACK_FRAME_OVERHEAD
+  */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ _GLOBAL(call_do_softirq)
+       mflr    r0
+       stw     r0,4(r1)
+@@ -56,6 +57,7 @@ _GLOBAL(call_do_softirq)
+       stw     r10,THREAD+KSP_LIMIT(r2)
+       mtlr    r0
+       blr
++#endif
+ 
+ /*
+  * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
+diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
+index db475d41b57a..96b7ef80e05d 100644
+--- a/arch/powerpc/kernel/misc_64.S
++++ b/arch/powerpc/kernel/misc_64.S
+@@ -30,6 +30,7 @@
+ 
+       .text
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ _GLOBAL(call_do_softirq)
+       mflr    r0
+       std     r0,16(r1)
+@@ -40,6 +41,7 @@ _GLOBAL(call_do_softirq)
+       ld      r0,16(r1)
+       mtlr    r0
+       blr
++#endif
+ 
+ _GLOBAL(call_do_irq)
+       mflr    r0
+diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
+index c2024ac9d4e8..2303788da7e1 100644
+--- a/arch/powerpc/kvm/Kconfig
++++ b/arch/powerpc/kvm/Kconfig
+@@ -172,6 +172,7 @@ config KVM_E500MC
+ config KVM_MPIC
+       bool "KVM in-kernel MPIC emulation"
+       depends on KVM && E500
++      depends on !PREEMPT_RT_FULL
+       select HAVE_KVM_IRQCHIP
+       select HAVE_KVM_IRQFD
+       select HAVE_KVM_IRQ_ROUTING
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index a7352b59e6f9..df34a6432873 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -114,11 +114,11 @@ static bool kvmppc_ipi_thread(int cpu)
+ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
+ {
+       int cpu;
+-      wait_queue_head_t *wqp;
++      struct swait_queue_head *wqp;
+ 
+       wqp = kvm_arch_vcpu_wq(vcpu);
+-      if (waitqueue_active(wqp)) {
+-              wake_up_interruptible(wqp);
++      if (swait_active(wqp)) {
++              swake_up(wqp);
+               ++vcpu->stat.halt_wakeup;
+       }
+ 
+@@ -707,8 +707,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
+               tvcpu->arch.prodded = 1;
+               smp_mb();
+               if (vcpu->arch.ceded) {
+-                      if (waitqueue_active(&vcpu->wq)) {
+-                              wake_up_interruptible(&vcpu->wq);
++                      if (swait_active(&vcpu->wq)) {
++                              swake_up(&vcpu->wq);
+                               vcpu->stat.halt_wakeup++;
+                       }
+               }
+@@ -1447,7 +1447,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
+       INIT_LIST_HEAD(&vcore->runnable_threads);
+       spin_lock_init(&vcore->lock);
+       spin_lock_init(&vcore->stoltb_lock);
+-      init_waitqueue_head(&vcore->wq);
++      init_swait_queue_head(&vcore->wq);
+       vcore->preempt_tb = TB_NIL;
+       vcore->lpcr = kvm->arch.lpcr;
+       vcore->first_vcpuid = core * threads_per_subcore;
+@@ -2519,10 +2519,9 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
+ {
+       struct kvm_vcpu *vcpu;
+       int do_sleep = 1;
++      DECLARE_SWAITQUEUE(wait);
+ 
+-      DEFINE_WAIT(wait);
+-
+-      prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
++      prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+ 
+       /*
+        * Check one last time for pending exceptions and ceded state after
+@@ -2536,7 +2535,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
+       }
+ 
+       if (!do_sleep) {
+-              finish_wait(&vc->wq, &wait);
++              finish_swait(&vc->wq, &wait);
+               return;
+       }
+ 
+@@ -2544,7 +2543,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
+       trace_kvmppc_vcore_blocked(vc, 0);
+       spin_unlock(&vc->lock);
+       schedule();
+-      finish_wait(&vc->wq, &wait);
++      finish_swait(&vc->wq, &wait);
+       spin_lock(&vc->lock);
+       vc->vcore_state = VCORE_INACTIVE;
+       trace_kvmppc_vcore_blocked(vc, 1);
+@@ -2600,7 +2599,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+                       kvmppc_start_thread(vcpu, vc);
+                       trace_kvm_guest_enter(vcpu);
+               } else if (vc->vcore_state == VCORE_SLEEPING) {
+-                      wake_up(&vc->wq);
++                      swake_up(&vc->wq);
+               }
+ 
+       }
+diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
+index 3f175e8aedb4..c4c02f91904c 100644
+--- a/arch/powerpc/platforms/ps3/device-init.c
++++ b/arch/powerpc/platforms/ps3/device-init.c
+@@ -752,7 +752,7 @@ static int ps3_notification_read_write(struct ps3_notification_device *dev,
+       }
+       pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);
+ 
+-      res = wait_event_interruptible(dev->done.wait,
++      res = swait_event_interruptible(dev->done.wait,
+                                      dev->done.done || kthread_should_stop());
+       if (kthread_should_stop())
+               res = -EINTR;
+diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
+index e9a983f40a24..bbdc539fb3c6 100644
+--- a/arch/s390/include/asm/kvm_host.h
++++ b/arch/s390/include/asm/kvm_host.h
+@@ -427,7 +427,7 @@ struct kvm_s390_irq_payload {
+ struct kvm_s390_local_interrupt {
+       spinlock_t lock;
+       struct kvm_s390_float_interrupt *float_int;
+-      wait_queue_head_t *wq;
++      struct swait_queue_head *wq;
+       atomic_t *cpuflags;
+       DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
+       struct kvm_s390_irq_payload irq;
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 6a75352f453c..cc862c486002 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -868,13 +868,13 @@ no_timer:
+ 
+ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
+ {
+-      if (waitqueue_active(&vcpu->wq)) {
++      if (swait_active(&vcpu->wq)) {
+               /*
+                * The vcpu gave up the cpu voluntarily, mark it as a good
+                * yield-candidate.
+                */
+               vcpu->preempted = true;
+-              wake_up_interruptible(&vcpu->wq);
++              swake_up(&vcpu->wq);
+               vcpu->stat.halt_wakeup++;
+       }
+ }
+diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
+index 6c0378c0b8b5..abd58b4dff97 100644
+--- a/arch/sh/kernel/irq.c
++++ b/arch/sh/kernel/irq.c
+@@ -147,6 +147,7 @@ void irq_ctx_exit(int cpu)
+       hardirq_ctx[cpu] = NULL;
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq_own_stack(void)
+ {
+       struct thread_info *curctx;
+@@ -174,6 +175,7 @@ void do_softirq_own_stack(void)
+                 "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+       );
+ }
++#endif
+ #else
+ static inline void handle_one_irq(unsigned int irq)
+ {
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index 56442d2d7bbc..8c9598f534c9 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -189,12 +189,10 @@ config NR_CPUS
+ source kernel/Kconfig.hz
+ 
+ config RWSEM_GENERIC_SPINLOCK
+-      bool
+-      default y if SPARC32
++      def_bool PREEMPT_RT_FULL
+ 
+ config RWSEM_XCHGADD_ALGORITHM
+-      bool
+-      default y if SPARC64
++      def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
+ 
+ config GENERIC_HWEIGHT
+       bool
+diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
+index e22416ce56ea..d359de71153a 100644
+--- a/arch/sparc/kernel/irq_64.c
++++ b/arch/sparc/kernel/irq_64.c
+@@ -854,6 +854,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
+       set_irq_regs(old_regs);
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq_own_stack(void)
+ {
+       void *orig_sp, *sp = softirq_stack[smp_processor_id()];
+@@ -868,6 +869,7 @@ void do_softirq_own_stack(void)
+       __asm__ __volatile__("mov %0, %%sp"
+                            : : "r" (orig_sp));
+ }
++#endif
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
+ void fixup_irqs(void)
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 436639a31624..6ee1dd0deadc 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -17,6 +17,7 @@ config X86_64
+ ### Arch settings
+ config X86
+       def_bool y
++      select HAVE_PREEMPT_LAZY
+       select ACPI_LEGACY_TABLES_LOOKUP        if ACPI
+       select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
+       select ANON_INODES
+@@ -212,8 +213,11 @@ config ARCH_MAY_HAVE_PC_FDC
+       def_bool y
+       depends on ISA_DMA_API
+ 
++config RWSEM_GENERIC_SPINLOCK
++      def_bool PREEMPT_RT_FULL
++
+ config RWSEM_XCHGADD_ALGORITHM
+-      def_bool y
++      def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
+ 
+ config GENERIC_CALIBRATE_DELAY
+       def_bool y
+@@ -848,7 +852,7 @@ config IOMMU_HELPER
+ config MAXSMP
+       bool "Enable Maximum number of SMP Processors and NUMA Nodes"
+       depends on X86_64 && SMP && DEBUG_KERNEL
+-      select CPUMASK_OFFSTACK
++      select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
+       ---help---
+         Enable maximum number of CPUS and NUMA Nodes for this architecture.
+         If unsure, say N.
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index 3633ad6145c5..c6d5458ee7f9 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -383,14 +383,14 @@ static int ecb_encrypt(struct blkcipher_desc *desc,
+       err = blkcipher_walk_virt(desc, &walk);
+       desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ 
+-      kernel_fpu_begin();
+       while ((nbytes = walk.nbytes)) {
++              kernel_fpu_begin();
+               aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+-                            nbytes & AES_BLOCK_MASK);
++                              nbytes & AES_BLOCK_MASK);
++              kernel_fpu_end();
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+-      kernel_fpu_end();
+ 
+       return err;
+ }
+@@ -407,14 +407,14 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
+       err = blkcipher_walk_virt(desc, &walk);
+       desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ 
+-      kernel_fpu_begin();
+       while ((nbytes = walk.nbytes)) {
++              kernel_fpu_begin();
+               aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+                             nbytes & AES_BLOCK_MASK);
++              kernel_fpu_end();
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+-      kernel_fpu_end();
+ 
+       return err;
+ }
+@@ -431,14 +431,14 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
+       err = blkcipher_walk_virt(desc, &walk);
+       desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ 
+-      kernel_fpu_begin();
+       while ((nbytes = walk.nbytes)) {
++              kernel_fpu_begin();
+               aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+                             nbytes & AES_BLOCK_MASK, walk.iv);
++              kernel_fpu_end();
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+-      kernel_fpu_end();
+ 
+       return err;
+ }
+@@ -455,14 +455,14 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
+       err = blkcipher_walk_virt(desc, &walk);
+       desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ 
+-      kernel_fpu_begin();
+       while ((nbytes = walk.nbytes)) {
++              kernel_fpu_begin();
+               aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+                             nbytes & AES_BLOCK_MASK, walk.iv);
++              kernel_fpu_end();
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+-      kernel_fpu_end();
+ 
+       return err;
+ }
+@@ -514,18 +514,20 @@ static int ctr_crypt(struct blkcipher_desc *desc,
+       err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+       desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ 
+-      kernel_fpu_begin();
+       while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
++              kernel_fpu_begin();
+               aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+                                     nbytes & AES_BLOCK_MASK, walk.iv);
++              kernel_fpu_end();
+               nbytes &= AES_BLOCK_SIZE - 1;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+       if (walk.nbytes) {
++              kernel_fpu_begin();
+               ctr_crypt_final(ctx, &walk);
++              kernel_fpu_end();
+               err = blkcipher_walk_done(desc, &walk, 0);
+       }
+-      kernel_fpu_end();
+ 
+       return err;
+ }
+diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
+index 8648158f3916..d7699130ee36 100644
+--- a/arch/x86/crypto/cast5_avx_glue.c
++++ b/arch/x86/crypto/cast5_avx_glue.c
+@@ -59,7 +59,7 @@ static inline void cast5_fpu_end(bool fpu_enabled)
+ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+                    bool enc)
+ {
+-      bool fpu_enabled = false;
++      bool fpu_enabled;
+       struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       const unsigned int bsize = CAST5_BLOCK_SIZE;
+       unsigned int nbytes;
+@@ -75,7 +75,7 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+               u8 *wsrc = walk->src.virt.addr;
+               u8 *wdst = walk->dst.virt.addr;
+ 
+-              fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
++              fpu_enabled = cast5_fpu_begin(false, nbytes);
+ 
+               /* Process multi-block batch */
+               if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
+@@ -103,10 +103,9 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+               } while (nbytes >= bsize);
+ 
+ done:
++              cast5_fpu_end(fpu_enabled);
+               err = blkcipher_walk_done(desc, walk, nbytes);
+       }
+-
+-      cast5_fpu_end(fpu_enabled);
+       return err;
+ }
+ 
+@@ -227,7 +226,7 @@ done:
+ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+                      struct scatterlist *src, unsigned int nbytes)
+ {
+-      bool fpu_enabled = false;
++      bool fpu_enabled;
+       struct blkcipher_walk walk;
+       int err;
+ 
+@@ -236,12 +235,11 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+       desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ 
+       while ((nbytes = walk.nbytes)) {
+-              fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
++              fpu_enabled = cast5_fpu_begin(false, nbytes);
+               nbytes = __cbc_decrypt(desc, &walk);
++              cast5_fpu_end(fpu_enabled);
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+-
+-      cast5_fpu_end(fpu_enabled);
+       return err;
+ }
+ 
+@@ -311,7 +309,7 @@ done:
+ static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+                    struct scatterlist *src, unsigned int nbytes)
+ {
+-      bool fpu_enabled = false;
++      bool fpu_enabled;
+       struct blkcipher_walk walk;
+       int err;
+ 
+@@ -320,13 +318,12 @@ static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+       desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ 
+       while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
+-              fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
++              fpu_enabled = cast5_fpu_begin(false, nbytes);
+               nbytes = __ctr_crypt(desc, &walk);
++              cast5_fpu_end(fpu_enabled);
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+ 
+-      cast5_fpu_end(fpu_enabled);
+-
+       if (walk.nbytes) {
+               ctr_crypt_final(desc, &walk);
+               err = blkcipher_walk_done(desc, &walk, 0);
+diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
+index 6a85598931b5..3a506ce7ed93 100644
+--- a/arch/x86/crypto/glue_helper.c
++++ b/arch/x86/crypto/glue_helper.c
+@@ -39,7 +39,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
+       void *ctx = crypto_blkcipher_ctx(desc->tfm);
+       const unsigned int bsize = 128 / 8;
+       unsigned int nbytes, i, func_bytes;
+-      bool fpu_enabled = false;
++      bool fpu_enabled;
+       int err;
+ 
+       err = blkcipher_walk_virt(desc, walk);
+@@ -49,7 +49,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
+               u8 *wdst = walk->dst.virt.addr;
+ 
+               fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+-                                           desc, fpu_enabled, nbytes);
++                                           desc, false, nbytes);
+ 
+               for (i = 0; i < gctx->num_funcs; i++) {
+                       func_bytes = bsize * gctx->funcs[i].num_blocks;
+@@ -71,10 +71,10 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
+               }
+ 
+ done:
++              glue_fpu_end(fpu_enabled);
+               err = blkcipher_walk_done(desc, walk, nbytes);
+       }
+ 
+-      glue_fpu_end(fpu_enabled);
+       return err;
+ }
+ 
+@@ -194,7 +194,7 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
+                           struct scatterlist *src, unsigned int nbytes)
+ {
+       const unsigned int bsize = 128 / 8;
+-      bool fpu_enabled = false;
++      bool fpu_enabled;
+       struct blkcipher_walk walk;
+       int err;
+ 
+@@ -203,12 +203,12 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
+ 
+       while ((nbytes = walk.nbytes)) {
+               fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+-                                           desc, fpu_enabled, nbytes);
++                                           desc, false, nbytes);
+               nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
++              glue_fpu_end(fpu_enabled);
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+ 
+-      glue_fpu_end(fpu_enabled);
+       return err;
+ }
+ EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
+@@ -277,7 +277,7 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
+                         struct scatterlist *src, unsigned int nbytes)
+ {
+       const unsigned int bsize = 128 / 8;
+-      bool fpu_enabled = false;
++      bool fpu_enabled;
+       struct blkcipher_walk walk;
+       int err;
+ 
+@@ -286,13 +286,12 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
+ 
+       while ((nbytes = walk.nbytes) >= bsize) {
+               fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+-                                           desc, fpu_enabled, nbytes);
++                                           desc, false, nbytes);
+               nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
++              glue_fpu_end(fpu_enabled);
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+ 
+-      glue_fpu_end(fpu_enabled);
+-
+       if (walk.nbytes) {
+               glue_ctr_crypt_final_128bit(
+                       gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
+@@ -347,7 +346,7 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
+                         void *tweak_ctx, void *crypt_ctx)
+ {
+       const unsigned int bsize = 128 / 8;
+-      bool fpu_enabled = false;
++      bool fpu_enabled;
+       struct blkcipher_walk walk;
+       int err;
+ 
+@@ -360,21 +359,21 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
+ 
+       /* set minimum length to bsize, for tweak_fn */
+       fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+-                                   desc, fpu_enabled,
++                                   desc, false,
+                                    nbytes < bsize ? bsize : nbytes);
+-
+       /* calculate first value of T */
+       tweak_fn(tweak_ctx, walk.iv, walk.iv);
++      glue_fpu_end(fpu_enabled);
+ 
+       while (nbytes) {
++              fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
++                              desc, false, nbytes);
+               nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);
+ 
++              glue_fpu_end(fpu_enabled);
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+               nbytes = walk.nbytes;
+       }
+-
+-      glue_fpu_end(fpu_enabled);
+-
+       return err;
+ }
+ EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 1a4477cedc49..75a301b6a5b6 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -220,7 +220,7 @@ long syscall_trace_enter(struct pt_regs *regs)
+ 
+ #define EXIT_TO_USERMODE_LOOP_FLAGS                           \
+       (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |   \
+-       _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)
++       _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY)
+ 
+ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
+ {
+@@ -236,9 +236,16 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
+               /* We have work to do. */
+               local_irq_enable();
+ 
+-              if (cached_flags & _TIF_NEED_RESCHED)
++              if (cached_flags & _TIF_NEED_RESCHED_MASK)
+                       schedule();
+ 
++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
++              if (unlikely(current->forced_info.si_signo)) {
++                      struct task_struct *t = current;
++                      force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
++                      t->forced_info.si_signo = 0;
++              }
++#endif
+               if (cached_flags & _TIF_UPROBE)
+                       uprobe_notify_resume(regs);
+ 
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index f3b6d54e0042..2d722ee01fc2 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -278,8 +278,24 @@ END(ret_from_exception)
+ ENTRY(resume_kernel)
+       DISABLE_INTERRUPTS(CLBR_ANY)
+ need_resched:
++      # preempt count == 0 + NEED_RS set?
+       cmpl    $0, PER_CPU_VAR(__preempt_count)
++#ifndef CONFIG_PREEMPT_LAZY
+       jnz     restore_all
++#else
++      jz test_int_off
++
++      # atleast preempt count == 0 ?
++      cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++      jne restore_all
++
++      cmpl $0,TI_preempt_lazy_count(%ebp)     # non-zero preempt_lazy_count ?
++      jnz restore_all
++
++      testl $_TIF_NEED_RESCHED_LAZY, TI_flags(%ebp)
++      jz restore_all
++test_int_off:
++#endif
+       testl   $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
+       jz      restore_all
+       call    preempt_schedule_irq
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index a55697d19824..316081a2ca85 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -579,7 +579,23 @@ retint_kernel:
+       bt      $9, EFLAGS(%rsp)                /* were interrupts off? */
+       jnc     1f
+ 0:    cmpl    $0, PER_CPU_VAR(__preempt_count)
++#ifndef CONFIG_PREEMPT_LAZY
+       jnz     1f
++#else
++      jz      do_preempt_schedule_irq
++
++      # atleast preempt count == 0 ?
++      cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++      jnz     1f
++
++      GET_THREAD_INFO(%rcx)
++      cmpl    $0, TI_preempt_lazy_count(%rcx)
++      jnz     1f
++
++      bt      $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx)
++      jnc     1f
++do_preempt_schedule_irq:
++#endif
+       call    preempt_schedule_irq
+       jmp     0b
+ 1:
+@@ -867,6 +883,7 @@ bad_gs:
+       jmp     2b
+       .previous
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /* Call softirq on interrupt stack. Interrupts are off. */
+ ENTRY(do_softirq_own_stack)
+       pushq   %rbp
+@@ -879,6 +896,7 @@ ENTRY(do_softirq_own_stack)
+       decl    PER_CPU_VAR(irq_count)
+       ret
+ END(do_softirq_own_stack)
++#endif
+ 
+ #ifdef CONFIG_XEN
+ idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
+diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
+index 01bcde84d3e4..6f432adc55cd 100644
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -79,17 +79,46 @@ static __always_inline void __preempt_count_sub(int val)
+  * a decrement which hits zero means we have no preempt_count and should
+  * reschedule.
+  */
+-static __always_inline bool __preempt_count_dec_and_test(void)
++static __always_inline bool ____preempt_count_dec_and_test(void)
+ {
+       GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
+ }
+ 
++static __always_inline bool __preempt_count_dec_and_test(void)
++{
++      if (____preempt_count_dec_and_test())
++              return true;
++#ifdef CONFIG_PREEMPT_LAZY
++      if (current_thread_info()->preempt_lazy_count)
++              return false;
++      return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
++      return false;
++#endif
++}
++
+ /*
+  * Returns true when we need to resched and can (barring IRQ state).
+  */
+ static __always_inline bool should_resched(int preempt_offset)
+ {
++#ifdef CONFIG_PREEMPT_LAZY
++      u32 tmp;
++
++      tmp = raw_cpu_read_4(__preempt_count);
++      if (tmp == preempt_offset)
++              return true;
++
++      /* preempt count == 0 ? */
++      tmp &= ~PREEMPT_NEED_RESCHED;
++      if (tmp)
++              return false;
++      if (current_thread_info()->preempt_lazy_count)
++              return false;
++      return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
+       return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
++#endif
+ }
+ 
+ #ifdef CONFIG_PREEMPT
+diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
+index 2138c9ae19ee..3f5b4ee2e2c1 100644
+--- a/arch/x86/include/asm/signal.h
++++ b/arch/x86/include/asm/signal.h
+@@ -23,6 +23,19 @@ typedef struct {
+       unsigned long sig[_NSIG_WORDS];
+ } sigset_t;
+ 
++/*
++ * Because some traps use the IST stack, we must keep preemption
++ * disabled while calling do_trap(), but do_trap() may call
++ * force_sig_info() which will grab the signal spin_locks for the
++ * task, which in PREEMPT_RT_FULL are mutexes.  By defining
++ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set
++ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the
++ * trap.
++ */
++#if defined(CONFIG_PREEMPT_RT_FULL)
++#define ARCH_RT_DELAYS_SIGNAL_SEND
++#endif
++
+ #ifndef CONFIG_COMPAT
+ typedef sigset_t compat_sigset_t;
+ #endif
+diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
+index 58505f01962f..02fa39652cd6 100644
+--- a/arch/x86/include/asm/stackprotector.h
++++ b/arch/x86/include/asm/stackprotector.h
+@@ -59,7 +59,7 @@
+  */
+ static __always_inline void boot_init_stack_canary(void)
+ {
+-      u64 canary;
++      u64 uninitialized_var(canary);
+       u64 tsc;
+ 
+ #ifdef CONFIG_X86_64
+@@ -70,8 +70,15 @@ static __always_inline void boot_init_stack_canary(void)
+        * of randomness. The TSC only matters for very early init,
+        * there it already has some randomness on most systems. Later
+        * on during the bootup the random pool has true entropy too.
++       *
++       * For preempt-rt we need to weaken the randomness a bit, as
++       * we can't call into the random generator from atomic context
++       * due to locking constraints. We just leave canary
++       * uninitialized and use the TSC based randomness on top of it.
+        */
++#ifndef CONFIG_PREEMPT_RT_FULL
+       get_random_bytes(&canary, sizeof(canary));
++#endif
+       tsc = rdtsc();
+       canary += tsc + (tsc << 32UL);
+ 
+diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
+index c7b551028740..ddb63bd90e3c 100644
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -58,6 +58,8 @@ struct thread_info {
+       __u32                   status;         /* thread synchronous flags */
+       __u32                   cpu;            /* current CPU */
+       mm_segment_t            addr_limit;
++      int                     preempt_lazy_count;     /* 0 => lazy preemptable
++                                                        <0 => BUG */
+       unsigned int            sig_on_uaccess_error:1;
+       unsigned int            uaccess_err:1;  /* uaccess failed */
+ };
+@@ -95,6 +97,7 @@ struct thread_info {
+ #define TIF_SYSCALL_EMU               6       /* syscall emulation active */
+ #define TIF_SYSCALL_AUDIT     7       /* syscall auditing active */
+ #define TIF_SECCOMP           8       /* secure computing */
++#define TIF_NEED_RESCHED_LAZY 9       /* lazy rescheduling necessary */
+ #define TIF_USER_RETURN_NOTIFY        11      /* notify kernel of userspace return */
+ #define TIF_UPROBE            12      /* breakpointed or singlestepping */
+ #define TIF_NOTSC             16      /* TSC is not accessible in userland */
+@@ -119,6 +122,7 @@ struct thread_info {
+ #define _TIF_SYSCALL_EMU      (1 << TIF_SYSCALL_EMU)
+ #define _TIF_SYSCALL_AUDIT    (1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_SECCOMP          (1 << TIF_SECCOMP)
++#define _TIF_NEED_RESCHED_LAZY        (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_USER_RETURN_NOTIFY       (1 << TIF_USER_RETURN_NOTIFY)
+ #define _TIF_UPROBE           (1 << TIF_UPROBE)
+ #define _TIF_NOTSC            (1 << TIF_NOTSC)
+@@ -152,6 +156,8 @@ struct thread_info {
+ #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
+ #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
+ 
++#define _TIF_NEED_RESCHED_MASK        (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
++
+ #define STACK_WARN            (THREAD_SIZE/8)
+ 
+ /*
+diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
+index fc808b83fccb..ebb40118abf5 100644
+--- a/arch/x86/include/asm/uv/uv_bau.h
++++ b/arch/x86/include/asm/uv/uv_bau.h
+@@ -615,9 +615,9 @@ struct bau_control {
+       cycles_t                send_message;
+       cycles_t                period_end;
+       cycles_t                period_time;
+-      spinlock_t              uvhub_lock;
+-      spinlock_t              queue_lock;
+-      spinlock_t              disable_lock;
++      raw_spinlock_t          uvhub_lock;
++      raw_spinlock_t          queue_lock;
++      raw_spinlock_t          disable_lock;
+       /* tunables */
+       int                     max_concurr;
+       int                     max_concurr_const;
+@@ -776,15 +776,15 @@ static inline int atom_asr(short i, struct atomic_short *v)
+  * to be lowered below the current 'v'.  atomic_add_unless can only stop
+  * on equal.
+  */
+-static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
++static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u)
+ {
+-      spin_lock(lock);
++      raw_spin_lock(lock);
+       if (atomic_read(v) >= u) {
+-              spin_unlock(lock);
++              raw_spin_unlock(lock);
+               return 0;
+       }
+       atomic_inc(v);
+-      spin_unlock(lock);
++      raw_spin_unlock(lock);
+       return 1;
+ }
+ 
+diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
+index ea7074784cc4..01ec643ce66e 100644
+--- a/arch/x86/include/asm/uv/uv_hub.h
++++ b/arch/x86/include/asm/uv/uv_hub.h
+@@ -492,7 +492,7 @@ struct uv_blade_info {
+       unsigned short  nr_online_cpus;
+       unsigned short  pnode;
+       short           memory_nid;
+-      spinlock_t      nmi_lock;       /* obsolete, see uv_hub_nmi */
++      raw_spinlock_t  nmi_lock;       /* obsolete, see uv_hub_nmi */
+       unsigned long   nmi_count;      /* obsolete, see uv_hub_nmi */
+ };
+ extern struct uv_blade_info *uv_blade_info;
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index fdb0fbfb1197..678c711e2a16 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1711,7 +1711,8 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
+ static inline bool ioapic_irqd_mask(struct irq_data *data)
+ {
+       /* If we are moving the irq we need to mask it */
+-      if (unlikely(irqd_is_setaffinity_pending(data))) {
++      if (unlikely(irqd_is_setaffinity_pending(data) &&
++                   !irqd_irq_inprogress(data))) {
+               mask_ioapic_irq(data);
+               return true;
+       }
+diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
+index 4a139465f1d4..ad2afff02b36 100644
+--- a/arch/x86/kernel/apic/x2apic_uv_x.c
++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
+@@ -947,7 +947,7 @@ void __init uv_system_init(void)
+                       uv_blade_info[blade].pnode = pnode;
+                       uv_blade_info[blade].nr_possible_cpus = 0;
+                       uv_blade_info[blade].nr_online_cpus = 0;
+-                      spin_lock_init(&uv_blade_info[blade].nmi_lock);
++                      raw_spin_lock_init(&uv_blade_info[blade].nmi_lock);
+                       min_pnode = min(pnode, min_pnode);
+                       max_pnode = max(pnode, max_pnode);
+                       blade++;
+diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
+index 439df975bc7a..b7954ddd6a0a 100644
+--- a/arch/x86/kernel/asm-offsets.c
++++ b/arch/x86/kernel/asm-offsets.c
+@@ -32,6 +32,7 @@ void common(void) {
+       OFFSET(TI_flags, thread_info, flags);
+       OFFSET(TI_status, thread_info, status);
+       OFFSET(TI_addr_limit, thread_info, addr_limit);
++      OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count);
+ 
+       BLANK();
+       OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
+@@ -89,4 +90,5 @@ void common(void) {
+ 
+       BLANK();
+       DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
++      DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
+ }
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 7e8a736d09db..430a4ec07811 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -41,6 +41,8 @@
+ #include <linux/debugfs.h>
+ #include <linux/irq_work.h>
+ #include <linux/export.h>
++#include <linux/jiffies.h>
++#include <linux/swork.h>
+ 
+ #include <asm/processor.h>
+ #include <asm/traps.h>
+@@ -1236,7 +1238,7 @@ void mce_log_therm_throt_event(__u64 status)
+ static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
+ 
+ static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
+-static DEFINE_PER_CPU(struct timer_list, mce_timer);
++static DEFINE_PER_CPU(struct hrtimer, mce_timer);
+ 
+ static unsigned long mce_adjust_timer_default(unsigned long interval)
+ {
+@@ -1245,32 +1247,18 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
+ 
+ static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
+ 
+-static void __restart_timer(struct timer_list *t, unsigned long interval)
++static enum hrtimer_restart __restart_timer(struct hrtimer *timer, unsigned long interval)
+ {
+-      unsigned long when = jiffies + interval;
+-      unsigned long flags;
+-
+-      local_irq_save(flags);
+-
+-      if (timer_pending(t)) {
+-              if (time_before(when, t->expires))
+-                      mod_timer_pinned(t, when);
+-      } else {
+-              t->expires = round_jiffies(when);
+-              add_timer_on(t, smp_processor_id());
+-      }
+-
+-      local_irq_restore(flags);
++      if (!interval)
++              return HRTIMER_NORESTART;
++      hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(interval)));
++      return HRTIMER_RESTART;
+ }
+ 
+-static void mce_timer_fn(unsigned long data)
++static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
+ {
+-      struct timer_list *t = this_cpu_ptr(&mce_timer);
+-      int cpu = smp_processor_id();
+       unsigned long iv;
+ 
+-      WARN_ON(cpu != data);
+-
+       iv = __this_cpu_read(mce_next_interval);
+ 
+       if (mce_available(this_cpu_ptr(&cpu_info))) {
+@@ -1293,7 +1281,7 @@ static void mce_timer_fn(unsigned long data)
+ 
+ done:
+       __this_cpu_write(mce_next_interval, iv);
+-      __restart_timer(t, iv);
++      return __restart_timer(timer, iv);
+ }
+ 
+ /*
+@@ -1301,7 +1289,7 @@ done:
+  */
+ void mce_timer_kick(unsigned long interval)
+ {
+-      struct timer_list *t = this_cpu_ptr(&mce_timer);
++      struct hrtimer *t = this_cpu_ptr(&mce_timer);
+       unsigned long iv = __this_cpu_read(mce_next_interval);
+ 
+       __restart_timer(t, interval);
+@@ -1316,7 +1304,7 @@ static void mce_timer_delete_all(void)
+       int cpu;
+ 
+       for_each_online_cpu(cpu)
+-              del_timer_sync(&per_cpu(mce_timer, cpu));
++              hrtimer_cancel(&per_cpu(mce_timer, cpu));
+ }
+ 
+ static void mce_do_trigger(struct work_struct *work)
+@@ -1326,6 +1314,56 @@ static void mce_do_trigger(struct work_struct *work)
+ 
+ static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
+ 
++static void __mce_notify_work(struct swork_event *event)
++{
++      /* Not more than two messages every minute */
++      static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
++
++      /* wake processes polling /dev/mcelog */
++      wake_up_interruptible(&mce_chrdev_wait);
++
++      /*
++       * There is no risk of missing notifications because
++       * work_pending is always cleared before the function is
++       * executed.
++       */
++      if (mce_helper[0] && !work_pending(&mce_trigger_work))
++              schedule_work(&mce_trigger_work);
++
++      if (__ratelimit(&ratelimit))
++              pr_info(HW_ERR "Machine check events logged\n");
++}
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++static bool notify_work_ready __read_mostly;
++static struct swork_event notify_work;
++
++static int mce_notify_work_init(void)
++{
++      int err;
++
++      err = swork_get();
++      if (err)
++              return err;
++
++      INIT_SWORK(&notify_work, __mce_notify_work);
++      notify_work_ready = true;
++      return 0;
++}
++
++static void mce_notify_work(void)
++{
++      if (notify_work_ready)
++              swork_queue(&notify_work);
++}
++#else
++static void mce_notify_work(void)
++{
++      __mce_notify_work(NULL);
++}
++static inline int mce_notify_work_init(void) { return 0; }
++#endif
++
+ /*
+  * Notify the user(s) about new machine check events.
+  * Can be called from interrupt context, but not from machine check/NMI
+@@ -1333,19 +1371,8 @@ static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
+  */
+ int mce_notify_irq(void)
+ {
+-      /* Not more than two messages every minute */
+-      static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
+-
+       if (test_and_clear_bit(0, &mce_need_notify)) {
+-              /* wake processes polling /dev/mcelog */
+-              wake_up_interruptible(&mce_chrdev_wait);
+-
+-              if (mce_helper[0])
+-                      schedule_work(&mce_trigger_work);
+-
+-              if (__ratelimit(&ratelimit))
+-                      pr_info(HW_ERR "Machine check events logged\n");
+-
++              mce_notify_work();
+               return 1;
+       }
+       return 0;
+@@ -1639,7 +1666,7 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
+       }
+ }
+ 
+-static void mce_start_timer(unsigned int cpu, struct timer_list *t)
++static void mce_start_timer(unsigned int cpu, struct hrtimer *t)
+ {
+       unsigned long iv = check_interval * HZ;
+ 
+@@ -1648,16 +1675,17 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
+ 
+       per_cpu(mce_next_interval, cpu) = iv;
+ 
+-      t->expires = round_jiffies(jiffies + iv);
+-      add_timer_on(t, cpu);
++      hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
++                      0, HRTIMER_MODE_REL_PINNED);
+ }
+ 
+ static void __mcheck_cpu_init_timer(void)
+ {
+-      struct timer_list *t = this_cpu_ptr(&mce_timer);
++      struct hrtimer *t = this_cpu_ptr(&mce_timer);
+       unsigned int cpu = smp_processor_id();
+ 
+-      setup_timer(t, mce_timer_fn, cpu);
++      hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++      t->function = mce_timer_fn;
+       mce_start_timer(cpu, t);
+ }
+ 
+@@ -2376,6 +2404,8 @@ static void mce_disable_cpu(void *h)
+       if (!mce_available(raw_cpu_ptr(&cpu_info)))
+               return;
+ 
++      hrtimer_cancel(this_cpu_ptr(&mce_timer));
++
+       if (!(action & CPU_TASKS_FROZEN))
+               cmci_clear();
+ 
+@@ -2398,6 +2428,7 @@ static void mce_reenable_cpu(void *h)
+               if (b->init)
+                       wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
+       }
++      __mcheck_cpu_init_timer();
+ }
+ 
+ /* Get notified when a cpu comes on/off. Be hotplug friendly. */
+@@ -2405,7 +2436,6 @@ static int
+ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ {
+       unsigned int cpu = (unsigned long)hcpu;
+-      struct timer_list *t = &per_cpu(mce_timer, cpu);
+ 
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_ONLINE:
+@@ -2425,11 +2455,9 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+               break;
+       case CPU_DOWN_PREPARE:
+               smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
+-              del_timer_sync(t);
+               break;
+       case CPU_DOWN_FAILED:
+               smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
+-              mce_start_timer(cpu, t);
+               break;
+       }
+ 
+@@ -2468,6 +2496,10 @@ static __init int mcheck_init_device(void)
+               goto err_out;
+       }
+ 
++      err = mce_notify_work_init();
++      if (err)
++              goto err_out;
++
+       if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
+               err = -ENOMEM;
+               goto err_out;
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+index ed446bdcbf31..d2ac364e2118 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+@@ -117,7 +117,7 @@ static struct perf_pmu_events_attr event_attr_##v = {			\
+ };
+ 
+ struct rapl_pmu {
+-      spinlock_t       lock;
++      raw_spinlock_t   lock;
+       int              n_active; /* number of active events */
+       struct list_head active_list;
+       struct pmu       *pmu; /* pointer to rapl_pmu_class */
+@@ -220,13 +220,13 @@ static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
+       if (!pmu->n_active)
+               return HRTIMER_NORESTART;
+ 
+-      spin_lock_irqsave(&pmu->lock, flags);
++      raw_spin_lock_irqsave(&pmu->lock, flags);
+ 
+       list_for_each_entry(event, &pmu->active_list, active_entry) {
+               rapl_event_update(event);
+       }
+ 
+-      spin_unlock_irqrestore(&pmu->lock, flags);
++      raw_spin_unlock_irqrestore(&pmu->lock, flags);
+ 
+       hrtimer_forward_now(hrtimer, pmu->timer_interval);
+ 
+@@ -263,9 +263,9 @@ static void rapl_pmu_event_start(struct perf_event *event, int mode)
+       struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
+       unsigned long flags;
+ 
+-      spin_lock_irqsave(&pmu->lock, flags);
++      raw_spin_lock_irqsave(&pmu->lock, flags);
+       __rapl_pmu_event_start(pmu, event);
+-      spin_unlock_irqrestore(&pmu->lock, flags);
++      raw_spin_unlock_irqrestore(&pmu->lock, flags);
+ }
+ 
+ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
+@@ -274,7 +274,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
+       struct hw_perf_event *hwc = &event->hw;
+       unsigned long flags;
+ 
+-      spin_lock_irqsave(&pmu->lock, flags);
++      raw_spin_lock_irqsave(&pmu->lock, flags);
+ 
+       /* mark event as deactivated and stopped */
+       if (!(hwc->state & PERF_HES_STOPPED)) {
+@@ -299,7 +299,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
+               hwc->state |= PERF_HES_UPTODATE;
+       }
+ 
+-      spin_unlock_irqrestore(&pmu->lock, flags);
++      raw_spin_unlock_irqrestore(&pmu->lock, flags);
+ }
+ 
+ static int rapl_pmu_event_add(struct perf_event *event, int mode)
+@@ -308,14 +308,14 @@ static int rapl_pmu_event_add(struct perf_event *event, int mode)
+       struct hw_perf_event *hwc = &event->hw;
+       unsigned long flags;
+ 
+-      spin_lock_irqsave(&pmu->lock, flags);
++      raw_spin_lock_irqsave(&pmu->lock, flags);
+ 
+       hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+ 
+       if (mode & PERF_EF_START)
+               __rapl_pmu_event_start(pmu, event);
+ 
+-      spin_unlock_irqrestore(&pmu->lock, flags);
++      raw_spin_unlock_irqrestore(&pmu->lock, flags);
+ 
+       return 0;
+ }
+@@ -603,7 +603,7 @@ static int rapl_cpu_prepare(int cpu)
+       pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+       if (!pmu)
+               return -1;
+-      spin_lock_init(&pmu->lock);
++      raw_spin_lock_init(&pmu->lock);
+ 
+       INIT_LIST_HEAD(&pmu->active_list);
+ 
+diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
+index 464ffd69b92e..00db1aad1548 100644
+--- a/arch/x86/kernel/dumpstack_32.c
++++ b/arch/x86/kernel/dumpstack_32.c
+@@ -42,7 +42,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+               unsigned long *stack, unsigned long bp,
+               const struct stacktrace_ops *ops, void *data)
+ {
+-      const unsigned cpu = get_cpu();
++      const unsigned cpu = get_cpu_light();
+       int graph = 0;
+       u32 *prev_esp;
+ 
+@@ -86,7 +86,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+                       break;
+               touch_nmi_watchdog();
+       }
+-      put_cpu();
++      put_cpu_light();
+ }
+ EXPORT_SYMBOL(dump_trace);
+ 
+diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
+index 5f1c6266eb30..c331e3fef465 100644
+--- a/arch/x86/kernel/dumpstack_64.c
++++ b/arch/x86/kernel/dumpstack_64.c
+@@ -152,7 +152,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+               unsigned long *stack, unsigned long bp,
+               const struct stacktrace_ops *ops, void *data)
+ {
+-      const unsigned cpu = get_cpu();
++      const unsigned cpu = get_cpu_light();
+       struct thread_info *tinfo;
+       unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
+       unsigned long dummy;
+@@ -241,7 +241,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+        * This handles the process stack:
+        */
+       bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
+-      put_cpu();
++      put_cpu_light();
+ }
+ EXPORT_SYMBOL(dump_trace);
+ 
+@@ -255,7 +255,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
+       int cpu;
+       int i;
+ 
+-      preempt_disable();
++      migrate_disable();
+       cpu = smp_processor_id();
+ 
+       irq_stack_end   = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
+@@ -291,7 +291,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
+                       pr_cont(" %016lx", *stack++);
+               touch_nmi_watchdog();
+       }
+-      preempt_enable();
++      migrate_enable();
+ 
+       pr_cont("\n");
+       show_trace_log_lvl(task, regs, sp, bp, log_lvl);
+diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
+index 38da8f29a9c8..ce71f7098f15 100644
+--- a/arch/x86/kernel/irq_32.c
++++ b/arch/x86/kernel/irq_32.c
+@@ -128,6 +128,7 @@ void irq_ctx_init(int cpu)
+              cpu, per_cpu(hardirq_stack, cpu),  per_cpu(softirq_stack, cpu));
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq_own_stack(void)
+ {
+       struct thread_info *curstk;
+@@ -146,6 +147,7 @@ void do_softirq_own_stack(void)
+ 
+       call_on_stack(__do_softirq, isp);
+ }
++#endif
+ 
+ bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
+ {
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 47190bd399e7..807950860fb7 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -36,6 +36,7 @@
+ #include <linux/kprobes.h>
+ #include <linux/debugfs.h>
+ #include <linux/nmi.h>
++#include <linux/swait.h>
+ #include <asm/timer.h>
+ #include <asm/cpu.h>
+ #include <asm/traps.h>
+@@ -91,14 +92,14 @@ static void kvm_io_delay(void)
+ 
+ struct kvm_task_sleep_node {
+       struct hlist_node link;
+-      wait_queue_head_t wq;
++      struct swait_queue_head wq;
+       u32 token;
+       int cpu;
+       bool halted;
+ };
+ 
+ static struct kvm_task_sleep_head {
+-      spinlock_t lock;
++      raw_spinlock_t lock;
+       struct hlist_head list;
+ } async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
+ 
+@@ -122,17 +123,17 @@ void kvm_async_pf_task_wait(u32 token)
+       u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
+       struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
+       struct kvm_task_sleep_node n, *e;
+-      DEFINE_WAIT(wait);
++      DECLARE_SWAITQUEUE(wait);
+ 
+       rcu_irq_enter();
+ 
+-      spin_lock(&b->lock);
++      raw_spin_lock(&b->lock);
+       e = _find_apf_task(b, token);
+       if (e) {
+               /* dummy entry exist -> wake up was delivered ahead of PF */
+               hlist_del(&e->link);
+               kfree(e);
+-              spin_unlock(&b->lock);
++              raw_spin_unlock(&b->lock);
+ 
+               rcu_irq_exit();
+               return;
+@@ -141,13 +142,13 @@ void kvm_async_pf_task_wait(u32 token)
+       n.token = token;
+       n.cpu = smp_processor_id();
+       n.halted = is_idle_task(current) || preempt_count() > 1;
+-      init_waitqueue_head(&n.wq);
++      init_swait_queue_head(&n.wq);
+       hlist_add_head(&n.link, &b->list);
+-      spin_unlock(&b->lock);
++      raw_spin_unlock(&b->lock);
+ 
+       for (;;) {
+               if (!n.halted)
+-                      prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
++                      prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+               if (hlist_unhashed(&n.link))
+                       break;
+ 
+@@ -166,7 +167,7 @@ void kvm_async_pf_task_wait(u32 token)
+               }
+       }
+       if (!n.halted)
+-              finish_wait(&n.wq, &wait);
++              finish_swait(&n.wq, &wait);
+ 
+       rcu_irq_exit();
+       return;
+@@ -178,8 +179,8 @@ static void apf_task_wake_one(struct kvm_task_sleep_node *n)
+       hlist_del_init(&n->link);
+       if (n->halted)
+               smp_send_reschedule(n->cpu);
+-      else if (waitqueue_active(&n->wq))
+-              wake_up(&n->wq);
++      else if (swait_active(&n->wq))
++              swake_up(&n->wq);
+ }
+ 
+ static void apf_task_wake_all(void)
+@@ -189,14 +190,14 @@ static void apf_task_wake_all(void)
+       for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
+               struct hlist_node *p, *next;
+               struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
+-              spin_lock(&b->lock);
++              raw_spin_lock(&b->lock);
+               hlist_for_each_safe(p, next, &b->list) {
+                       struct kvm_task_sleep_node *n =
+                               hlist_entry(p, typeof(*n), link);
+                       if (n->cpu == smp_processor_id())
+                               apf_task_wake_one(n);
+               }
+-              spin_unlock(&b->lock);
++              raw_spin_unlock(&b->lock);
+       }
+ }
+ 
+@@ -212,7 +213,7 @@ void kvm_async_pf_task_wake(u32 token)
+       }
+ 
+ again:
+-      spin_lock(&b->lock);
++      raw_spin_lock(&b->lock);
+       n = _find_apf_task(b, token);
+       if (!n) {
+               /*
+@@ -225,17 +226,17 @@ again:
+                        * Allocation failed! Busy wait while other cpu
+                        * handles async PF.
+                        */
+-                      spin_unlock(&b->lock);
++                      raw_spin_unlock(&b->lock);
+                       cpu_relax();
+                       goto again;
+               }
+               n->token = token;
+               n->cpu = smp_processor_id();
+-              init_waitqueue_head(&n->wq);
++              init_swait_queue_head(&n->wq);
+               hlist_add_head(&n->link, &b->list);
+       } else
+               apf_task_wake_one(n);
+-      spin_unlock(&b->lock);
++      raw_spin_unlock(&b->lock);
+       return;
+ }
+ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
+@@ -486,7 +487,7 @@ void __init kvm_guest_init(void)
+       paravirt_ops_setup();
+       register_reboot_notifier(&kvm_pv_reboot_nb);
+       for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
+-              spin_lock_init(&async_pf_sleepers[i].lock);
++              raw_spin_lock_init(&async_pf_sleepers[i].lock);
+       if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
+               x86_init.irqs.trap_init = kvm_apf_trap_init;
+ 
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index 697f90db0e37..424aec4a4c71 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -231,7 +231,7 @@ pci_serr_error(unsigned char reason, struct pt_regs *regs)
+ #endif
+ 
+       if (panic_on_unrecovered_nmi)
+-              panic("NMI: Not continuing");
++              nmi_panic(regs, "NMI: Not continuing");
+ 
+       pr_emerg("Dazed and confused, but trying to continue\n");
+ 
+@@ -255,8 +255,16 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
+                reason, smp_processor_id());
+       show_regs(regs);
+ 
+-      if (panic_on_io_nmi)
+-              panic("NMI IOCK error: Not continuing");
++      if (panic_on_io_nmi) {
++              nmi_panic(regs, "NMI IOCK error: Not continuing");
++
++              /*
++               * If we end up here, it means we have received an NMI while
++               * processing panic(). Simply return without delaying and
++               * re-enabling NMIs.
++               */
++              return;
++      }
+ 
+       /* Re-enable the IOCK line, wait for a few seconds */
+       reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
+@@ -297,7 +305,7 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
+ 
+       pr_emerg("Do you have a strange power saving mode enabled?\n");
+       if (unknown_nmi_panic || panic_on_unrecovered_nmi)
+-              panic("NMI: Not continuing");
++              nmi_panic(regs, "NMI: Not continuing");
+ 
+       pr_emerg("Dazed and confused, but trying to continue\n");
+ }
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index 9f950917528b..4dd4beae917a 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -35,6 +35,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/io.h>
+ #include <linux/kdebug.h>
++#include <linux/highmem.h>
+ 
+ #include <asm/pgtable.h>
+ #include <asm/ldt.h>
+@@ -210,6 +211,35 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+ }
+ EXPORT_SYMBOL_GPL(start_thread);
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
++{
++      int i;
++
++      /*
++       * Clear @prev's kmap_atomic mappings
++       */
++      for (i = 0; i < prev_p->kmap_idx; i++) {
++              int idx = i + KM_TYPE_NR * smp_processor_id();
++              pte_t *ptep = kmap_pte - idx;
++
++              kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
++      }
++      /*
++       * Restore @next_p's kmap_atomic mappings
++       */
++      for (i = 0; i < next_p->kmap_idx; i++) {
++              int idx = i + KM_TYPE_NR * smp_processor_id();
++
++              if (!pte_none(next_p->kmap_pte[i]))
++                      set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
++      }
++}
++#else
++static inline void
++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
++#endif
++
+ 
+ /*
+  *    switch_to(x,y) should switch tasks from x to y.
+@@ -286,6 +316,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+                    task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
+               __switch_to_xtra(prev_p, next_p, tss);
+ 
++      switch_kmaps(prev_p, next_p);
++
+       /*
+        * Leave lazy mode, flushing any hypercalls made here.
+        * This must be done before restoring TLS segments so
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index f660d63f40fe..8384207adde2 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -726,6 +726,7 @@ static int crashing_cpu;
+ static nmi_shootdown_cb shootdown_callback;
+ 
+ static atomic_t waiting_for_crash_ipi;
++static int crash_ipi_issued;
+ 
+ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
+ {
+@@ -788,6 +789,9 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
+ 
+       smp_send_nmi_allbutself();
+ 
++      /* Kick CPUs looping in NMI context. */
++      WRITE_ONCE(crash_ipi_issued, 1);
++
+       msecs = 1000; /* Wait at most a second for the other cpus to stop */
+       while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
+               mdelay(1);
+@@ -796,6 +800,22 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
+ 
+       /* Leave the nmi callback set */
+ }
++
++/* Override the weak function in kernel/panic.c */
++void nmi_panic_self_stop(struct pt_regs *regs)
++{
++      while (1) {
++              /*
++               * Wait for the crash dumping IPI to be issued, and then
++               * call its callback directly.
++               */
++              if (READ_ONCE(crash_ipi_issued))
++                      crash_nmi_callback(0, regs); /* Don't return */
++
++              cpu_relax();
++      }
++}
++
+ #else /* !CONFIG_SMP */
+ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
+ {
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 4d30b865be30..20d9e9fb3b74 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1195,7 +1195,7 @@ static void apic_update_lvtt(struct kvm_lapic *apic)
+ static void apic_timer_expired(struct kvm_lapic *apic)
+ {
+       struct kvm_vcpu *vcpu = apic->vcpu;
+-      wait_queue_head_t *q = &vcpu->wq;
++      struct swait_queue_head *q = &vcpu->wq;
+       struct kvm_timer *ktimer = &apic->lapic_timer;
+ 
+       if (atomic_read(&apic->lapic_timer.pending))
+@@ -1204,8 +1204,8 @@ static void apic_timer_expired(struct kvm_lapic *apic)
+       atomic_inc(&apic->lapic_timer.pending);
+       kvm_set_pending_timer(vcpu);
+ 
+-      if (waitqueue_active(q))
+-              wake_up_interruptible(q);
++      if (swait_active(q))
++              swake_up(q);
+ 
+       if (apic_lvtt_tscdeadline(apic))
+               ktimer->expired_tscdeadline = ktimer->tscdeadline;
+@@ -1801,6 +1801,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
+       hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
+                    HRTIMER_MODE_ABS);
+       apic->lapic_timer.timer.function = apic_timer_fn;
++      apic->lapic_timer.timer.irqsafe = 1;
+ 
+       /*
+        * APIC is created enabled. This will prevent kvm_lapic_set_base from
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index d7cb9577fa31..77c1bdd802df 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5792,6 +5792,13 @@ int kvm_arch_init(void *opaque)
+               goto out;
+       }
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++      if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
++              printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
++              return -EOPNOTSUPP;
++      }
++#endif
++
+       r = kvm_mmu_module_init();
+       if (r)
+               goto out_free_percpu;
+diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
+index a6d739258137..bd24ba1c4a86 100644
+--- a/arch/x86/mm/highmem_32.c
++++ b/arch/x86/mm/highmem_32.c
+@@ -32,10 +32,11 @@ EXPORT_SYMBOL(kunmap);
+  */
+ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+ {
++      pte_t pte = mk_pte(page, prot);
+       unsigned long vaddr;
+       int idx, type;
+ 
+-      preempt_disable();
++      preempt_disable_nort();
+       pagefault_disable();
+ 
+       if (!PageHighMem(page))
+@@ -45,7 +46,10 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+       idx = type + KM_TYPE_NR*smp_processor_id();
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+       BUG_ON(!pte_none(*(kmap_pte-idx)));
+-      set_pte(kmap_pte-idx, mk_pte(page, prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++      current->kmap_pte[type] = pte;
++#endif
++      set_pte(kmap_pte-idx, pte);
+       arch_flush_lazy_mmu_mode();
+ 
+       return (void *)vaddr;
+@@ -88,6 +92,9 @@ void __kunmap_atomic(void *kvaddr)
+                * is a bad idea also, in case the page changes cacheability
+                * attributes or becomes a protected page in a hypervisor.
+                */
++#ifdef CONFIG_PREEMPT_RT_FULL
++              current->kmap_pte[type] = __pte(0);
++#endif
+               kpte_clear_flush(kmap_pte-idx, vaddr);
+               kmap_atomic_idx_pop();
+               arch_flush_lazy_mmu_mode();
+@@ -100,7 +107,7 @@ void __kunmap_atomic(void *kvaddr)
+ #endif
+ 
+       pagefault_enable();
+-      preempt_enable();
++      preempt_enable_nort();
+ }
+ EXPORT_SYMBOL(__kunmap_atomic);
+ 
+diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
+index 9c0ff045fdd4..dd25dd1671b6 100644
+--- a/arch/x86/mm/iomap_32.c
++++ b/arch/x86/mm/iomap_32.c
+@@ -56,6 +56,7 @@ EXPORT_SYMBOL_GPL(iomap_free);
+ 
+ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+ {
++      pte_t pte = pfn_pte(pfn, prot);
+       unsigned long vaddr;
+       int idx, type;
+ 
+@@ -65,7 +66,12 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+       type = kmap_atomic_idx_push();
+       idx = type + KM_TYPE_NR * smp_processor_id();
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+-      set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
++      WARN_ON(!pte_none(*(kmap_pte - idx)));
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++      current->kmap_pte[type] = pte;
++#endif
++      set_pte(kmap_pte - idx, pte);
+       arch_flush_lazy_mmu_mode();
+ 
+       return (void *)vaddr;
+@@ -113,6 +119,9 @@ iounmap_atomic(void __iomem *kvaddr)
+                * is a bad idea also, in case the page changes cacheability
+                * attributes or becomes a protected page in a hypervisor.
+                */
++#ifdef CONFIG_PREEMPT_RT_FULL
++              current->kmap_pte[type] = __pte(0);
++#endif
+               kpte_clear_flush(kmap_pte-idx, vaddr);
+               kmap_atomic_idx_pop();
+       }
+diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
+index 3b6ec42718e4..7871083de089 100644
+--- a/arch/x86/platform/uv/tlb_uv.c
++++ b/arch/x86/platform/uv/tlb_uv.c
+@@ -714,9 +714,9 @@ static void destination_plugged(struct bau_desc *bau_desc,
+ 
+               quiesce_local_uvhub(hmaster);
+ 
+-              spin_lock(&hmaster->queue_lock);
++              raw_spin_lock(&hmaster->queue_lock);
+               reset_with_ipi(&bau_desc->distribution, bcp);
+-              spin_unlock(&hmaster->queue_lock);
++              raw_spin_unlock(&hmaster->queue_lock);
+ 
+               end_uvhub_quiesce(hmaster);
+ 
+@@ -736,9 +736,9 @@ static void destination_timeout(struct bau_desc *bau_desc,
+ 
+               quiesce_local_uvhub(hmaster);
+ 
+-              spin_lock(&hmaster->queue_lock);
++              raw_spin_lock(&hmaster->queue_lock);
+               reset_with_ipi(&bau_desc->distribution, bcp);
+-              spin_unlock(&hmaster->queue_lock);
++              raw_spin_unlock(&hmaster->queue_lock);
+ 
+               end_uvhub_quiesce(hmaster);
+ 
+@@ -759,7 +759,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
+       cycles_t tm1;
+ 
+       hmaster = bcp->uvhub_master;
+-      spin_lock(&hmaster->disable_lock);
++      raw_spin_lock(&hmaster->disable_lock);
+       if (!bcp->baudisabled) {
+               stat->s_bau_disabled++;
+               tm1 = get_cycles();
+@@ -772,7 +772,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
+                       }
+               }
+       }
+-      spin_unlock(&hmaster->disable_lock);
++      raw_spin_unlock(&hmaster->disable_lock);
+ }
+ 
+ static void count_max_concurr(int stat, struct bau_control *bcp,
+@@ -835,7 +835,7 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
+  */
+ static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
+ {
+-      spinlock_t *lock = &hmaster->uvhub_lock;
++      raw_spinlock_t *lock = &hmaster->uvhub_lock;
+       atomic_t *v;
+ 
+       v = &hmaster->active_descriptor_count;
+@@ -968,7 +968,7 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
+       struct bau_control *hmaster;
+ 
+       hmaster = bcp->uvhub_master;
+-      spin_lock(&hmaster->disable_lock);
++      raw_spin_lock(&hmaster->disable_lock);
+       if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
+               stat->s_bau_reenabled++;
+               for_each_present_cpu(tcpu) {
+@@ -980,10 +980,10 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
+                               tbcp->period_giveups = 0;
+                       }
+               }
+-              spin_unlock(&hmaster->disable_lock);
++              raw_spin_unlock(&hmaster->disable_lock);
+               return 0;
+       }
+-      spin_unlock(&hmaster->disable_lock);
++      raw_spin_unlock(&hmaster->disable_lock);
+       return -1;
+ }
+ 
+@@ -1901,9 +1901,9 @@ static void __init init_per_cpu_tunables(void)
+               bcp->cong_reps                  = congested_reps;
+               bcp->disabled_period =          sec_2_cycles(disabled_period);
+               bcp->giveup_limit =             giveup_limit;
+-              spin_lock_init(&bcp->queue_lock);
+-              spin_lock_init(&bcp->uvhub_lock);
+-              spin_lock_init(&bcp->disable_lock);
++              raw_spin_lock_init(&bcp->queue_lock);
++              raw_spin_lock_init(&bcp->uvhub_lock);
++              raw_spin_lock_init(&bcp->disable_lock);
+       }
+ }
+ 
+diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
+index 2b158a9fa1d7..5e0b122620cb 100644
+--- a/arch/x86/platform/uv/uv_time.c
++++ b/arch/x86/platform/uv/uv_time.c
+@@ -57,7 +57,7 @@ static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
+ 
+ /* There is one of these allocated per node */
+ struct uv_rtc_timer_head {
+-      spinlock_t      lock;
++      raw_spinlock_t  lock;
+       /* next cpu waiting for timer, local node relative: */
+       int             next_cpu;
+       /* number of cpus on this node: */
+@@ -177,7 +177,7 @@ static __init int uv_rtc_allocate_timers(void)
+                               uv_rtc_deallocate_timers();
+                               return -ENOMEM;
+                       }
+-                      spin_lock_init(&head->lock);
++                      raw_spin_lock_init(&head->lock);
+                       head->ncpus = uv_blade_nr_possible_cpus(bid);
+                       head->next_cpu = -1;
+                       blade_info[bid] = head;
+@@ -231,7 +231,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
+       unsigned long flags;
+       int next_cpu;
+ 
+-      spin_lock_irqsave(&head->lock, flags);
++      raw_spin_lock_irqsave(&head->lock, flags);
+ 
+       next_cpu = head->next_cpu;
+       *t = expires;
+@@ -243,12 +243,12 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
+               if (uv_setup_intr(cpu, expires)) {
+                       *t = ULLONG_MAX;
+                       uv_rtc_find_next_timer(head, pnode);
+-                      spin_unlock_irqrestore(&head->lock, flags);
++                      raw_spin_unlock_irqrestore(&head->lock, flags);
+                       return -ETIME;
+               }
+       }
+ 
+-      spin_unlock_irqrestore(&head->lock, flags);
++      raw_spin_unlock_irqrestore(&head->lock, flags);
+       return 0;
+ }
+ 
+@@ -267,7 +267,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
+       unsigned long flags;
+       int rc = 0;
+ 
+-      spin_lock_irqsave(&head->lock, flags);
++      raw_spin_lock_irqsave(&head->lock, flags);
+ 
+       if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
+               rc = 1;
+@@ -279,7 +279,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
+                       uv_rtc_find_next_timer(head, pnode);
+       }
+ 
+-      spin_unlock_irqrestore(&head->lock, flags);
++      raw_spin_unlock_irqrestore(&head->lock, flags);
+ 
+       return rc;
+ }
+@@ -299,13 +299,18 @@ static int uv_rtc_unset_timer(int cpu, int force)
+ static cycle_t uv_read_rtc(struct clocksource *cs)
+ {
+       unsigned long offset;
++      cycle_t cycles;
+ 
++      preempt_disable();
+       if (uv_get_min_hub_revision_id() == 1)
+               offset = 0;
+       else
+               offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
+ 
+-      return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
++      cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
++      preempt_enable();
++
++      return cycles;
+ }
+ 
+ /*
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 4fab5d610805..52d2fe2fec8f 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -125,6 +125,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
+ 
+       INIT_LIST_HEAD(&rq->queuelist);
<Skipped 27493 lines>
================================================================

---- gitweb:

http://git.pld-linux.org/gitweb.cgi/packages/kernel.git/commitdiff/b4de310e3b27a2231954d63e30c8ee4e9b24d8ba

