commit:     8911ed4b99ac4a15838eeaaf9bc095b327cb3d23
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug 25 11:00:07 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Aug 25 11:00:07 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8911ed4b

Linux patch 4.12.9

 0000_README             |    4 +
 1008_linux-4.12.9.patch | 1644 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1648 insertions(+)

diff --git a/0000_README b/0000_README
index 47efe0d..90242d0 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch:  1007_linux-4.12.8.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.12.8
 
+Patch:  1008_linux-4.12.9.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.12.9
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1008_linux-4.12.9.patch b/1008_linux-4.12.9.patch
new file mode 100644
index 0000000..21e964c
--- /dev/null
+++ b/1008_linux-4.12.9.patch
@@ -0,0 +1,1644 @@
+diff --git a/Makefile b/Makefile
+index 6da481d08441..a6c2a5e7a48d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 12
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
+index 559da17297ef..651299c242ec 100644
+--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
+@@ -507,7 +507,7 @@
+       pinctrl_pcie: pciegrp {
+               fsl,pins = <
+                       /* PCIe reset */
+-                      MX6QDL_PAD_EIM_BCLK__GPIO6_IO31 0x030b0
++                      MX6QDL_PAD_EIM_DA0__GPIO3_IO00  0x030b0
+                       MX6QDL_PAD_EIM_DA4__GPIO3_IO04  0x030b0
+               >;
+       };
+@@ -668,7 +668,7 @@
+ &pcie {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_pcie>;
+-      reset-gpio = <&gpio6 31 GPIO_ACTIVE_LOW>;
++      reset-gpio = <&gpio3 0 GPIO_ACTIVE_LOW>;
+       status = "okay";
+ };
+ 
+diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
+index 4e6e88a6b2f4..2244a94ed9c9 100644
+--- a/arch/arm/include/asm/bug.h
++++ b/arch/arm/include/asm/bug.h
+@@ -37,7 +37,7 @@ do {                                                          \
+               ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
+               "2:\t.asciz " #__file "\n"                      \
+               ".popsection\n"                                 \
+-              ".pushsection __bug_table,\"a\"\n"              \
++              ".pushsection __bug_table,\"aw\"\n"             \
+               ".align 2\n"                                    \
+               "3:\t.word 1b, 2b\n"                            \
+               "\t.hword " #__line ", 0\n"                     \
+diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h
+index 366448eb0fb7..a02a57186f56 100644
+--- a/arch/arm64/include/asm/bug.h
++++ b/arch/arm64/include/asm/bug.h
+@@ -36,7 +36,7 @@
+ #ifdef CONFIG_GENERIC_BUG
+ 
+ #define __BUG_ENTRY(flags)                            \
+-              ".pushsection __bug_table,\"a\"\n\t"    \
++              ".pushsection __bug_table,\"aw\"\n\t"   \
+               ".align 2\n\t"                          \
+       "0:     .long 1f - 0b\n\t"                      \
+ _BUGVERBOSE_LOCATION(__FILE__, __LINE__)              \
+diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
+index acae781f7359..3288c2b36731 100644
+--- a/arch/arm64/include/asm/elf.h
++++ b/arch/arm64/include/asm/elf.h
+@@ -114,10 +114,10 @@
+ 
+ /*
+  * This is the base location for PIE (ET_DYN with INTERP) loads. On
+- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
++ * 64-bit, this is above 4GB to leave the entire 32-bit address
+  * space open for things that want to use the area for 32-bit pointers.
+  */
+-#define ELF_ET_DYN_BASE               0x100000000UL
++#define ELF_ET_DYN_BASE               (2 * TASK_SIZE_64 / 3)
+ 
+ #ifndef __ASSEMBLY__
+ 
+diff --git a/arch/blackfin/include/asm/bug.h b/arch/blackfin/include/asm/bug.h
+index 8d9b1eba89c4..76b2e82ee730 100644
+--- a/arch/blackfin/include/asm/bug.h
++++ b/arch/blackfin/include/asm/bug.h
+@@ -21,7 +21,7 @@
+ #define _BUG_OR_WARN(flags)                                           \
+       asm volatile(                                                   \
+               "1:     .hword  %0\n"                                   \
+-              "       .section __bug_table,\"a\",@progbits\n"         \
++              "       .section __bug_table,\"aw\",@progbits\n"        \
+               "2:     .long   1b\n"                                   \
+               "       .long   %1\n"                                   \
+               "       .short  %2\n"                                   \
+@@ -38,7 +38,7 @@
+ #define _BUG_OR_WARN(flags)                                           \
+       asm volatile(                                                   \
+               "1:     .hword  %0\n"                                   \
+-              "       .section __bug_table,\"a\",@progbits\n"         \
++              "       .section __bug_table,\"aw\",@progbits\n"        \
+               "2:     .long   1b\n"                                   \
+               "       .short  %1\n"                                   \
+               "       .org    2b + %2\n"                              \
+diff --git a/arch/mn10300/include/asm/bug.h b/arch/mn10300/include/asm/bug.h
+index aa6a38886391..811414fb002d 100644
+--- a/arch/mn10300/include/asm/bug.h
++++ b/arch/mn10300/include/asm/bug.h
+@@ -21,7 +21,7 @@ do {                                                          \
+       asm volatile(                                           \
+               "       syscall 15                      \n"     \
+               "0:                                     \n"     \
+-              "       .section __bug_table,\"a\"      \n"     \
++              "       .section __bug_table,\"aw\"     \n"     \
+               "       .long 0b,%0,%1                  \n"     \
+               "       .previous                       \n"     \
+               :                                               \
+diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
+index d2742273a685..07ea467f22fc 100644
+--- a/arch/parisc/include/asm/bug.h
++++ b/arch/parisc/include/asm/bug.h
+@@ -27,7 +27,7 @@
+       do {                                                            \
+               asm volatile("\n"                                       \
+                            "1:\t" PARISC_BUG_BREAK_ASM "\n"           \
+-                           "\t.pushsection __bug_table,\"a\"\n"       \
++                           "\t.pushsection __bug_table,\"aw\"\n"      \
+                            "2:\t" ASM_WORD_INSN "1b, %c0\n"           \
+                            "\t.short %c1, %c2\n"                      \
+                            "\t.org 2b+%c3\n"                          \
+@@ -50,7 +50,7 @@
+       do {                                                            \
+               asm volatile("\n"                                       \
+                            "1:\t" PARISC_BUG_BREAK_ASM "\n"           \
+-                           "\t.pushsection __bug_table,\"a\"\n"       \
++                           "\t.pushsection __bug_table,\"aw\"\n"      \
+                            "2:\t" ASM_WORD_INSN "1b, %c0\n"           \
+                            "\t.short %c1, %c2\n"                      \
+                            "\t.org 2b+%c3\n"                          \
+@@ -64,7 +64,7 @@
+       do {                                                            \
+               asm volatile("\n"                                       \
+                            "1:\t" PARISC_BUG_BREAK_ASM "\n"           \
+-                           "\t.pushsection __bug_table,\"a\"\n"       \
++                           "\t.pushsection __bug_table,\"aw\"\n"      \
+                            "2:\t" ASM_WORD_INSN "1b\n"                \
+                            "\t.short %c0\n"                           \
+                            "\t.org 2b+%c1\n"                          \
+diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
+index 0151af6c2a50..87fcc1948817 100644
+--- a/arch/powerpc/include/asm/bug.h
++++ b/arch/powerpc/include/asm/bug.h
+@@ -18,7 +18,7 @@
+ #include <asm/asm-offsets.h>
+ #ifdef CONFIG_DEBUG_BUGVERBOSE
+ .macro EMIT_BUG_ENTRY addr,file,line,flags
+-       .section __bug_table,"a"
++       .section __bug_table,"aw"
+ 5001:  PPC_LONG \addr, 5002f
+        .short \line, \flags
+        .org 5001b+BUG_ENTRY_SIZE
+@@ -29,7 +29,7 @@
+ .endm
+ #else
+ .macro EMIT_BUG_ENTRY addr,file,line,flags
+-       .section __bug_table,"a"
++       .section __bug_table,"aw"
+ 5001:  PPC_LONG \addr
+        .short \flags
+        .org 5001b+BUG_ENTRY_SIZE
+@@ -42,14 +42,14 @@
+    sizeof(struct bug_entry), respectively */
+ #ifdef CONFIG_DEBUG_BUGVERBOSE
+ #define _EMIT_BUG_ENTRY                               \
+-      ".section __bug_table,\"a\"\n"          \
++      ".section __bug_table,\"aw\"\n"         \
+       "2:\t" PPC_LONG "1b, %0\n"              \
+       "\t.short %1, %2\n"                     \
+       ".org 2b+%3\n"                          \
+       ".previous\n"
+ #else
+ #define _EMIT_BUG_ENTRY                               \
+-      ".section __bug_table,\"a\"\n"          \
++      ".section __bug_table,\"aw\"\n"         \
+       "2:\t" PPC_LONG "1b\n"                  \
+       "\t.short %2\n"                         \
+       ".org 2b+%3\n"                          \
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 2ad725ef4368..318738f3aa05 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -362,7 +362,8 @@ void enable_kernel_vsx(void)
+ 
+       cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
+ 
+-      if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
++      if (current->thread.regs &&
++          (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
+               check_if_tm_restore_required(current);
+               /*
+                * If a thread has already been reclaimed then the
+@@ -386,7 +387,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
+ {
+       if (tsk->thread.regs) {
+               preempt_disable();
+-              if (tsk->thread.regs->msr & MSR_VSX) {
++              if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
+                       BUG_ON(tsk != current);
+                       giveup_vsx(tsk);
+               }
+diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
+index 1bbd9dbfe4e0..ce9cc123988b 100644
+--- a/arch/s390/include/asm/bug.h
++++ b/arch/s390/include/asm/bug.h
+@@ -14,7 +14,7 @@
+               ".section .rodata.str,\"aMS\",@progbits,1\n"    \
+               "2:     .asciz  \""__FILE__"\"\n"               \
+               ".previous\n"                                   \
+-              ".section __bug_table,\"a\"\n"                  \
++              ".section __bug_table,\"aw\"\n"                 \
+               "3:     .long   1b-3b,2b-3b\n"                  \
+               "       .short  %0,%1\n"                        \
+               "       .org    3b+%2\n"                        \
+@@ -30,7 +30,7 @@
+       asm volatile(                                   \
+               "0:     j       0b+2\n"                 \
+               "1:\n"                                  \
+-              ".section __bug_table,\"a\"\n"          \
++              ".section __bug_table,\"aw\"\n"         \
+               "2:     .long   1b-2b\n"                \
+               "       .short  %0\n"                   \
+               "       .org    2b+%1\n"                \
+diff --git a/arch/sh/include/asm/bug.h b/arch/sh/include/asm/bug.h
+index 1b77f068be2b..986c8781d89f 100644
+--- a/arch/sh/include/asm/bug.h
++++ b/arch/sh/include/asm/bug.h
+@@ -24,14 +24,14 @@
+  */
+ #ifdef CONFIG_DEBUG_BUGVERBOSE
+ #define _EMIT_BUG_ENTRY                               \
+-      "\t.pushsection __bug_table,\"a\"\n"    \
++      "\t.pushsection __bug_table,\"aw\"\n"   \
+       "2:\t.long 1b, %O1\n"                   \
+       "\t.short %O2, %O3\n"                   \
+       "\t.org 2b+%O4\n"                       \
+       "\t.popsection\n"
+ #else
+ #define _EMIT_BUG_ENTRY                               \
+-      "\t.pushsection __bug_table,\"a\"\n"    \
++      "\t.pushsection __bug_table,\"aw\"\n"   \
+       "2:\t.long 1b\n"                        \
+       "\t.short %O3\n"                        \
+       "\t.org 2b+%O4\n"                       \
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 0efb4c9497bc..ae1d55548f5a 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -94,6 +94,7 @@ config X86
+       select GENERIC_STRNCPY_FROM_USER
+       select GENERIC_STRNLEN_USER
+       select GENERIC_TIME_VSYSCALL
++      select HARDLOCKUP_CHECK_TIMESTAMP       if X86_64
+       select HAVE_ACPI_APEI                   if ACPI
+       select HAVE_ACPI_APEI_NMI               if ACPI
+       select HAVE_ALIGNED_STRUCT_PAGE         if SLUB
+diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+index 1cd792db15ef..1eab79c9ac48 100644
+--- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S
++++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+@@ -117,11 +117,10 @@
+       .set T1, REG_T1
+ .endm
+ 
+-#define K_BASE                %r8
+ #define HASH_PTR      %r9
++#define BLOCKS_CTR    %r8
+ #define BUFFER_PTR    %r10
+ #define BUFFER_PTR2   %r13
+-#define BUFFER_END    %r11
+ 
+ #define PRECALC_BUF   %r14
+ #define WK_BUF                %r15
+@@ -205,14 +204,14 @@
+                * blended AVX2 and ALU instruction scheduling
+                * 1 vector iteration per 8 rounds
+                */
+-              vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP
++              vmovdqu (i * 2)(BUFFER_PTR), W_TMP
+       .elseif ((i & 7) == 1)
+-              vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\
++              vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\
+                        WY_TMP, WY_TMP
+       .elseif ((i & 7) == 2)
+               vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY
+       .elseif ((i & 7) == 4)
+-              vpaddd  K_XMM(K_BASE), WY, WY_TMP
++              vpaddd  K_XMM + K_XMM_AR(%rip), WY, WY_TMP
+       .elseif ((i & 7) == 7)
+               vmovdqu  WY_TMP, PRECALC_WK(i&~7)
+ 
+@@ -255,7 +254,7 @@
+               vpxor   WY, WY_TMP, WY_TMP
+       .elseif ((i & 7) == 7)
+               vpxor   WY_TMP2, WY_TMP, WY
+-              vpaddd  K_XMM(K_BASE), WY, WY_TMP
++              vpaddd  K_XMM + K_XMM_AR(%rip), WY, WY_TMP
+               vmovdqu WY_TMP, PRECALC_WK(i&~7)
+ 
+               PRECALC_ROTATE_WY
+@@ -291,7 +290,7 @@
+               vpsrld  $30, WY, WY
+               vpor    WY, WY_TMP, WY
+       .elseif ((i & 7) == 7)
+-              vpaddd  K_XMM(K_BASE), WY, WY_TMP
++              vpaddd  K_XMM + K_XMM_AR(%rip), WY, WY_TMP
+               vmovdqu WY_TMP, PRECALC_WK(i&~7)
+ 
+               PRECALC_ROTATE_WY
+@@ -446,6 +445,16 @@
+ 
+ .endm
+ 
++/* Add constant only if (%2 > %3) condition met (uses RTA as temp)
++ * %1 + %2 >= %3 ? %4 : 0
++ */
++.macro ADD_IF_GE a, b, c, d
++      mov     \a, RTA
++      add     $\d, RTA
++      cmp     $\c, \b
++      cmovge  RTA, \a
++.endm
++
+ /*
+ * macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining
+  */
+@@ -463,13 +472,16 @@
+       lea     (2*4*80+32)(%rsp), WK_BUF
+ 
+       # Precalc WK for first 2 blocks
+-      PRECALC_OFFSET = 0
++      ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64
+       .set i, 0
+       .rept    160
+               PRECALC i
+               .set i, i + 1
+       .endr
+-      PRECALC_OFFSET = 128
++
++      /* Go to next block if needed */
++      ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128
++      ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
+       xchg    WK_BUF, PRECALC_BUF
+ 
+       .align 32
+@@ -479,8 +491,8 @@ _loop:
+        * we use K_BASE value as a signal of a last block,
+        * it is set below by: cmovae BUFFER_PTR, K_BASE
+        */
+-      cmp     K_BASE, BUFFER_PTR
+-      jne     _begin
++      test BLOCKS_CTR, BLOCKS_CTR
++      jnz _begin
+       .align 32
+       jmp     _end
+       .align 32
+@@ -512,10 +524,10 @@ _loop0:
+               .set j, j+2
+       .endr
+ 
+-      add     $(2*64), BUFFER_PTR       /* move to next odd-64-byte block */
+-      cmp     BUFFER_END, BUFFER_PTR    /* is current block the last one? */
+-      cmovae  K_BASE, BUFFER_PTR      /* signal the last iteration smartly */
+-
++      /* Update Counter */
++      sub $1, BLOCKS_CTR
++      /* Move to the next block only if needed*/
++      ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128
+       /*
+        * rounds
+        * 60,62,64,66,68
+@@ -532,8 +544,8 @@ _loop0:
+       UPDATE_HASH     12(HASH_PTR), D
+       UPDATE_HASH     16(HASH_PTR), E
+ 
+-      cmp     K_BASE, BUFFER_PTR      /* is current block the last one? */
+-      je      _loop
++      test    BLOCKS_CTR, BLOCKS_CTR
++      jz      _loop
+ 
+       mov     TB, B
+ 
+@@ -575,10 +587,10 @@ _loop2:
+               .set j, j+2
+       .endr
+ 
+-      add     $(2*64), BUFFER_PTR2      /* move to next even-64-byte block */
+-
+-      cmp     BUFFER_END, BUFFER_PTR2   /* is current block the last one */
+-      cmovae  K_BASE, BUFFER_PTR       /* signal the last iteration smartly */
++      /* update counter */
++      sub     $1, BLOCKS_CTR
++      /* Move to the next block only if needed*/
++      ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
+ 
+       jmp     _loop3
+ _loop3:
+@@ -641,19 +653,12 @@ _loop3:
+ 
+       avx2_zeroupper
+ 
+-      lea     K_XMM_AR(%rip), K_BASE
+-
++      /* Setup initial values */
+       mov     CTX, HASH_PTR
+       mov     BUF, BUFFER_PTR
+-      lea     64(BUF), BUFFER_PTR2
+-
+-      shl     $6, CNT                 /* mul by 64 */
+-      add     BUF, CNT
+-      add     $64, CNT
+-      mov     CNT, BUFFER_END
+ 
+-      cmp     BUFFER_END, BUFFER_PTR2
+-      cmovae  K_BASE, BUFFER_PTR2
++      mov     BUF, BUFFER_PTR2
++      mov     CNT, BLOCKS_CTR
+ 
+       xmm_mov BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP
+ 
+diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
+index f960a043cdeb..fc61739150e7 100644
+--- a/arch/x86/crypto/sha1_ssse3_glue.c
++++ b/arch/x86/crypto/sha1_ssse3_glue.c
+@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
+ 
+ static bool avx2_usable(void)
+ {
+-      if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
++      if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+               && boot_cpu_has(X86_FEATURE_BMI1)
+               && boot_cpu_has(X86_FEATURE_BMI2))
+               return true;
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 4a4c0834f965..22f2281b942b 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -1209,6 +1209,8 @@ ENTRY(nmi)
+        * other IST entries.
+        */
+ 
++      ASM_CLAC
++
+       /* Use %rdx as our temp variable throughout */
+       pushq   %rdx
+ 
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 580b60f5ac83..c138835c5547 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2105,7 +2105,7 @@ static void refresh_pce(void *ignored)
+               load_mm_cr4(current->active_mm);
+ }
+ 
+-static void x86_pmu_event_mapped(struct perf_event *event)
++static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
+ {
+       if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
+               return;
+@@ -2120,22 +2120,20 @@ static void x86_pmu_event_mapped(struct perf_event *event)
+        * For now, this can't happen because all callers hold mmap_sem
+        * for write.  If this changes, we'll need a different solution.
+        */
+-      lockdep_assert_held_exclusive(&current->mm->mmap_sem);
++      lockdep_assert_held_exclusive(&mm->mmap_sem);
+ 
+-      if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
+-              on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
++      if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
++              on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
+ }
+ 
+-static void x86_pmu_event_unmapped(struct perf_event *event)
++static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
+ {
+-      if (!current->mm)
+-              return;
+ 
+       if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
+               return;
+ 
+-      if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
+-              on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
++      if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
++              on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
+ }
+ 
+ static int x86_pmu_event_idx(struct perf_event *event)
+diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
+index 39e702d90cdb..aa6b2023d8f8 100644
+--- a/arch/x86/include/asm/bug.h
++++ b/arch/x86/include/asm/bug.h
+@@ -35,7 +35,7 @@
+ #define _BUG_FLAGS(ins, flags)                                                \
+ do {                                                                  \
+       asm volatile("1:\t" ins "\n"                                    \
+-                   ".pushsection __bug_table,\"a\"\n"                 \
++                   ".pushsection __bug_table,\"aw\"\n"                \
+                    "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n"   \
+                    "\t"  __BUG_REL(%c0) "\t# bug_entry::file\n"       \
+                    "\t.word %c1"        "\t# bug_entry::line\n"       \
+@@ -52,7 +52,7 @@ do {                                                          \
+ #define _BUG_FLAGS(ins, flags)                                                \
+ do {                                                                  \
+       asm volatile("1:\t" ins "\n"                                    \
+-                   ".pushsection __bug_table,\"a\"\n"                 \
++                   ".pushsection __bug_table,\"aw\"\n"                \
+                    "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n"   \
+                    "\t.word %c0"        "\t# bug_entry::flags\n"      \
+                    "\t.org 2b+%c1\n"                                  \
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index 1c18d83d3f09..9aeb91935ce0 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -247,11 +247,11 @@ extern int force_personality32;
+ 
+ /*
+  * This is the base location for PIE (ET_DYN with INTERP) loads. On
+- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
++ * 64-bit, this is above 4GB to leave the entire 32-bit address
+  * space open for things that want to use the area for 32-bit pointers.
+  */
+ #define ELF_ET_DYN_BASE               (mmap_is_ia32() ? 0x000400000UL : \
+-                                                0x100000000UL)
++                                                (TASK_SIZE / 3 * 2))
+ 
+ /* This yields a mask that user programs can use to figure out what
+    instruction set this CPU supports.  This could be done in user space,
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index 19ad095b41df..81db3e92dc76 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -50,8 +50,7 @@ unsigned long tasksize_64bit(void)
+ static unsigned long stack_maxrandom_size(unsigned long task_size)
+ {
+       unsigned long max = 0;
+-      if ((current->flags & PF_RANDOMIZE) &&
+-              !(current->personality & ADDR_NO_RANDOMIZE)) {
++      if (current->flags & PF_RANDOMIZE) {
+               max = (-1UL) & __STACK_RND_MASK(task_size == tasksize_32bit());
+               max <<= PAGE_SHIFT;
+       }
+@@ -82,13 +81,13 @@ static int mmap_is_legacy(void)
+ 
+ static unsigned long arch_rnd(unsigned int rndbits)
+ {
++      if (!(current->flags & PF_RANDOMIZE))
++              return 0;
+       return (get_random_long() & ((1UL << rndbits) - 1)) << PAGE_SHIFT;
+ }
+ 
+ unsigned long arch_mmap_rnd(void)
+ {
+-      if (!(current->flags & PF_RANDOMIZE))
+-              return 0;
+       return arch_rnd(mmap_is_ia32() ? mmap32_rnd_bits : mmap64_rnd_bits);
+ }
+ 
+diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
+index 0c3354cf3552..76944e3271bf 100644
+--- a/block/blk-mq-pci.c
++++ b/block/blk-mq-pci.c
+@@ -36,12 +36,18 @@ int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev)
+       for (queue = 0; queue < set->nr_hw_queues; queue++) {
+               mask = pci_irq_get_affinity(pdev, queue);
+               if (!mask)
+-                      return -EINVAL;
++                      goto fallback;
+ 
+               for_each_cpu(cpu, mask)
+                       set->mq_map[cpu] = queue;
+       }
+ 
+       return 0;
++
++fallback:
++      WARN_ON_ONCE(set->nr_hw_queues > 1);
++      for_each_possible_cpu(cpu)
++              set->mq_map[cpu] = 0;
++      return 0;
+ }
+ EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 39459631667c..b49547c5f2c2 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -2119,9 +2119,9 @@ static int blkfront_resume(struct xenbus_device *dev)
+                       /*
+                        * Get the bios in the request so we can re-queue them.
+                        */
+-                      if (req_op(shadow[i].request) == REQ_OP_FLUSH ||
+-                          req_op(shadow[i].request) == REQ_OP_DISCARD ||
+-                          req_op(shadow[i].request) == REQ_OP_SECURE_ERASE ||
++                      if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
++                          req_op(shadow[j].request) == REQ_OP_DISCARD ||
++                          req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
+                           shadow[j].request->cmd_flags & REQ_FUA) {
+                               /*
+                                * Flush operations don't contain bios, so
+diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
+index 771dd26c7076..6719e346b790 100644
+--- a/drivers/crypto/ixp4xx_crypto.c
++++ b/drivers/crypto/ixp4xx_crypto.c
+@@ -1074,7 +1074,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
+               req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
+                               &crypt->icv_rev_aes);
+               if (unlikely(!req_ctx->hmac_virt))
+-                      goto free_buf_src;
++                      goto free_buf_dst;
+               if (!encrypt) {
+                       scatterwalk_map_and_copy(req_ctx->hmac_virt,
+                               req->src, cryptlen, authsize, 0);
+@@ -1089,10 +1089,10 @@ static int aead_perform(struct aead_request *req, int encrypt,
+       BUG_ON(qmgr_stat_overflow(SEND_QID));
+       return -EINPROGRESS;
+ 
+-free_buf_src:
+-      free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+ free_buf_dst:
+       free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
++free_buf_src:
++      free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+       crypt->ctl_flags = CTL_FLAG_UNUSED;
+       return -ENOMEM;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+index ed814e6d0207..28c1112e520c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+@@ -244,6 +244,12 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+               struct dma_fence *f = e->fence;
+               struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ 
++              if (dma_fence_is_signaled(f)) {
++                      hash_del(&e->node);
++                      dma_fence_put(f);
++                      kmem_cache_free(amdgpu_sync_slab, e);
++                      continue;
++              }
+               if (ring && s_fence) {
+                       /* For fences from the same ring it is sufficient
+                        * when they are scheduled.
+@@ -256,13 +262,6 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+                       }
+               }
+ 
+-              if (dma_fence_is_signaled(f)) {
+-                      hash_del(&e->node);
+-                      dma_fence_put(f);
+-                      kmem_cache_free(amdgpu_sync_slab, e);
+-                      continue;
+-              }
+-
+               return f;
+       }
+ 
+diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
+index 7032c542a9b1..4dd4c2159a92 100644
+--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
++++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
+@@ -242,6 +242,10 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
+                       goto err_unpin;
+       }
+ 
++      ret = req->engine->emit_flush(req, EMIT_INVALIDATE);
++      if (ret)
++              goto err_unpin;
++
+       ret = req->engine->emit_bb_start(req,
+                                        so->batch_offset, so->batch_size,
+                                        I915_DISPATCH_SECURE);
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index d5ab9ddef3e3..3b0e9fb33afe 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1224,6 +1224,10 @@ static const struct acpi_device_id elan_acpi_id[] = {
+       { "ELAN0100", 0 },
+       { "ELAN0600", 0 },
+       { "ELAN0605", 0 },
++      { "ELAN0608", 0 },
++      { "ELAN0605", 0 },
++      { "ELAN0609", 0 },
++      { "ELAN060B", 0 },
+       { "ELAN1000", 0 },
+       { }
+ };
+diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
+index 28b26c80f4cf..056507099725 100644
+--- a/drivers/irqchip/irq-atmel-aic-common.c
++++ b/drivers/irqchip/irq-atmel-aic-common.c
+@@ -142,9 +142,9 @@ void __init aic_common_rtc_irq_fixup(struct device_node *root)
+       struct device_node *np;
+       void __iomem *regs;
+ 
+-      np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc");
++      np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc");
+       if (!np)
+-              np = of_find_compatible_node(root, NULL,
++              np = of_find_compatible_node(NULL, NULL,
+                                            "atmel,at91sam9x5-rtc");
+ 
+       if (!np)
+@@ -196,7 +196,6 @@ static void __init aic_common_irq_fixup(const struct of_device_id *matches)
+               return;
+ 
+       match = of_match_node(matches, root);
+-      of_node_put(root);
+ 
+       if (match) {
+               void (*fixup)(struct device_node *) = match->data;
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index d7847014821a..caca5d689cdc 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -7979,7 +7979,7 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
+       if (mddev->safemode == 1)
+               mddev->safemode = 0;
+       /* sync_checkers is always 0 when writes_pending is in per-cpu mode */
+-      if (mddev->in_sync || !mddev->sync_checkers) {
++      if (mddev->in_sync || mddev->sync_checkers) {
+               spin_lock(&mddev->lock);
+               if (mddev->in_sync) {
+                       mddev->in_sync = 0;
+@@ -8639,6 +8639,9 @@ void md_check_recovery(struct mddev *mddev)
+       if (mddev_trylock(mddev)) {
+               int spares = 0;
+ 
++              if (!mddev->external && mddev->safemode == 1)
++                      mddev->safemode = 0;
++
+               if (mddev->ro) {
+                       struct md_rdev *rdev;
+                       if (!mddev->external && mddev->in_sync)
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index c42153a985be..473f91322368 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1175,6 +1175,7 @@ static const struct usb_device_id products[] = {
+       {QMI_FIXED_INTF(0x19d2, 0x1428, 2)},    /* Telewell TW-LTE 4G v2 */
+       {QMI_FIXED_INTF(0x19d2, 0x2002, 4)},    /* ZTE (Vodafone) K3765-Z */
+       {QMI_FIXED_INTF(0x2001, 0x7e19, 4)},    /* D-Link DWM-221 B1 */
++      {QMI_FIXED_INTF(0x2001, 0x7e35, 4)},    /* D-Link DWM-222 */
+       {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
+       {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
+       {QMI_FIXED_INTF(0x1199, 0x68a2, 8)},    /* Sierra Wireless MC7710 in QMI mode */
+diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
+index 5c63b920b471..ed92c1254cff 100644
+--- a/drivers/parisc/dino.c
++++ b/drivers/parisc/dino.c
+@@ -956,7 +956,7 @@ static int __init dino_probe(struct parisc_device *dev)
+ 
+       dino_dev->hba.dev = dev;
+       dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
+-      dino_dev->hba.lmmio_space_offset = 0;   /* CPU addrs == bus addrs */
++      dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
+       spin_lock_init(&dino_dev->dinosaur_pen);
+       dino_dev->hba.iommu = ccio_get_iommu(dev);
+ 
+diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
+index 2776cfe64c09..ef9cf4a21afe 100644
+--- a/drivers/usb/core/usb-acpi.c
++++ b/drivers/usb/core/usb-acpi.c
+@@ -127,6 +127,22 @@ static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle,
+  */
+ #define USB_ACPI_LOCATION_VALID (1 << 31)
+ 
++static struct acpi_device *usb_acpi_find_port(struct acpi_device *parent,
++                                            int raw)
++{
++      struct acpi_device *adev;
++
++      if (!parent)
++              return NULL;
++
++      list_for_each_entry(adev, &parent->children, node) {
++              if (acpi_device_adr(adev) == raw)
++                      return adev;
++      }
++
++      return acpi_find_child_device(parent, raw, false);
++}
++
+ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
+ {
+       struct usb_device *udev;
+@@ -174,8 +190,10 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
+                       int raw;
+ 
+                       raw = usb_hcd_find_raw_port_number(hcd, port1);
+-                      adev = acpi_find_child_device(ACPI_COMPANION(&udev->dev),
+-                                      raw, false);
++
++                      adev = usb_acpi_find_port(ACPI_COMPANION(&udev->dev),
++                                                raw);
++
+                       if (!adev)
+                               return NULL;
+               } else {
+@@ -186,7 +204,9 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
+                               return NULL;
+ 
+                       acpi_bus_get_device(parent_handle, &adev);
+-                      adev = acpi_find_child_device(adev, port1, false);
++
++                      adev = usb_acpi_find_port(adev, port1);
++
+                       if (!adev)
+                               return NULL;
+               }
+diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
+index 4da69dbf7dca..1bdd02a6d6ac 100644
+--- a/drivers/xen/biomerge.c
++++ b/drivers/xen/biomerge.c
+@@ -10,8 +10,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+       unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
+       unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
+ 
+-      return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
+-              ((bfn1 == bfn2) || ((bfn1+1) == bfn2));
++      return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
+ #else
+       /*
+        * XXX: Add support for merging bio_vec when using different page
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 7465c3ea5dd5..9867eda73769 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -666,8 +666,7 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
+ {
+       unsigned long random_variable = 0;
+ 
+-      if ((current->flags & PF_RANDOMIZE) &&
+-              !(current->personality & ADDR_NO_RANDOMIZE)) {
++      if (current->flags & PF_RANDOMIZE) {
+               random_variable = get_random_long();
+               random_variable &= STACK_RND_MASK;
+               random_variable <<= PAGE_SHIFT;
+diff --git a/include/linux/memblock.h b/include/linux/memblock.h
+index 8098695e5d8d..2526c501622f 100644
+--- a/include/linux/memblock.h
++++ b/include/linux/memblock.h
+@@ -65,6 +65,7 @@ extern bool movable_node_enabled;
+ #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+ #define __init_memblock __meminit
+ #define __initdata_memblock __meminitdata
++void memblock_discard(void);
+ #else
+ #define __init_memblock
+ #define __initdata_memblock
+@@ -78,8 +79,6 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
+                                       int nid, ulong flags);
+ phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
+                                  phys_addr_t size, phys_addr_t align);
+-phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
+-phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
+ void memblock_allow_resize(void);
+ int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
+ int memblock_add(phys_addr_t base, phys_addr_t size);
+@@ -114,6 +113,9 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
+ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
+                               phys_addr_t *out_end);
+ 
++void __memblock_free_early(phys_addr_t base, phys_addr_t size);
++void __memblock_free_late(phys_addr_t base, phys_addr_t size);
++
+ /**
+  * for_each_mem_range - iterate through memblock areas from type_a and not
+  * included in type_b. Or just type_a if type_b is NULL.
+diff --git a/include/linux/nmi.h b/include/linux/nmi.h
+index aa3cd0878270..a8d4fc3356d2 100644
+--- a/include/linux/nmi.h
++++ b/include/linux/nmi.h
+@@ -155,6 +155,14 @@ extern int sysctl_hardlockup_all_cpu_backtrace;
+ #define sysctl_softlockup_all_cpu_backtrace 0
+ #define sysctl_hardlockup_all_cpu_backtrace 0
+ #endif
++
++#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
++    defined(CONFIG_HARDLOCKUP_DETECTOR)
++void watchdog_update_hrtimer_threshold(u64 period);
++#else
++static inline void watchdog_update_hrtimer_threshold(u64 period) { }
++#endif
++
+ extern bool is_hardlockup(void);
+ struct ctl_table;
+ extern int proc_watchdog(struct ctl_table *, int ,
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 24a635887f28..fc32347473a9 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -310,8 +310,8 @@ struct pmu {
+        * Notification that the event was mapped or unmapped.  Called
+        * in the context of the mapping task.
+        */
+-      void (*event_mapped)            (struct perf_event *event); /*optional*/
+-      void (*event_unmapped)          (struct perf_event *event); /*optional*/
++      void (*event_mapped)            (struct perf_event *event, struct mm_struct *mm); /* optional */
++      void (*event_unmapped)          (struct perf_event *event, struct mm_struct *mm); /* optional */
+ 
+       /*
+        * Flags for ->add()/->del()/ ->start()/->stop(). There are
+diff --git a/include/linux/pid.h b/include/linux/pid.h
+index 4d179316e431..719582744a2e 100644
+--- a/include/linux/pid.h
++++ b/include/linux/pid.h
+@@ -8,7 +8,9 @@ enum pid_type
+       PIDTYPE_PID,
+       PIDTYPE_PGID,
+       PIDTYPE_SID,
+-      PIDTYPE_MAX
++      PIDTYPE_MAX,
++      /* only valid to __task_pid_nr_ns() */
++      __PIDTYPE_TGID
+ };
+ 
+ /*
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 7f2a1eff2997..35f4517eeba9 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1132,13 +1132,6 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk)
+       return tsk->tgid;
+ }
+ 
+-extern pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
+-
+-static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+-{
+-      return pid_vnr(task_tgid(tsk));
+-}
+-
+ /**
+  * pid_alive - check that a task structure is not stale
+  * @p: Task structure to be checked.
+@@ -1154,23 +1147,6 @@ static inline int pid_alive(const struct task_struct *p)
+       return p->pids[PIDTYPE_PID].pid != NULL;
+ }
+ 
+-static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
+-{
+-      pid_t pid = 0;
+-
+-      rcu_read_lock();
+-      if (pid_alive(tsk))
+-              pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
+-      rcu_read_unlock();
+-
+-      return pid;
+-}
+-
+-static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+-{
+-      return task_ppid_nr_ns(tsk, &init_pid_ns);
+-}
+-
+ static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+ {
+       return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
+@@ -1192,6 +1168,33 @@ static inline pid_t task_session_vnr(struct task_struct *tsk)
+       return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
+ }
+ 
++static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
++{
++      return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
++}
++
++static inline pid_t task_tgid_vnr(struct task_struct *tsk)
++{
++      return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
++}
++
++static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
++{
++      pid_t pid = 0;
++
++      rcu_read_lock();
++      if (pid_alive(tsk))
++              pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
++      rcu_read_unlock();
++
++      return pid;
++}
++
++static inline pid_t task_ppid_nr(const struct task_struct *tsk)
++{
++      return task_ppid_nr_ns(tsk, &init_pid_ns);
++}
++
+ /* Obsolete, do not use: */
+ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+ {
+diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
+index 62d686d96581..ed748ee40029 100644
+--- a/kernel/audit_watch.c
++++ b/kernel/audit_watch.c
+@@ -457,13 +457,15 @@ void audit_remove_watch_rule(struct audit_krule *krule)
+       list_del(&krule->rlist);
+ 
+       if (list_empty(&watch->rules)) {
++              /*
++               * audit_remove_watch() drops our reference to 'parent' which
++               * can get freed. Grab our own reference to be safe.
++               */
++              audit_get_parent(parent);
+               audit_remove_watch(watch);
+-
+-              if (list_empty(&parent->watches)) {
+-                      audit_get_parent(parent);
++              if (list_empty(&parent->watches))
+                       fsnotify_destroy_mark(&parent->mark, audit_watch_group);
+-                      audit_put_parent(parent);
+-              }
++              audit_put_parent(parent);
+       }
+ }
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index d7f726747341..dbb3d273d497 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5084,7 +5084,7 @@ static void perf_mmap_open(struct vm_area_struct *vma)
+               atomic_inc(&event->rb->aux_mmap_count);
+ 
+       if (event->pmu->event_mapped)
+-              event->pmu->event_mapped(event);
++              event->pmu->event_mapped(event, vma->vm_mm);
+ }
+ 
+ static void perf_pmu_output_stop(struct perf_event *event);
+@@ -5107,7 +5107,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
+       unsigned long size = perf_data_size(rb);
+ 
+       if (event->pmu->event_unmapped)
+-              event->pmu->event_unmapped(event);
++              event->pmu->event_unmapped(event, vma->vm_mm);
+ 
+       /*
+        * rb->aux_mmap_count will always drop before rb->mmap_count and
+@@ -5405,7 +5405,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+       vma->vm_ops = &perf_mmap_vmops;
+ 
+       if (event->pmu->event_mapped)
+-              event->pmu->event_mapped(event);
++              event->pmu->event_mapped(event, vma->vm_mm);
+ 
+       return ret;
+ }
+diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
+index c94da688ee9b..cdf94ce959d8 100644
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -898,13 +898,15 @@ EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
+ 
+ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
+ {
+-      unsigned long flags;
++      unsigned long flags, trigger, tmp;
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
+ 
+       if (!desc)
+               return;
+       irq_settings_clr_and_set(desc, clr, set);
+ 
++      trigger = irqd_get_trigger_type(&desc->irq_data);
++
+       irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
+                  IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
+       if (irq_settings_has_no_balance_set(desc))
+@@ -916,7 +918,11 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
+       if (irq_settings_is_level(desc))
+               irqd_set(&desc->irq_data, IRQD_LEVEL);
+ 
+-      irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
++      tmp = irq_settings_get_trigger_mask(desc);
++      if (tmp != IRQ_TYPE_NONE)
++              trigger = tmp;
++
++      irqd_set(&desc->irq_data, trigger);
+ 
+       irq_put_desc_unlock(desc, flags);
+ }
+diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
+index 1a9abc1c8ea0..259a22aa9934 100644
+--- a/kernel/irq/ipi.c
++++ b/kernel/irq/ipi.c
+@@ -165,7 +165,7 @@ irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
+       struct irq_data *data = irq_get_irq_data(irq);
+       struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
+ 
+-      if (!data || !ipimask || cpu > nr_cpu_ids)
++      if (!data || !ipimask || cpu >= nr_cpu_ids)
+               return INVALID_HWIRQ;
+ 
+       if (!cpumask_test_cpu(cpu, ipimask))
+@@ -195,7 +195,7 @@ static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
+       if (!chip->ipi_send_single && !chip->ipi_send_mask)
+               return -EINVAL;
+ 
+-      if (cpu > nr_cpu_ids)
++      if (cpu >= nr_cpu_ids)
+               return -EINVAL;
+ 
+       if (dest) {
+diff --git a/kernel/pid.c b/kernel/pid.c
+index fd1cde1e4576..eeb892e728f8 100644
+--- a/kernel/pid.c
++++ b/kernel/pid.c
+@@ -527,8 +527,11 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+       if (!ns)
+               ns = task_active_pid_ns(current);
+       if (likely(pid_alive(task))) {
+-              if (type != PIDTYPE_PID)
++              if (type != PIDTYPE_PID) {
++                      if (type == __PIDTYPE_TGID)
++                              type = PIDTYPE_PID;
+                       task = task->group_leader;
++              }
+               nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns);
+       }
+       rcu_read_unlock();
+@@ -537,12 +540,6 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+ }
+ EXPORT_SYMBOL(__task_pid_nr_ns);
+ 
+-pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+-{
+-      return pid_nr_ns(task_tgid(tsk), ns);
+-}
+-EXPORT_SYMBOL(task_tgid_nr_ns);
+-
+ struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
+ {
+       return ns_of_pid(task_pid(tsk));
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index 03e0b69bb5bf..b8e938c7273f 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -161,6 +161,7 @@ static void set_sample_period(void)
+        * hardlockup detector generates a warning
+        */
+       sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
++      watchdog_update_hrtimer_threshold(sample_period);
+ }
+ 
+ /* Commands for resetting the watchdog */
+diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
+index 54a427d1f344..cd0986b69cbc 100644
+--- a/kernel/watchdog_hld.c
++++ b/kernel/watchdog_hld.c
+@@ -70,6 +70,62 @@ void touch_nmi_watchdog(void)
+ }
+ EXPORT_SYMBOL(touch_nmi_watchdog);
+ 
++#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
++static DEFINE_PER_CPU(ktime_t, last_timestamp);
++static DEFINE_PER_CPU(unsigned int, nmi_rearmed);
++static ktime_t watchdog_hrtimer_sample_threshold __read_mostly;
++
++void watchdog_update_hrtimer_threshold(u64 period)
++{
++      /*
++       * The hrtimer runs with a period of (watchdog_threshold * 2) / 5
++       *
++       * So it runs effectively with 2.5 times the rate of the NMI
++       * watchdog. That means the hrtimer should fire 2-3 times before
++       * the NMI watchdog expires. The NMI watchdog on x86 is based on
++       * unhalted CPU cycles, so if Turbo-Mode is enabled the CPU cycles
++       * might run way faster than expected and the NMI fires in a
++       * smaller period than the one deduced from the nominal CPU
++       * frequency. Depending on the Turbo-Mode factor this might be fast
++       * enough to get the NMI period smaller than the hrtimer watchdog
++       * period and trigger false positives.
++       *
++       * The sample threshold is used to check in the NMI handler whether
++       * the minimum time between two NMI samples has elapsed. That
++       * prevents false positives.
++       *
++       * Set this to 4/5 of the actual watchdog threshold period so the
++       * hrtimer is guaranteed to fire at least once within the real
++       * watchdog threshold.
++       */
++      watchdog_hrtimer_sample_threshold = period * 2;
++}
++
++static bool watchdog_check_timestamp(void)
++{
++      ktime_t delta, now = ktime_get_mono_fast_ns();
++
++      delta = now - __this_cpu_read(last_timestamp);
++      if (delta < watchdog_hrtimer_sample_threshold) {
++              /*
++               * If ktime is jiffies based, a stalled timer would prevent
++               * jiffies from being incremented and the filter would look
++               * at a stale timestamp and never trigger.
++               */
++              if (__this_cpu_inc_return(nmi_rearmed) < 10)
++                      return false;
++      }
++      __this_cpu_write(nmi_rearmed, 0);
++      __this_cpu_write(last_timestamp, now);
++      return true;
++}
++#else
++static inline bool watchdog_check_timestamp(void)
++{
++      return true;
++}
++#endif
++
+ static struct perf_event_attr wd_hw_attr = {
+       .type           = PERF_TYPE_HARDWARE,
+       .config         = PERF_COUNT_HW_CPU_CYCLES,
+@@ -94,6 +150,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
+               return;
+       }
+ 
++      if (!watchdog_check_timestamp())
++              return;
++
+       /* check for a hardlockup
+        * This is done by making sure our timer interrupt
+        * is incrementing.  The timer interrupt should have
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index e4587ebe52c7..1f1cb51005de 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -344,6 +344,13 @@ config SECTION_MISMATCH_WARN_ONLY
+ 
+         If unsure, say Y.
+ 
++#
++# Enables a timestamp based low pass filter to compensate for perf based
++# hard lockup detection which runs too fast due to turbo modes.
++#
++config HARDLOCKUP_CHECK_TIMESTAMP
++      bool
++
+ #
+ # Select this config option from the architecture Kconfig, if it
+ # is preferred to always offer frame pointers as a config
+diff --git a/mm/cma_debug.c b/mm/cma_debug.c
+index 595b757bef72..c03ccbc405a0 100644
+--- a/mm/cma_debug.c
++++ b/mm/cma_debug.c
+@@ -167,7 +167,7 @@ static void cma_debugfs_add_one(struct cma *cma, int idx)
+       char name[16];
+       int u32s;
+ 
+-      sprintf(name, "cma-%s", cma->name);
++      scnprintf(name, sizeof(name), "cma-%s", cma->name);
+ 
+       tmp = debugfs_create_dir(name, cma_debugfs_root);
+ 
+diff --git a/mm/memblock.c b/mm/memblock.c
+index 7b8a5db76a2f..7087d5578866 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -288,31 +288,27 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
+ }
+ 
+ #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+-
+-phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
+-                                      phys_addr_t *addr)
+-{
+-      if (memblock.reserved.regions == memblock_reserved_init_regions)
+-              return 0;
+-
+-      *addr = __pa(memblock.reserved.regions);
+-
+-      return PAGE_ALIGN(sizeof(struct memblock_region) *
+-                        memblock.reserved.max);
+-}
+-
+-phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
+-                                      phys_addr_t *addr)
++/**
++ * Discard memory and reserved arrays if they were allocated
++ */
++void __init memblock_discard(void)
+ {
+-      if (memblock.memory.regions == memblock_memory_init_regions)
+-              return 0;
++      phys_addr_t addr, size;
+ 
+-      *addr = __pa(memblock.memory.regions);
++      if (memblock.reserved.regions != memblock_reserved_init_regions) {
++              addr = __pa(memblock.reserved.regions);
++              size = PAGE_ALIGN(sizeof(struct memblock_region) *
++                                memblock.reserved.max);
++              __memblock_free_late(addr, size);
++      }
+ 
+-      return PAGE_ALIGN(sizeof(struct memblock_region) *
+-                        memblock.memory.max);
++      if (memblock.memory.regions != memblock_memory_init_regions) {
++              addr = __pa(memblock.memory.regions);
++              size = PAGE_ALIGN(sizeof(struct memblock_region) *
++                                memblock.memory.max);
++              __memblock_free_late(addr, size);
++      }
+ }
+-
+ #endif
+ 
+ /**
+diff --git a/mm/memory.c b/mm/memory.c
+index b0c3d1556a94..9e50ffcf9639 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3882,8 +3882,18 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+        * further.
+        */
+       if (unlikely((current->flags & PF_KTHREAD) && !(ret & VM_FAULT_ERROR)
+-                              && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags)))
++                              && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags))) {
++
++              /*
++               * We are going to enforce SIGBUS but the PF path might have
++               * dropped the mmap_sem already so take it again so that
++               * we do not break expectations of all arch specific PF paths
++               * and g-u-p
++               */
++              if (ret & VM_FAULT_RETRY)
++                      down_read(&vma->vm_mm->mmap_sem);
+               ret = VM_FAULT_SIGBUS;
++      }
+ 
+       return ret;
+ }
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 37d0b334bfe9..e0157546e6b5 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -931,11 +931,6 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
+               *policy |= (pol->flags & MPOL_MODE_FLAGS);
+       }
+ 
+-      if (vma) {
+-              up_read(&current->mm->mmap_sem);
+-              vma = NULL;
+-      }
+-
+       err = 0;
+       if (nmask) {
+               if (mpol_store_user_nodemask(pol)) {
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 89a0a1707f4c..2586d5ab9b99 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -41,6 +41,7 @@
+ #include <linux/page_idle.h>
+ #include <linux/page_owner.h>
+ #include <linux/sched/mm.h>
++#include <linux/ptrace.h>
+ 
+ #include <asm/tlbflush.h>
+ 
+@@ -1649,7 +1650,6 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+               const int __user *, nodes,
+               int __user *, status, int, flags)
+ {
+-      const struct cred *cred = current_cred(), *tcred;
+       struct task_struct *task;
+       struct mm_struct *mm;
+       int err;
+@@ -1673,14 +1673,9 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+ 
+       /*
+        * Check if this process has the right to modify the specified
+-       * process. The right exists if the process has administrative
+-       * capabilities, superuser privileges or the same
+-       * userid as the target process.
++       * process. Use the regular "ptrace_may_access()" checks.
+        */
+-      tcred = __task_cred(task);
+-      if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
+-          !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
+-          !capable(CAP_SYS_NICE)) {
++      if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
+               rcu_read_unlock();
+               err = -EPERM;
+               goto out;
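
The move_pages() hunk replaces four open-coded uid comparisons with a single ptrace_may_access() call, which also applies the dumpability and LSM checks the manual version never made. For reference, a toy restatement of the policy the removed lines expressed; may_control() is an illustrative stand-in, not a kernel API:

    #include <stdbool.h>
    #include <sys/types.h>

    struct cred { uid_t uid, euid, suid; };

    /* The removed check allowed access when any of these uid pairs
     * matched (or the caller had CAP_SYS_NICE, elided here). */
    static bool may_control(const struct cred *c, const struct cred *t)
    {
        return c->euid == t->suid || c->euid == t->uid ||
               c->uid  == t->suid || c->uid  == t->uid;
    }
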
+diff --git a/mm/nobootmem.c b/mm/nobootmem.c
+index 487dad610731..ab998125f04d 100644
+--- a/mm/nobootmem.c
++++ b/mm/nobootmem.c
+@@ -146,22 +146,6 @@ static unsigned long __init free_low_memory_core_early(void)
+                               NULL)
+               count += __free_memory_core(start, end);
+ 
+-#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+-      {
+-              phys_addr_t size;
+-
+-              /* Free memblock.reserved array if it was allocated */
+-              size = get_allocated_memblock_reserved_regions_info(&start);
+-              if (size)
+-                      count += __free_memory_core(start, start + size);
+-
+-              /* Free memblock.memory array if it was allocated */
+-              size = get_allocated_memblock_memory_regions_info(&start);
+-              if (size)
+-                      count += __free_memory_core(start, start + size);
+-      }
+-#endif
+-
+       return count;
+ }
+ 
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 07569fa25760..4d16ef9d42a9 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1582,6 +1582,10 @@ void __init page_alloc_init_late(void)
+       /* Reinit limits that are based on free pages after the kernel is up */
+       files_maxfiles_init();
+ #endif
++#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
++      /* Discard memblock private memory */
++      memblock_discard();
++#endif
+ 
+       for_each_populated_zone(zone)
+               set_zone_contiguous(zone);
+diff --git a/mm/slub.c b/mm/slub.c
+index 8addc535bcdc..a0f3c56611c6 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -5637,13 +5637,14 @@ static void sysfs_slab_remove_workfn(struct work_struct *work)
+                * A cache is never shut down before deactivation is
+                * complete, so no need to worry about synchronization.
+                */
+-              return;
++              goto out;
+ 
+ #ifdef CONFIG_MEMCG
+       kset_unregister(s->memcg_kset);
+ #endif
+       kobject_uevent(&s->kobj, KOBJ_REMOVE);
+       kobject_del(&s->kobj);
++out:
+       kobject_put(&s->kobj);
+ }
+ 
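
The slub hunk turns an early return into goto out so the final kobject_put() runs on every path; the early return leaked the reference taken when the work item was queued. The shape of that fix, as a self-contained sketch with illustrative names:

    #include <stdlib.h>

    struct obj { int refs; };

    static void obj_put(struct obj *o)
    {
        if (--o->refs == 0)
            free(o);
    }

    static void remove_work(struct obj *o, int still_deactivating)
    {
        if (still_deactivating)
            goto out;   /* a bare `return` here would leak the ref */

        /* ... unregister and clean up o ... */
    out:
        obj_put(o);     /* runs on every exit path */
    }
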
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index ecc97f74ab18..104eb720ba43 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1669,7 +1669,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+       struct page **pages;
+       unsigned int nr_pages, array_size, i;
+       const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
+-      const gfp_t alloc_mask = gfp_mask | __GFP_HIGHMEM | __GFP_NOWARN;
++      const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
++      const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
++                                      0 :
++                                      __GFP_HIGHMEM;
+ 
+       nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
+       array_size = (nr_pages * sizeof(struct page *));
+@@ -1677,7 +1680,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+       area->nr_pages = nr_pages;
+       /* Please note that the recursion is strictly bounded. */
+       if (array_size > PAGE_SIZE) {
+-              pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
++              pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
+                               PAGE_KERNEL, node, area->caller);
+       } else {
+               pages = kmalloc_node(array_size, nested_gfp, node);
+@@ -1698,9 +1701,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+               }
+ 
+               if (node == NUMA_NO_NODE)
+-                      page = alloc_page(alloc_mask);
++                      page = alloc_page(alloc_mask|highmem_mask);
+               else
+-                      page = alloc_pages_node(node, alloc_mask, 0);
++                      page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
+ 
+               if (unlikely(!page)) {
+                       /* Successfully allocated i pages, free them in __vunmap() */
+@@ -1708,7 +1711,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+                       goto fail;
+               }
+               area->pages[i] = page;
+-              if (gfpflags_allow_blocking(gfp_mask))
++              if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
+                       cond_resched();
+       }
+ 
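
The vmalloc hunks stop forcing __GFP_HIGHMEM onto every allocation: when the caller restricted the allocation to a DMA zone, adding the highmem bit produced a contradictory mask. A compilable sketch of the mask derivation; the flag values are placeholders, not the kernel's real gfp bits:

    #include <stdio.h>

    #define GFP_DMA      0x01u
    #define GFP_DMA32    0x02u
    #define GFP_HIGHMEM  0x04u
    #define GFP_NOWARN   0x08u

    /* Only add the highmem bit when no DMA zone was requested. */
    static unsigned int vmalloc_masks(unsigned int gfp, unsigned int *highmem)
    {
        *highmem = (gfp & (GFP_DMA | GFP_DMA32)) ? 0 : GFP_HIGHMEM;
        return gfp | GFP_NOWARN;   /* alloc_mask no longer forces highmem */
    }

    int main(void)
    {
        unsigned int hm, alloc = vmalloc_masks(GFP_DMA, &hm);

        printf("alloc=%#x highmem=%#x\n", alloc, hm);   /* highmem=0 */
        return 0;
    }
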
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index f3b1d7f50b81..67c4c68ce041 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1502,16 +1502,11 @@ static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client,
+ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
+ {
+       struct snd_seq_queue_info *info = arg;
+-      int result;
+       struct snd_seq_queue *q;
+ 
+-      result = snd_seq_queue_alloc(client->number, info->locked, info->flags);
+-      if (result < 0)
+-              return result;
+-
+-      q = queueptr(result);
+-      if (q == NULL)
+-              return -EINVAL;
++      q = snd_seq_queue_alloc(client->number, info->locked, info->flags);
++      if (IS_ERR(q))
++              return PTR_ERR(q);
+ 
+       info->queue = q->queue;
+       info->locked = q->locked;
+@@ -1521,7 +1516,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
+       if (!info->name[0])
+               snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
+       strlcpy(q->name, info->name, sizeof(q->name));
+-      queuefree(q);
++      snd_use_lock_free(&q->use_lock);
+ 
+       return 0;
+ }
+diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
+index 450c5187eecb..79e0c5604ef8 100644
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -184,22 +184,26 @@ void __exit snd_seq_queues_delete(void)
+ static void queue_use(struct snd_seq_queue *queue, int client, int use);
+ 
+ /* allocate a new queue -
+- * return queue index value or negative value for error
++ * return pointer to new queue or ERR_PTR(-errno) for error
++ * The new queue's use_lock is set to 1. It is the caller's responsibility to
++ * call snd_use_lock_free(&q->use_lock).
+  */
+-int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
++struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
+ {
+       struct snd_seq_queue *q;
+ 
+       q = queue_new(client, locked);
+       if (q == NULL)
+-              return -ENOMEM;
++              return ERR_PTR(-ENOMEM);
+       q->info_flags = info_flags;
+       queue_use(q, client, 1);
++      snd_use_lock_use(&q->use_lock);
+       if (queue_list_add(q) < 0) {
++              snd_use_lock_free(&q->use_lock);
+               queue_delete(q);
+-              return -ENOMEM;
++              return ERR_PTR(-ENOMEM);
+       }
+-      return q->queue;
++      return q;
+ }
+ 
+ /* delete a queue - queue must be owned by the client */
+diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h
+index 30c8111477f6..719093489a2c 100644
+--- a/sound/core/seq/seq_queue.h
++++ b/sound/core/seq/seq_queue.h
+@@ -71,7 +71,7 @@ void snd_seq_queues_delete(void);
+ 
+ 
+ /* create new queue (constructor) */
+-int snd_seq_queue_alloc(int client, int locked, unsigned int flags);
++struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int flags);
+ 
+ /* delete queue (destructor) */
+ int snd_seq_queue_delete(int client, int queueid);
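
The sequencer change reworks snd_seq_queue_alloc() to return the queue pointer directly, encoding failures as ERR_PTR(-errno), and to hand the caller a use_lock reference that it must drop with snd_use_lock_free(); that closes the window in which the queue could be deleted between allocation and use. A self-contained userspace sketch of the ERR_PTR convention, mirroring include/linux/err.h in spirit:

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    /* Errnos are encoded into the top page of the address range so a
     * single return value can carry a pointer or an error code. */
    static inline void *ERR_PTR(long err) { return (void *)err; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *queue_alloc(int fail)
    {
        if (fail)
            return ERR_PTR(-ENOMEM);
        return malloc(64);
    }

    int main(void)
    {
        void *q = queue_alloc(1);

        if (IS_ERR(q))
            printf("alloc failed: %ld\n", PTR_ERR(q));  /* prints -12 */
        else
            free(q);
        return 0;
    }
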
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 082736c539bc..e630813c5008 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -542,6 +542,8 @@ int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
+ 
+       if (size < sizeof(scale))
+               return -ENOMEM;
++      if (cval->min_mute)
++              scale[0] = SNDRV_CTL_TLVT_DB_MINMAX_MUTE;
+       scale[2] = cval->dBmin;
+       scale[3] = cval->dBmax;
+       if (copy_to_user(_tlv, scale, sizeof(scale)))
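
The mixer hunk switches the advertised TLV type to the MINMAX_MUTE variant when a control's lowest step actually mutes, so user space can draw the bottom of the range as mute instead of a very quiet level. A sketch of the four-word dB-range block being filled; the TLVT_* values here are assumed, check include/uapi/sound/tlv.h for the real ones:

    #include <stdint.h>

    enum {                            /* assumed values, see tlv.h */
        TLVT_DB_MINMAX      = 4,
        TLVT_DB_MINMAX_MUTE = 5,
    };

    /* Layout: { type, payload length, min, max }, in 0.01 dB units. */
    static void fill_db_tlv(uint32_t scale[4], int32_t min_cdB,
                            int32_t max_cdB, int min_mute)
    {
        scale[0] = min_mute ? TLVT_DB_MINMAX_MUTE : TLVT_DB_MINMAX;
        scale[1] = 2 * sizeof(uint32_t);
        scale[2] = (uint32_t)min_cdB;
        scale[3] = (uint32_t)max_cdB;
    }
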
+diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
+index 3417ef347e40..2b4b067646ab 100644
+--- a/sound/usb/mixer.h
++++ b/sound/usb/mixer.h
+@@ -64,6 +64,7 @@ struct usb_mixer_elem_info {
+       int cached;
+       int cache_val[MAX_CHANNELS];
+       u8 initialized;
++      u8 min_mute;
+       void *private_data;
+ };
+ 
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index 4fa0053a40af..7fbc90f5c6de 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -1878,6 +1878,12 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
+               if (unitid == 7 && cval->control == UAC_FU_VOLUME)
+                       snd_dragonfly_quirk_db_scale(mixer, cval, kctl);
+               break;
++      /* lowest playback value is muted on C-Media devices */
++      case USB_ID(0x0d8c, 0x000c):
++      case USB_ID(0x0d8c, 0x0014):
++              if (strstr(kctl->id.name, "Playback"))
++                      cval->min_mute = 1;
++              break;
+       }
+ }
+ 
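
The quirk table matches devices by a packed vendor/product key, which is why the two C-Media ids share one case; the check on the control name limits the quirk to playback volumes. A compilable sketch of that dispatch; USB_ID here mirrors the usbaudio.h macro in spirit:

    #include <stdint.h>
    #include <string.h>

    #define USB_ID(vendor, product) (((uint32_t)(vendor) << 16) | (product))

    static int needs_min_mute(uint32_t id, const char *ctl_name)
    {
        switch (id) {
        case USB_ID(0x0d8c, 0x000c):  /* C-Media devices: lowest */
        case USB_ID(0x0d8c, 0x0014):  /* playback step is muted  */
            return strstr(ctl_name, "Playback") != NULL;
        default:
            return 0;
        }
    }
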
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index d7b0b0a3a2db..6a03f9697039 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1142,6 +1142,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+       case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
+       case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
+       case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
++      case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
+       case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
+       case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
+       case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
+@@ -1374,6 +1375,10 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+                       }
+               }
+               break;
++      case USB_ID(0x16d0, 0x0a23):
++              if (fp->altsetting == 2)
++                      return SNDRV_PCM_FMTBIT_DSD_U32_BE;
++              break;
+ 
+       default:
+               break;
