commit:     5dc3041f3bac29d77549d3e6c30940707f468149
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 12 17:59:39 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 12 17:59:39 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5dc3041f

Linux patch 4.4.61

 0000_README             |    4 +
 1060_linux-4.4.61.patch | 1527 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1531 insertions(+)

diff --git a/0000_README b/0000_README
index 6cc653c..84c1648 100644
--- a/0000_README
+++ b/0000_README
@@ -283,6 +283,10 @@ Patch:  1059_linux-4.4.60.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.4.60
 
+Patch:  1060_linux-4.4.61.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.4.61
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1060_linux-4.4.61.patch b/1060_linux-4.4.61.patch
new file mode 100644
index 0000000..642a5bd
--- /dev/null
+++ b/1060_linux-4.4.61.patch
@@ -0,0 +1,1527 @@
+diff --git a/Makefile b/Makefile
+index fb7c2b40753d..ef5045b8201d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 60
++SUBLEVEL = 61
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index 11b6595c2672..f91ee2f27b41 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -796,6 +796,7 @@ void stage2_unmap_vm(struct kvm *kvm)
+       int idx;
+ 
+       idx = srcu_read_lock(&kvm->srcu);
++      down_read(&current->mm->mmap_sem);
+       spin_lock(&kvm->mmu_lock);
+ 
+       slots = kvm_memslots(kvm);
+@@ -803,6 +804,7 @@ void stage2_unmap_vm(struct kvm *kvm)
+               stage2_unmap_memslot(kvm, memslot);
+ 
+       spin_unlock(&kvm->mmu_lock);
++      up_read(&current->mm->mmap_sem);
+       srcu_read_unlock(&kvm->srcu, idx);
+ }
+ 
+@@ -1759,6 +1761,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+           (KVM_PHYS_SIZE >> PAGE_SHIFT))
+               return -EFAULT;
+ 
++      down_read(&current->mm->mmap_sem);
+       /*
+        * A memory region could potentially cover multiple VMAs, and any holes
+        * between them, so iterate over all of them to find out if we can map
+@@ -1802,8 +1805,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                       pa += vm_start - vma->vm_start;
+ 
+                       /* IO region dirty page logging not allowed */
+-                      if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
+-                              return -EINVAL;
++                      if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
++                              ret = -EINVAL;
++                              goto out;
++                      }
+ 
+                       ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
+                                                   vm_end - vm_start,
+@@ -1815,7 +1820,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+       } while (hva < reg_end);
+ 
+       if (change == KVM_MR_FLAGS_ONLY)
+-              return ret;
++              goto out;
+ 
+       spin_lock(&kvm->mmu_lock);
+       if (ret)
+@@ -1823,6 +1828,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+       else
+               stage2_flush_memslot(kvm, memslot);
+       spin_unlock(&kvm->mmu_lock);
++out:
++      up_read(&current->mm->mmap_sem);
+       return ret;
+ }
+ 
+diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
+index 273e61225c27..07238b39638c 100644
+--- a/arch/metag/include/asm/uaccess.h
++++ b/arch/metag/include/asm/uaccess.h
+@@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
+ 
+ #define strlen_user(str) strnlen_user(str, 32767)
+ 
+-extern unsigned long __must_check __copy_user_zeroing(void *to,
+-                                                    const void __user *from,
+-                                                    unsigned long n);
++extern unsigned long raw_copy_from_user(void *to, const void __user *from,
++                                      unsigned long n);
+ 
+ static inline unsigned long
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++      unsigned long res = n;
+       if (likely(access_ok(VERIFY_READ, from, n)))
+-              return __copy_user_zeroing(to, from, n);
+-      memset(to, 0, n);
+-      return n;
++              res = raw_copy_from_user(to, from, n);
++      if (unlikely(res))
++              memset(to + (n - res), 0, res);
++      return res;
+ }
+ 
+-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
++#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
+ #define __copy_from_user_inatomic __copy_from_user
+ 
+ extern unsigned long __must_check __copy_user(void __user *to,
+diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
+index b3ebfe9c8e88..2792fc621088 100644
+--- a/arch/metag/lib/usercopy.c
++++ b/arch/metag/lib/usercopy.c
+@@ -29,7 +29,6 @@
+               COPY                                             \
+               "1:\n"                                           \
+               "       .section .fixup,\"ax\"\n"                \
+-              "       MOV D1Ar1,#0\n"                          \
+               FIXUP                                            \
+               "       MOVT    D1Ar1,#HI(1b)\n"                 \
+               "       JUMP    D1Ar1,#LO(1b)\n"                 \
+@@ -260,27 +259,31 @@
+               "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "22:\n"                                                 \
+               "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+-              "SUB    %3, %3, #32\n"                                  \
+               "23:\n"                                                 \
+-              "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
++              "SUB    %3, %3, #32\n"                                  \
+               "24:\n"                                                 \
++              "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
++              "25:\n"                                                 \
+               "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "26:\n"                                                 \
+               "SUB    %3, %3, #32\n"                                  \
+               "DCACHE [%1+#-64], D0Ar6\n"                             \
+               "BR     $Lloop"id"\n"                                   \
+                                                                       \
+               "MOV    RAPF, %1\n"                                     \
+-              "25:\n"                                                 \
++              "27:\n"                                                 \
+               "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "26:\n"                                                 \
++              "28:\n"                                                 \
+               "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "29:\n"                                                 \
+               "SUB    %3, %3, #32\n"                                  \
+-              "27:\n"                                                 \
++              "30:\n"                                                 \
+               "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "28:\n"                                                 \
++              "31:\n"                                                 \
+               "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "32:\n"                                                 \
+               "SUB    %0, %0, #8\n"                                   \
+-              "29:\n"                                                 \
++              "33:\n"                                                 \
+               "SETL   [%0++], D0.7, D1.7\n"                           \
+               "SUB    %3, %3, #32\n"                                  \
+               "1:"                                                    \
+@@ -312,11 +315,15 @@
+               "       .long 26b,3b\n"                                 \
+               "       .long 27b,3b\n"                                 \
+               "       .long 28b,3b\n"                                 \
+-              "       .long 29b,4b\n"                                 \
++              "       .long 29b,3b\n"                                 \
++              "       .long 30b,3b\n"                                 \
++              "       .long 31b,3b\n"                                 \
++              "       .long 32b,3b\n"                                 \
++              "       .long 33b,4b\n"                                 \
+               "       .previous\n"                                    \
+               : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
+               : "0" (to), "1" (from), "2" (ret), "3" (n)              \
+-              : "D1Ar1", "D0Ar2", "memory")
++              : "D1Ar1", "D0Ar2", "cc", "memory")
+ 
+ /*    rewind 'to' and 'from'  pointers when a fault occurs
+  *
+@@ -342,7 +349,7 @@
+ #define __asm_copy_to_user_64bit_rapf_loop(to,        from, ret, n, id)\
+       __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
+               "LSR    D0Ar2, D0Ar2, #8\n"                             \
+-              "AND    D0Ar2, D0Ar2, #0x7\n"                           \
++              "ANDS   D0Ar2, D0Ar2, #0x7\n"                           \
+               "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
+               "SUB    D0Ar2, D0Ar2, #1\n"                             \
+               "MOV    D1Ar1, #4\n"                                    \
+@@ -403,47 +410,55 @@
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "22:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+-              "SUB    %3, %3, #16\n"                                  \
+               "23:\n"                                                 \
+-              "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "24:\n"                                                 \
+-              "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "SUB    %3, %3, #16\n"                                  \
+-              "25:\n"                                                 \
++              "24:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "26:\n"                                                 \
++              "25:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "26:\n"                                                 \
+               "SUB    %3, %3, #16\n"                                  \
+               "27:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "28:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "29:\n"                                                 \
++              "SUB    %3, %3, #16\n"                                  \
++              "30:\n"                                                 \
++              "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
++              "31:\n"                                                 \
++              "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "32:\n"                                                 \
+               "SUB    %3, %3, #16\n"                                  \
+               "DCACHE [%1+#-64], D0Ar6\n"                             \
+               "BR     $Lloop"id"\n"                                   \
+                                                                       \
+               "MOV    RAPF, %1\n"                                     \
+-              "29:\n"                                                 \
++              "33:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "30:\n"                                                 \
++              "34:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "35:\n"                                                 \
+               "SUB    %3, %3, #16\n"                                  \
+-              "31:\n"                                                 \
++              "36:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "32:\n"                                                 \
++              "37:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "38:\n"                                                 \
+               "SUB    %3, %3, #16\n"                                  \
+-              "33:\n"                                                 \
++              "39:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "34:\n"                                                 \
++              "40:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "41:\n"                                                 \
+               "SUB    %3, %3, #16\n"                                  \
+-              "35:\n"                                                 \
++              "42:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+-              "36:\n"                                                 \
++              "43:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
++              "44:\n"                                                 \
+               "SUB    %0, %0, #4\n"                                   \
+-              "37:\n"                                                 \
++              "45:\n"                                                 \
+               "SETD   [%0++], D0.7\n"                                 \
+               "SUB    %3, %3, #16\n"                                  \
+               "1:"                                                    \
+@@ -483,11 +498,19 @@
+               "       .long 34b,3b\n"                                 \
+               "       .long 35b,3b\n"                                 \
+               "       .long 36b,3b\n"                                 \
+-              "       .long 37b,4b\n"                                 \
++              "       .long 37b,3b\n"                                 \
++              "       .long 38b,3b\n"                                 \
++              "       .long 39b,3b\n"                                 \
++              "       .long 40b,3b\n"                                 \
++              "       .long 41b,3b\n"                                 \
++              "       .long 42b,3b\n"                                 \
++              "       .long 43b,3b\n"                                 \
++              "       .long 44b,3b\n"                                 \
++              "       .long 45b,4b\n"                                 \
+               "       .previous\n"                                    \
+               : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
+               : "0" (to), "1" (from), "2" (ret), "3" (n)              \
+-              : "D1Ar1", "D0Ar2", "memory")
++              : "D1Ar1", "D0Ar2", "cc", "memory")
+ 
+ /*    rewind 'to' and 'from'  pointers when a fault occurs
+  *
+@@ -513,7 +536,7 @@
+ #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
+       __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
+               "LSR    D0Ar2, D0Ar2, #8\n"                             \
+-              "AND    D0Ar2, D0Ar2, #0x7\n"                           \
++              "ANDS   D0Ar2, D0Ar2, #0x7\n"                           \
+               "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
+               "SUB    D0Ar2, D0Ar2, #1\n"                             \
+               "MOV    D1Ar1, #4\n"                                    \
+@@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+       if ((unsigned long) src & 1) {
+               __asm_copy_to_user_1(dst, src, retn);
+               n--;
++              if (retn)
++                      return retn + n;
+       }
+       if ((unsigned long) dst & 1) {
+               /* Worst case - byte copy */
+               while (n > 0) {
+                       __asm_copy_to_user_1(dst, src, retn);
+                       n--;
++                      if (retn)
++                              return retn + n;
+               }
+       }
+       if (((unsigned long) src & 2) && n >= 2) {
+               __asm_copy_to_user_2(dst, src, retn);
+               n -= 2;
++              if (retn)
++                      return retn + n;
+       }
+       if ((unsigned long) dst & 2) {
+               /* Second worst case - word copy */
+               while (n >= 2) {
+                       __asm_copy_to_user_2(dst, src, retn);
+                       n -= 2;
++                      if (retn)
++                              return retn + n;
+               }
+       }
+ 
+@@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+               while (n >= 8) {
+                       __asm_copy_to_user_8x64(dst, src, retn);
+                       n -= 8;
++                      if (retn)
++                              return retn + n;
+               }
+       }
+       if (n >= RAPF_MIN_BUF_SIZE) {
+@@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+               while (n >= 8) {
+                       __asm_copy_to_user_8x64(dst, src, retn);
+                       n -= 8;
++                      if (retn)
++                              return retn + n;
+               }
+       }
+ #endif
+@@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+       while (n >= 16) {
+               __asm_copy_to_user_16(dst, src, retn);
+               n -= 16;
++              if (retn)
++                      return retn + n;
+       }
+ 
+       while (n >= 4) {
+               __asm_copy_to_user_4(dst, src, retn);
+               n -= 4;
++              if (retn)
++                      return retn + n;
+       }
+ 
+       switch (n) {
+@@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
+               break;
+       }
+ 
++      /*
++       * If we get here, retn correctly reflects the number of failing
++       * bytes.
++       */
+       return retn;
+ }
+ EXPORT_SYMBOL(__copy_user);
+@@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user);
+       __asm_copy_user_cont(to, from, ret,     \
+               "       GETB D1Ar1,[%1++]\n"    \
+               "2:     SETB [%0++],D1Ar1\n",   \
+-              "3:     ADD  %2,%2,#1\n"        \
+-              "       SETB [%0++],D1Ar1\n",   \
++              "3:     ADD  %2,%2,#1\n",       \
+               "       .long 2b,3b\n")
+ 
+ #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+       __asm_copy_user_cont(to, from, ret,             \
+               "       GETW D1Ar1,[%1++]\n"            \
+               "2:     SETW [%0++],D1Ar1\n" COPY,      \
+-              "3:     ADD  %2,%2,#2\n"                \
+-              "       SETW [%0++],D1Ar1\n" FIXUP,     \
++              "3:     ADD  %2,%2,#2\n" FIXUP,         \
+               "       .long 2b,3b\n" TENTRY)
+ 
+ #define __asm_copy_from_user_2(to, from, ret) \
+@@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user);
+       __asm_copy_from_user_2x_cont(to, from, ret,     \
+               "       GETB D1Ar1,[%1++]\n"            \
+               "4:     SETB [%0++],D1Ar1\n",           \
+-              "5:     ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
++              "5:     ADD  %2,%2,#1\n",               \
+               "       .long 4b,5b\n")
+ 
+ #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+       __asm_copy_user_cont(to, from, ret,             \
+               "       GETD D1Ar1,[%1++]\n"            \
+               "2:     SETD [%0++],D1Ar1\n" COPY,      \
+-              "3:     ADD  %2,%2,#4\n"                \
+-              "       SETD [%0++],D1Ar1\n" FIXUP,     \
++              "3:     ADD  %2,%2,#4\n" FIXUP,         \
+               "       .long 2b,3b\n" TENTRY)
+ 
+ #define __asm_copy_from_user_4(to, from, ret) \
+       __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
+ 
+-#define __asm_copy_from_user_5(to, from, ret) \
+-      __asm_copy_from_user_4x_cont(to, from, ret,     \
+-              "       GETB D1Ar1,[%1++]\n"            \
+-              "4:     SETB [%0++],D1Ar1\n",           \
+-              "5:     ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
+-              "       .long 4b,5b\n")
+-
+-#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-      __asm_copy_from_user_4x_cont(to, from, ret,     \
+-              "       GETW D1Ar1,[%1++]\n"            \
+-              "4:     SETW [%0++],D1Ar1\n" COPY,      \
+-              "5:     ADD  %2,%2,#2\n"                \
+-              "       SETW [%0++],D1Ar1\n" FIXUP,     \
+-              "       .long 4b,5b\n" TENTRY)
+-
+-#define __asm_copy_from_user_6(to, from, ret) \
+-      __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_7(to, from, ret) \
+-      __asm_copy_from_user_6x_cont(to, from, ret,     \
+-              "       GETB D1Ar1,[%1++]\n"            \
+-              "6:     SETB [%0++],D1Ar1\n",           \
+-              "7:     ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
+-              "       .long 6b,7b\n")
+-
+-#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-      __asm_copy_from_user_4x_cont(to, from, ret,     \
+-              "       GETD D1Ar1,[%1++]\n"            \
+-              "4:     SETD [%0++],D1Ar1\n" COPY,      \
+-              "5:     ADD  %2,%2,#4\n"                        \
+-              "       SETD [%0++],D1Ar1\n" FIXUP,             \
+-              "       .long 4b,5b\n" TENTRY)
+-
+-#define __asm_copy_from_user_8(to, from, ret) \
+-      __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_9(to, from, ret) \
+-      __asm_copy_from_user_8x_cont(to, from, ret,     \
+-              "       GETB D1Ar1,[%1++]\n"            \
+-              "6:     SETB [%0++],D1Ar1\n",           \
+-              "7:     ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
+-              "       .long 6b,7b\n")
+-
+-#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-      __asm_copy_from_user_8x_cont(to, from, ret,     \
+-              "       GETW D1Ar1,[%1++]\n"            \
+-              "6:     SETW [%0++],D1Ar1\n" COPY,      \
+-              "7:     ADD  %2,%2,#2\n"                \
+-              "       SETW [%0++],D1Ar1\n" FIXUP,     \
+-              "       .long 6b,7b\n" TENTRY)
+-
+-#define __asm_copy_from_user_10(to, from, ret) \
+-      __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_11(to, from, ret)                \
+-      __asm_copy_from_user_10x_cont(to, from, ret,    \
+-              "       GETB D1Ar1,[%1++]\n"            \
+-              "8:     SETB [%0++],D1Ar1\n",           \
+-              "9:     ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
+-              "       .long 8b,9b\n")
+-
+-#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-      __asm_copy_from_user_8x_cont(to, from, ret,     \
+-              "       GETD D1Ar1,[%1++]\n"            \
+-              "6:     SETD [%0++],D1Ar1\n" COPY,      \
+-              "7:     ADD  %2,%2,#4\n"                \
+-              "       SETD [%0++],D1Ar1\n" FIXUP,     \
+-              "       .long 6b,7b\n" TENTRY)
+-
+-#define __asm_copy_from_user_12(to, from, ret) \
+-      __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_13(to, from, ret) \
+-      __asm_copy_from_user_12x_cont(to, from, ret,    \
+-              "       GETB D1Ar1,[%1++]\n"            \
+-              "8:     SETB [%0++],D1Ar1\n",           \
+-              "9:     ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
+-              "       .long 8b,9b\n")
+-
+-#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-      __asm_copy_from_user_12x_cont(to, from, ret,    \
+-              "       GETW D1Ar1,[%1++]\n"            \
+-              "8:     SETW [%0++],D1Ar1\n" COPY,      \
+-              "9:     ADD  %2,%2,#2\n"                \
+-              "       SETW [%0++],D1Ar1\n" FIXUP,     \
+-              "       .long 8b,9b\n" TENTRY)
+-
+-#define __asm_copy_from_user_14(to, from, ret) \
+-      __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
+-
+-#define __asm_copy_from_user_15(to, from, ret) \
+-      __asm_copy_from_user_14x_cont(to, from, ret,    \
+-              "       GETB D1Ar1,[%1++]\n"            \
+-              "10:    SETB [%0++],D1Ar1\n",           \
+-              "11:    ADD  %2,%2,#1\n"                \
+-              "       SETB [%0++],D1Ar1\n",           \
+-              "       .long 10b,11b\n")
+-
+-#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
+-      __asm_copy_from_user_12x_cont(to, from, ret,    \
+-              "       GETD D1Ar1,[%1++]\n"            \
+-              "8:     SETD [%0++],D1Ar1\n" COPY,      \
+-              "9:     ADD  %2,%2,#4\n"                \
+-              "       SETD [%0++],D1Ar1\n" FIXUP,     \
+-              "       .long 8b,9b\n" TENTRY)
+-
+-#define __asm_copy_from_user_16(to, from, ret) \
+-      __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
+-
+ #define __asm_copy_from_user_8x64(to, from, ret) \
+       asm volatile (                          \
+               "       GETL D0Ar2,D1Ar1,[%1++]\n"      \
+               "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
+               "1:\n"                                  \
+               "       .section .fixup,\"ax\"\n"       \
+-              "       MOV D1Ar1,#0\n"                 \
+-              "       MOV D0Ar2,#0\n"                 \
+               "3:     ADD  %2,%2,#8\n"                \
+-              "       SETL [%0++],D0Ar2,D1Ar1\n"      \
+               "       MOVT    D0Ar2,#HI(1b)\n"        \
+               "       JUMP    D0Ar2,#LO(1b)\n"        \
+               "       .previous\n"                    \
+@@ -789,36 +711,57 @@ EXPORT_SYMBOL(__copy_user);
+  *
+  *    Rationale:
+  *            A fault occurs while reading from user buffer, which is the
+- *            source. Since the fault is at a single address, we only
+- *            need to rewind by 8 bytes.
++ *            source.
+  *            Since we don't write to kernel buffer until we read first,
+  *            the kernel buffer is at the right state and needn't be
+- *            corrected.
++ *            corrected, but the source must be rewound to the beginning of
++ *            the block, which is LSM_STEP*8 bytes.
++ *            LSM_STEP is bits 10:8 in TXSTATUS which is already read
++ *            and stored in D0Ar2
++ *
++ *            NOTE: If a fault occurs at the last operation in M{G,S}ETL
++ *                    LSM_STEP will be 0. ie: we do 4 writes in our case, if
++ *                    a fault happens at the 4th write, LSM_STEP will be 0
++ *                    instead of 4. The code copes with that.
+  */
+ #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id)    \
+       __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
+-              "SUB    %1, %1, #8\n")
++              "LSR    D0Ar2, D0Ar2, #5\n"                             \
++              "ANDS   D0Ar2, D0Ar2, #0x38\n"                          \
++              "ADDZ   D0Ar2, D0Ar2, #32\n"                            \
++              "SUB    %1, %1, D0Ar2\n")
+ 
+ /*    rewind 'from' pointer when a fault occurs
+  *
+  *    Rationale:
+  *            A fault occurs while reading from user buffer, which is the
+- *            source. Since the fault is at a single address, we only
+- *            need to rewind by 4 bytes.
++ *            source.
+  *            Since we don't write to kernel buffer until we read first,
+  *            the kernel buffer is at the right state and needn't be
+- *            corrected.
++ *            corrected, but the source must be rewound to the beginning of
++ *            the block, which is LSM_STEP*4 bytes.
++ *            LSM_STEP is bits 10:8 in TXSTATUS which is already read
++ *            and stored in D0Ar2
++ *
++ *            NOTE: If a fault occurs at the last operation in M{G,S}ETL
++ *                    LSM_STEP will be 0. ie: we do 4 writes in our case, if
++ *                    a fault happens at the 4th write, LSM_STEP will be 0
++ *                    instead of 4. The code copes with that.
+  */
+ #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id)    \
+       __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
+-              "SUB    %1, %1, #4\n")
++              "LSR    D0Ar2, D0Ar2, #6\n"                             \
++              "ANDS   D0Ar2, D0Ar2, #0x1c\n"                          \
++              "ADDZ   D0Ar2, D0Ar2, #16\n"                            \
++              "SUB    %1, %1, D0Ar2\n")
+ 
+ 
+-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
+-   userland.  The return-value is the number of bytes that were
+-   inaccessible.  */
+-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+-                                unsigned long n)
++/*
++ * Copy from user to kernel. The return-value is the number of bytes that were
++ * inaccessible.
++ */
++unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
++                               unsigned long n)
+ {
+       register char *dst asm ("A0.2") = pdst;
+       register const char __user *src asm ("A1.2") = psrc;
+@@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+       if ((unsigned long) src & 1) {
+               __asm_copy_from_user_1(dst, src, retn);
+               n--;
++              if (retn)
++                      return retn + n;
+       }
+       if ((unsigned long) dst & 1) {
+               /* Worst case - byte copy */
+@@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+                       __asm_copy_from_user_1(dst, src, retn);
+                       n--;
+                       if (retn)
+-                              goto copy_exception_bytes;
++                              return retn + n;
+               }
+       }
+       if (((unsigned long) src & 2) && n >= 2) {
+               __asm_copy_from_user_2(dst, src, retn);
+               n -= 2;
++              if (retn)
++                      return retn + n;
+       }
+       if ((unsigned long) dst & 2) {
+               /* Second worst case - word copy */
+@@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+                       __asm_copy_from_user_2(dst, src, retn);
+                       n -= 2;
+                       if (retn)
+-                              goto copy_exception_bytes;
++                              return retn + n;
+               }
+       }
+ 
+-      /* We only need one check after the unalignment-adjustments,
+-         because if both adjustments were done, either both or
+-         neither reference had an exception.  */
+-      if (retn != 0)
+-              goto copy_exception_bytes;
+-
+ #ifdef USE_RAPF
+       /* 64 bit copy loop */
+       if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
+@@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+                       __asm_copy_from_user_8x64(dst, src, retn);
+                       n -= 8;
+                       if (retn)
+-                              goto copy_exception_bytes;
++                              return retn + n;
+               }
+       }
+ 
+@@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+                       __asm_copy_from_user_8x64(dst, src, retn);
+                       n -= 8;
+                       if (retn)
+-                              goto copy_exception_bytes;
++                              return retn + n;
+               }
+       }
+ #endif
+@@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+               n -= 4;
+ 
+               if (retn)
+-                      goto copy_exception_bytes;
++                      return retn + n;
+       }
+ 
+       /* If we get here, there were no memory read faults.  */
+@@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+       /* If we get here, retn correctly reflects the number of failing
+          bytes.  */
+       return retn;
+-
+- copy_exception_bytes:
+-      /* We already have "retn" bytes cleared, and need to clear the
+-         remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
+-         memset is preferred here, since this isn't speed-critical code and
+-         we'd rather have this a leaf-function than calling memset.  */
+-      {
+-              char *endp;
+-              for (endp = dst + n; dst < endp; dst++)
+-                      *dst = 0;
+-      }
+-
+-      return retn + n;
+ }
+-EXPORT_SYMBOL(__copy_user_zeroing);
++EXPORT_SYMBOL(raw_copy_from_user);
+ 
+ #define __asm_clear_8x64(to, ret) \
+       asm volatile (                                  \
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index db459612de44..75bfca69e418 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -1412,7 +1412,7 @@ config CPU_MIPS32_R6
+       select CPU_SUPPORTS_MSA
+       select GENERIC_CSUM
+       select HAVE_KVM
+-      select MIPS_O32_FP64_SUPPORT
++      select MIPS_O32_FP64_SUPPORT if 32BIT
+       help
+         Choose this option to build a kernel for release 6 or later of the
+         MIPS32 architecture.  New MIPS processors, starting with the Warrior
+diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
+index 40196bebe849..2365ce0ad8f2 100644
+--- a/arch/mips/include/asm/spinlock.h
++++ b/arch/mips/include/asm/spinlock.h
+@@ -112,7 +112,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
+               "       andi    %[ticket], %[ticket], 0xffff            \n"
+               "       bne     %[ticket], %[my_ticket], 4f             \n"
+               "        subu   %[ticket], %[my_ticket], %[ticket]      \n"
+-              "2:                                                     \n"
++              "2:     .insn                                           \n"
+               "       .subsection 2                                   \n"
+               "4:     andi    %[ticket], %[ticket], 0xffff            \n"
+               "       sll     %[ticket], 5                            \n"
+@@ -187,7 +187,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
+               "       sc      %[ticket], %[ticket_ptr]                \n"
+               "       beqz    %[ticket], 1b                           \n"
+               "        li     %[ticket], 1                            \n"
+-              "2:                                                     \n"
++              "2:     .insn                                           \n"
+               "       .subsection 2                                   \n"
+               "3:     b       2b                                      \n"
+               "        li     %[ticket], 0                            \n"
+@@ -367,7 +367,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
+               "       .set    reorder                                 \n"
+               __WEAK_LLSC_MB
+               "       li      %2, 1                                   \n"
+-              "2:                                                     \n"
++              "2:     .insn                                           \n"
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
+               : "memory");
+@@ -407,7 +407,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
+                       "       lui     %1, 0x8000                      \n"
+                       "       sc      %1, %0                          \n"
+                       "       li      %2, 1                           \n"
+-                      "2:                                             \n"
++                      "2:     .insn                                   \n"
+                       : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
+                         "=&r" (ret)
+                       : GCC_OFF_SMALL_ASM() (rw->lock)
+diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
+index 3e390a4e3897..daf580ce5ca2 100644
+--- a/arch/mips/lantiq/xway/sysctrl.c
++++ b/arch/mips/lantiq/xway/sysctrl.c
+@@ -467,7 +467,7 @@ void __init ltq_soc_init(void)
+ 
+               if (!np_xbar)
+                       panic("Failed to load xbar nodes from devicetree");
+-              if (of_address_to_resource(np_pmu, 0, &res_xbar))
++              if (of_address_to_resource(np_xbar, 0, &res_xbar))
+                       panic("Failed to get xbar resources");
+               if (request_mem_region(res_xbar.start, resource_size(&res_xbar),
+                       res_xbar.name) < 0)
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index 29f73e00253d..63b7d6f82d24 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -757,7 +757,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte,
+ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
+                                   struct uasm_label **l,
+                                   unsigned int pte,
+-                                  unsigned int ptr)
++                                  unsigned int ptr,
++                                  unsigned int flush)
+ {
+ #ifdef CONFIG_SMP
+       UASM_i_SC(p, pte, 0, ptr);
+@@ -766,6 +767,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
+ #else
+       UASM_i_SW(p, pte, 0, ptr);
+ #endif
++      if (cpu_has_ftlb && flush) {
++              BUG_ON(!cpu_has_tlbinv);
++
++              UASM_i_MFC0(p, ptr, C0_ENTRYHI);
++              uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
++              UASM_i_MTC0(p, ptr, C0_ENTRYHI);
++              build_tlb_write_entry(p, l, r, tlb_indexed);
++
++              uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
++              UASM_i_MTC0(p, ptr, C0_ENTRYHI);
++              build_huge_update_entries(p, pte, ptr);
++              build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
++
++              return;
++      }
++
+       build_huge_update_entries(p, pte, ptr);
+       build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
+ }
+@@ -2082,7 +2099,7 @@ static void build_r4000_tlb_load_handler(void)
+               uasm_l_tlbl_goaround2(&l, p);
+       }
+       uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
+-      build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
++      build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
+ #endif
+ 
+       uasm_l_nopage_tlbl(&l, p);
+@@ -2137,7 +2154,7 @@ static void build_r4000_tlb_store_handler(void)
+       build_tlb_probe_entry(&p);
+       uasm_i_ori(&p, wr.r1, wr.r1,
+                  _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+-      build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
++      build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
+ #endif
+ 
+       uasm_l_nopage_tlbs(&l, p);
+@@ -2193,7 +2210,7 @@ static void build_r4000_tlb_modify_handler(void)
+       build_tlb_probe_entry(&p);
+       uasm_i_ori(&p, wr.r1, wr.r1,
+                  _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+-      build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
++      build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
+ #endif
+ 
+       uasm_l_nopage_tlbm(&l, p);
+diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
+index f42834c7f007..3c575093f8f1 100644
+--- a/arch/mips/ralink/rt3883.c
++++ b/arch/mips/ralink/rt3883.c
+@@ -36,7 +36,7 @@ static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
+ static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
+ static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
+ static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
+-static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) };
++static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
+ static struct rt2880_pmx_func pci_func[] = {
+       FUNC("pci-dev", 0, 40, 32),
+       FUNC("pci-host2", 1, 40, 32),
+@@ -44,7 +44,7 @@ static struct rt2880_pmx_func pci_func[] = {
+       FUNC("pci-fnc", 3, 40, 32)
+ };
+ static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
+-static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) };
++static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
+ 
+ static struct rt2880_pmx_group rt3883_pinmux_data[] = {
+       GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
+diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
+index 718dd197909f..de73beb36910 100644
+--- a/arch/nios2/kernel/prom.c
++++ b/arch/nios2/kernel/prom.c
+@@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+       return alloc_bootmem_align(size, align);
+ }
+ 
++int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
++                                           bool nomap)
++{
++      reserve_bootmem(base, size, BOOTMEM_DEFAULT);
++      return 0;
++}
++
+ void __init early_init_devtree(void *params)
+ {
+       __be32 *dtb = (u32 *)__dtb_start;
+diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
+index a4ff86d58d5c..6c4e351a7930 100644
+--- a/arch/nios2/kernel/setup.c
++++ b/arch/nios2/kernel/setup.c
+@@ -195,6 +195,9 @@ void __init setup_arch(char **cmdline_p)
+       }
+ #endif /* CONFIG_BLK_DEV_INITRD */
+ 
++      early_init_fdt_reserve_self();
++      early_init_fdt_scan_reserved_mem();
++
+       unflatten_and_copy_device_tree();
+ 
+       setup_cpuinfo();
+diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
+index 86150fbb42c3..91e5c1758b5c 100644
+--- a/arch/powerpc/kernel/align.c
++++ b/arch/powerpc/kernel/align.c
+@@ -808,14 +808,25 @@ int fix_alignment(struct pt_regs *regs)
+       nb = aligninfo[instr].len;
+       flags = aligninfo[instr].flags;
+ 
+-      /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
+-      if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
+-              nb = 8;
+-              flags = LD+SW;
+-      } else if (IS_XFORM(instruction) &&
+-                 ((instruction >> 1) & 0x3ff) == 660) {
+-              nb = 8;
+-              flags = ST+SW;
++      /*
++       * Handle some cases which give overlaps in the DSISR values.
++       */
++      if (IS_XFORM(instruction)) {
++              switch (get_xop(instruction)) {
++              case 532:       /* ldbrx */
++                      nb = 8;
++                      flags = LD+SW;
++                      break;
++              case 660:       /* stdbrx */
++                      nb = 8;
++                      flags = ST+SW;
++                      break;
++              case 20:        /* lwarx */
++              case 84:        /* ldarx */
++              case 116:       /* lharx */
++              case 276:       /* lqarx */
++                      return 0;       /* not emulated ever */
++              }
+       }
+ 
+       /* Byteswap little endian loads and stores */
+diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
+index c8822af10a58..19d9b2d2d212 100644
+--- a/arch/powerpc/mm/hash_native_64.c
++++ b/arch/powerpc/mm/hash_native_64.c
+@@ -645,6 +645,10 @@ static void native_flush_hash_range(unsigned long number, int local)
+       unsigned long psize = batch->psize;
+       int ssize = batch->ssize;
+       int i;
++      unsigned int use_local;
++
++      use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
++              mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
+ 
+       local_irq_save(flags);
+ 
+@@ -671,8 +675,7 @@ static void native_flush_hash_range(unsigned long number, int local)
+               } pte_iterate_hashed_end();
+       }
+ 
+-      if (mmu_has_feature(MMU_FTR_TLBIEL) &&
+-          mmu_psize_defs[psize].tlbiel && local) {
++      if (use_local) {
+               asm volatile("ptesync":::"memory");
+               for (i = 0; i < number; i++) {
+                       vpn = batch->vpn[i];
+diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
+index 4da604ebf6fd..ca15613eaaa4 100644
+--- a/arch/s390/boot/compressed/misc.c
++++ b/arch/s390/boot/compressed/misc.c
+@@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *start, unsigned long size)
+ 
+ unsigned long decompress_kernel(void)
+ {
+-      unsigned long output_addr;
+-      unsigned char *output;
++      void *output, *kernel_end;
+ 
+-      output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
+-      check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
+-      memset(&_bss, 0, &_ebss - &_bss);
+-      free_mem_ptr = (unsigned long)&_end;
+-      free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+-      output = (unsigned char *) output_addr;
++      output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
++      kernel_end = output + SZ__bss_start;
++      check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
+ 
+ #ifdef CONFIG_BLK_DEV_INITRD
+       /*
+        * Move the initrd right behind the end of the decompressed
+-       * kernel image.
++       * kernel image. This also prevents initrd corruption caused by
++       * bss clearing since kernel_end will always be located behind the
++       * current bss section..
+        */
+-      if (INITRD_START && INITRD_SIZE &&
+-          INITRD_START < (unsigned long) output + SZ__bss_start) {
+-              check_ipl_parmblock(output + SZ__bss_start,
+-                                  INITRD_START + INITRD_SIZE);
+-              memmove(output + SZ__bss_start,
+-                      (void *) INITRD_START, INITRD_SIZE);
+-              INITRD_START = (unsigned long) output + SZ__bss_start;
++      if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
++              check_ipl_parmblock(kernel_end, INITRD_SIZE);
++              memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
++              INITRD_START = (unsigned long) kernel_end;
+       }
+ #endif
+ 
++      /*
++       * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
++       * initialized afterwards since they reside in bss.
++       */
++      memset(&_bss, 0, &_ebss - &_bss);
++      free_mem_ptr = (unsigned long) &_end;
++      free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
++
+       puts("Uncompressing Linux... ");
+       __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
+       puts("Ok, booting the kernel.\n");
+diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
+index 5c7381c5ad7f..c8d837f0fbbc 100644
+--- a/arch/s390/include/asm/uaccess.h
++++ b/arch/s390/include/asm/uaccess.h
+@@ -150,7 +150,7 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from,
+               "       jg      2b\n"                           \
+               ".popsection\n"                                 \
+               EX_TABLE(0b,3b) EX_TABLE(1b,3b)                 \
+-              : "=d" (__rc), "=Q" (*(to))                     \
++              : "=d" (__rc), "+Q" (*(to))                     \
+               : "d" (size), "Q" (*(from)),                    \
+                 "d" (__reg0), "K" (-EFAULT)                   \
+               : "cc");                                        \
+diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
+index 4f5fa8d65fe9..144367c0c28f 100644
+--- a/drivers/gpu/drm/ttm/ttm_object.c
++++ b/drivers/gpu/drm/ttm/ttm_object.c
+@@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
+       if (unlikely(ret != 0))
+               goto out_err0;
+ 
+-      ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
++      ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
+       if (unlikely(ret != 0))
+               goto out_err1;
+ 
+@@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists);
+ 
+ int ttm_ref_object_add(struct ttm_object_file *tfile,
+                      struct ttm_base_object *base,
+-                     enum ttm_ref_type ref_type, bool *existed)
++                     enum ttm_ref_type ref_type, bool *existed,
++                     bool require_existed)
+ {
+       struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+       struct ttm_ref_object *ref;
+@@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
+               }
+ 
+               rcu_read_unlock();
++              if (require_existed)
++                      return -EPERM;
++
+               ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
+                                          false, false);
+               if (unlikely(ret != 0))
+@@ -635,7 +639,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
+       prime = (struct ttm_prime_object *) dma_buf->priv;
+       base = &prime->base;
+       *handle = base->hash.key;
+-      ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
++      ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
+ 
+       dma_buf_put(dma_buf);
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+index 8e689b439890..6c649f7b5929 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -539,7 +539,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
+                    struct vmw_fence_obj **p_fence)
+ {
+       struct vmw_fence_obj *fence;
+-      int ret;
++      int ret;
+ 
+       fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+       if (unlikely(fence == NULL))
+@@ -702,6 +702,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
+ }
+ 
+ 
++/**
++ * vmw_fence_obj_lookup - Look up a user-space fence object
++ *
++ * @tfile: A struct ttm_object_file identifying the caller.
++ * @handle: A handle identifying the fence object.
++ * @return: A struct vmw_user_fence base ttm object on success or
++ * an error pointer on failure.
++ *
++ * The fence object is looked up and type-checked. The caller needs
++ * to have opened the fence object first, but since that happens on
++ * creation and fence objects aren't shareable, that's not an
++ * issue currently.
++ */
++static struct ttm_base_object *
++vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
++{
++      struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
++
++      if (!base) {
++              pr_err("Invalid fence object handle 0x%08lx.\n",
++                     (unsigned long)handle);
++              return ERR_PTR(-EINVAL);
++      }
++
++      if (base->refcount_release != vmw_user_fence_base_release) {
++              pr_err("Invalid fence object handle 0x%08lx.\n",
++                     (unsigned long)handle);
++              ttm_base_object_unref(&base);
++              return ERR_PTR(-EINVAL);
++      }
++
++      return base;
++}
++
++
+ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
+                            struct drm_file *file_priv)
+ {
+@@ -727,13 +762,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
+               arg->kernel_cookie = jiffies + wait_timeout;
+       }
+ 
+-      base = ttm_base_object_lookup(tfile, arg->handle);
+-      if (unlikely(base == NULL)) {
+-              printk(KERN_ERR "Wait invalid fence object handle "
+-                     "0x%08lx.\n",
+-                     (unsigned long)arg->handle);
+-              return -EINVAL;
+-      }
++      base = vmw_fence_obj_lookup(tfile, arg->handle);
++      if (IS_ERR(base))
++              return PTR_ERR(base);
+ 
+       fence = &(container_of(base, struct vmw_user_fence, base)->fence);
+ 
+@@ -772,13 +803,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct vmw_private *dev_priv = vmw_priv(dev);
+ 
+-      base = ttm_base_object_lookup(tfile, arg->handle);
+-      if (unlikely(base == NULL)) {
+-              printk(KERN_ERR "Fence signaled invalid fence object handle "
+-                     "0x%08lx.\n",
+-                     (unsigned long)arg->handle);
+-              return -EINVAL;
+-      }
++      base = vmw_fence_obj_lookup(tfile, arg->handle);
++      if (IS_ERR(base))
++              return PTR_ERR(base);
+ 
+       fence = &(container_of(base, struct vmw_user_fence, base)->fence);
+       fman = fman_from_fence(fence);
+@@ -1093,6 +1120,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+               (struct drm_vmw_fence_event_arg *) data;
+       struct vmw_fence_obj *fence = NULL;
+       struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
++      struct ttm_object_file *tfile = vmw_fp->tfile;
+       struct drm_vmw_fence_rep __user *user_fence_rep =
+               (struct drm_vmw_fence_rep __user *)(unsigned long)
+               arg->fence_rep;
+@@ -1106,24 +1134,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+        */
+       if (arg->handle) {
+               struct ttm_base_object *base =
+-                      ttm_base_object_lookup_for_ref(dev_priv->tdev,
+-                                                     arg->handle);
+-
+-              if (unlikely(base == NULL)) {
+-                      DRM_ERROR("Fence event invalid fence object handle "
+-                                "0x%08lx.\n",
+-                                (unsigned long)arg->handle);
+-                      return -EINVAL;
+-              }
++                      vmw_fence_obj_lookup(tfile, arg->handle);
++
++              if (IS_ERR(base))
++                      return PTR_ERR(base);
++
+               fence = &(container_of(base, struct vmw_user_fence,
+                                      base)->fence);
+               (void) vmw_fence_obj_reference(fence);
+ 
+               if (user_fence_rep != NULL) {
+-                      bool existed;
+-
+                       ret = ttm_ref_object_add(vmw_fp->tfile, base,
+-                                               TTM_REF_USAGE, &existed);
++                                               TTM_REF_USAGE, NULL, false);
+                       if (unlikely(ret != 0)) {
+                               DRM_ERROR("Failed to reference a fence "
+                                         "object.\n");
+@@ -1166,8 +1188,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+       return 0;
+ out_no_create:
+       if (user_fence_rep != NULL)
+-              ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+-                                        handle, TTM_REF_USAGE);
++              ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+ out_no_ref_obj:
+       vmw_fence_obj_unreference(&fence);
+       return ret;
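
The helper introduced above switches these lookups from a NULL-or-valid
return to the kernel's ERR_PTR convention, where a negative errno is
encoded in the pointer value itself and callers test it with
IS_ERR()/PTR_ERR(). A minimal userspace sketch of the idiom (the real
macros live in include/linux/err.h; lookup() here is illustrative):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Encode a negative errno in a pointer, and decode it again. */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }

    /* Pointers in the top 4095 bytes of the address space are errors. */
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static int the_object = 42;

    static void *lookup(int handle)
    {
            if (handle == 0)
                    return ERR_PTR(-22);    /* -EINVAL */
            return &the_object;
    }

    int main(void)
    {
            void *obj = lookup(0);

            if (IS_ERR(obj))
                    printf("lookup failed: %ld\n", PTR_ERR(obj));
            return 0;
    }

Folding the error message into one helper also removes the three
slightly different printk strings the old call sites carried.
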
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+index b8c6a03c8c54..5ec24fd801cd 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+@@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
+               param->value = dev_priv->has_dx;
+               break;
+       default:
+-              DRM_ERROR("Illegal vmwgfx get param request: %d\n",
+-                        param->param);
+               return -EINVAL;
+       }
+ 
+@@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
+       bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
+       struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ 
+-      if (unlikely(arg->pad64 != 0)) {
++      if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
+               DRM_ERROR("Illegal GET_3D_CAP argument.\n");
+               return -EINVAL;
+       }
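
The new max_size check follows the usual ioctl hardening rule:
validate every user-controlled field before it sizes an allocation or
a copy. A hypothetical sketch of the same shape (the struct and helper
names are stand-ins, not the driver's real types):

    #include <errno.h>
    #include <stdint.h>

    struct cap_arg {
            uint32_t pad64;         /* reserved, must be zero */
            uint32_t max_size;      /* user-supplied buffer size */
    };

    /* Reject bad arguments before any allocation or copy happens. */
    static int check_cap_arg(const struct cap_arg *arg)
    {
            if (arg->pad64 != 0 || arg->max_size == 0)
                    return -EINVAL;
            return 0;
    }
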
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+index e57667ca7557..dbca128a9aa6 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+@@ -591,7 +591,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
+               return ret;
+ 
+       ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+-                               TTM_REF_SYNCCPU_WRITE, &existed);
++                               TTM_REF_SYNCCPU_WRITE, &existed, false);
+       if (ret != 0 || existed)
+               ttm_bo_synccpu_write_release(&user_bo->dma.base);
+ 
+@@ -775,7 +775,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+ 
+       *handle = user_bo->prime.base.hash.key;
+       return ttm_ref_object_add(tfile, &user_bo->prime.base,
+-                                TTM_REF_USAGE, NULL);
++                                TTM_REF_USAGE, NULL, false);
+ }
+ 
+ /*
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index 7d620e82e000..c9c04ccccdd9 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -715,11 +715,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+                       128;
+ 
+       num_sizes = 0;
+-      for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
++      for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
++              if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
++                      return -EINVAL;
+               num_sizes += req->mip_levels[i];
++      }
+ 
+-      if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
+-          DRM_VMW_MAX_MIP_LEVELS)
++      if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
++          num_sizes == 0)
+               return -EINVAL;
+ 
+       size = vmw_user_surface_size + 128 +
+@@ -904,17 +907,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
+       uint32_t handle;
+       struct ttm_base_object *base;
+       int ret;
++      bool require_exist = false;
+ 
+       if (handle_type == DRM_VMW_HANDLE_PRIME) {
+               ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
+               if (unlikely(ret != 0))
+                       return ret;
+       } else {
+-              if (unlikely(drm_is_render_client(file_priv))) {
+-                      DRM_ERROR("Render client refused legacy "
+-                                "surface reference.\n");
+-                      return -EACCES;
+-              }
++              if (unlikely(drm_is_render_client(file_priv)))
++                      require_exist = true;
++
+               if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+                       DRM_ERROR("Locked master refused legacy "
+                                 "surface reference.\n");
+@@ -942,17 +944,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
+ 
+               /*
+                * Make sure the surface creator has the same
+-               * authenticating master.
++               * authenticating master, or is already registered with us.
+                */
+               if (drm_is_primary_client(file_priv) &&
+-                  user_srf->master != file_priv->master) {
+-                      DRM_ERROR("Trying to reference surface outside of"
+-                                " master domain.\n");
+-                      ret = -EACCES;
+-                      goto out_bad_resource;
+-              }
++                  user_srf->master != file_priv->master)
++                      require_exist = true;
+ 
+-              ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
++              ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
++                                       require_exist);
+               if (unlikely(ret != 0)) {
+                       DRM_ERROR("Could not add a reference to a surface.\n");
+                       goto out_bad_resource;
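
Checking each mip_levels[] entry before summing closes an integer
overflow: with the per-face u32 additions unchecked, num_sizes could
wrap and slip under the total-size bound while the real request stayed
huge. A minimal sketch of the validation pattern (constants and names
are illustrative, not the driver's):

    #include <errno.h>
    #include <stdint.h>

    #define MAX_FACES      6
    #define MAX_MIP_LEVELS 24

    /* Bound every element first so the sum can neither wrap nor
     * exceed the allocation limit, and reject empty requests too. */
    static int count_sizes(const uint32_t levels[MAX_FACES],
                           uint32_t *total)
    {
            uint32_t sum = 0;

            for (int i = 0; i < MAX_FACES; i++) {
                    if (levels[i] > MAX_MIP_LEVELS)
                            return -EINVAL;
                    sum += levels[i];
            }
            if (sum == 0 || sum > MAX_FACES * MAX_MIP_LEVELS)
                    return -EINVAL;
            *total = sum;
            return 0;
    }
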
+diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
+index acb3b303d800..90841abd3ce4 100644
+--- a/drivers/iio/gyro/bmg160_core.c
++++ b/drivers/iio/gyro/bmg160_core.c
+@@ -28,6 +28,7 @@
+ #include <linux/iio/trigger_consumer.h>
+ #include <linux/iio/triggered_buffer.h>
+ #include <linux/regmap.h>
++#include <linux/delay.h>
+ #include "bmg160.h"
+ 
+ #define BMG160_IRQ_NAME               "bmg160_event"
+@@ -53,6 +54,9 @@
+ #define BMG160_NO_FILTER              0
+ #define BMG160_DEF_BW                 100
+ 
++#define BMG160_GYRO_REG_RESET         0x14
++#define BMG160_GYRO_RESET_VAL         0xb6
++
+ #define BMG160_REG_INT_MAP_0          0x17
+ #define BMG160_INT_MAP_0_BIT_ANY      BIT(1)
+ 
+@@ -186,6 +190,14 @@ static int bmg160_chip_init(struct bmg160_data *data)
+       int ret;
+       unsigned int val;
+ 
++      /*
++       * Reset chip to get it in a known good state. A delay of 30ms after
++       * reset is required according to the datasheet.
++       */
++      regmap_write(data->regmap, BMG160_GYRO_REG_RESET,
++                   BMG160_GYRO_RESET_VAL);
++      usleep_range(30000, 30700);
++
+       ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val);
+       if (ret < 0) {
+               dev_err(data->dev, "Error reading reg_chip_id\n");
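
The added soft reset brings the gyro to a known state before the chip
ID is read; the 30 ms settle time comes from the datasheet, and
usleep_range() gives the scheduler slack around it. A rough sketch of
the same init pattern with the write's result checked (the chip-ID
register address and the helper name are assumptions, not the driver's
actual code):

    #include <linux/regmap.h>
    #include <linux/delay.h>

    static int sensor_reset_and_verify(struct regmap *map)
    {
            unsigned int id;
            int ret;

            ret = regmap_write(map, 0x14, 0xb6);  /* soft-reset cmd */
            if (ret < 0)
                    return ret;

            usleep_range(30000, 30700);           /* 30 ms settle */

            return regmap_read(map, 0x00, &id);   /* assumed chip-ID reg */
    }
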
+diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
+index 3f2a3d611e4b..9c6357c03905 100644
+--- a/drivers/staging/android/ashmem.c
++++ b/drivers/staging/android/ashmem.c
+@@ -392,6 +392,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+                       ret = PTR_ERR(vmfile);
+                       goto out;
+               }
++              vmfile->f_mode |= FMODE_LSEEK;
+               asma->file = vmfile;
+       }
+       get_file(asma->file);
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 2fa754c5fd62..6cb5c4b30e78 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -952,6 +952,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
+               return -EINVAL;
+       }
+ 
++      /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
++      if (tcon)
++              tcon->tid = 0;
++
+       rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
+       if (rc) {
+               kfree(unc_path);
+diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
+index b803213d1307..39c75a86c67f 100644
+--- a/fs/sysfs/file.c
++++ b/fs/sysfs/file.c
+@@ -108,7 +108,7 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
+ {
+       const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
+       struct kobject *kobj = of->kn->parent->priv;
+-      size_t len;
++      ssize_t len;
+ 
+       /*
+        * If buf != of->prealloc_buf, we don't know how
+@@ -117,13 +117,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
+       if (WARN_ON_ONCE(buf != of->prealloc_buf))
+               return 0;
+       len = ops->show(kobj, of->kn->priv, buf);
++      if (len < 0)
++              return len;
+       if (pos) {
+               if (len <= pos)
+                       return 0;
+               len -= pos;
+               memmove(buf, buf + pos, len);
+       }
+-      return min(count, len);
++      return min_t(ssize_t, count, len);
+ }
+ 
+ /* kernfs write callback for regular sysfs files */
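
The root cause here is the classic signed-to-unsigned trap: ->show()
may return a negative errno, and storing that in a size_t turns it
into an enormous positive length, which min() then compares as if it
were valid. A tiny userspace demonstration of the wrap:

    #include <stdio.h>
    #include <sys/types.h>  /* ssize_t */

    int main(void)
    {
            size_t  ulen = (size_t)-5;  /* error code stored unsigned */
            ssize_t slen = -5;

            /* As size_t, -5 wraps to SIZE_MAX - 4, so min(count, len)
             * picks count and the error is silently swallowed. */
            printf("stored in size_t:  %zu\n", ulen);
            printf("stored in ssize_t: %zd\n", slen);
            return 0;
    }

Hence the switch to ssize_t, the explicit early return for negative
values, and min_t(ssize_t, ...) to keep the comparison signed.
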
+diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
+index ed953f98f0e1..1487011fe057 100644
+--- a/include/drm/ttm/ttm_object.h
++++ b/include/drm/ttm/ttm_object.h
+@@ -229,6 +229,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
+  * @ref_type: The type of reference.
+  * @existed: Upon completion, indicates that an identical reference object
+  * already existed, and the refcount was upped on that object instead.
++ * @require_existed: Fail with -EPERM if an identical ref object didn't
++ * already exist.
+  *
+  * Checks that the base object is shareable and adds a ref object to it.
+  *
+@@ -243,7 +245,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
+  */
+ extern int ttm_ref_object_add(struct ttm_object_file *tfile,
+                             struct ttm_base_object *base,
+-                            enum ttm_ref_type ref_type, bool *existed);
++                            enum ttm_ref_type ref_type, bool *existed,
++                            bool require_existed);
+ 
+ extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
+                                 struct ttm_base_object *base);
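
Existing callers pass false for the new flag to keep the old
behaviour; the security-sensitive vmwgfx paths above pass true so a
reference can only be added where an identical one already exists. A
sketch of the call shape after this change (surrounding context is
elided):

    /* Refuse to create a fresh ref object; per the comment above,
     * -EPERM is returned if no identical reference already existed. */
    ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, true);
    if (unlikely(ret != 0))
            return ret;
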
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index a46c40bfb5f6..c7e8ed99c953 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -151,11 +151,17 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
+ 
+       WARN_ON(!task->ptrace || task->parent != current);
+ 
++      /*
++       * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
++       * Recheck state under the lock to close this race.
++       */
+       spin_lock_irq(&task->sighand->siglock);
+-      if (__fatal_signal_pending(task))
+-              wake_up_state(task, __TASK_TRACED);
+-      else
+-              task->state = TASK_TRACED;
++      if (task->state == __TASK_TRACED) {
++              if (__fatal_signal_pending(task))
++                      wake_up_state(task, __TASK_TRACED);
++              else
++                      task->state = TASK_TRACED;
++      }
+       spin_unlock_irq(&task->sighand->siglock);
+ }
+ 
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index acbb0e73d3a2..7d7f99b0db47 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -4875,9 +4875,9 @@ static __init int test_ringbuffer(void)
+               rb_data[cpu].cnt = cpu;
+               rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
+                                                "rbtester/%d", cpu);
+-              if (WARN_ON(!rb_threads[cpu])) {
++              if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
+                       pr_cont("FAILED\n");
+-                      ret = -1;
++                      ret = PTR_ERR(rb_threads[cpu]);
+                       goto out_free;
+               }
+ 
+@@ -4887,9 +4887,9 @@ static __init int test_ringbuffer(void)
+ 
+       /* Now create the rb hammer! */
+       rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
+-      if (WARN_ON(!rb_hammer)) {
++      if (WARN_ON(IS_ERR(rb_hammer))) {
+               pr_cont("FAILED\n");
+-              ret = -1;
++              ret = PTR_ERR(rb_hammer);
+               goto out_free;
+       }
+ 
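
The rule behind both hunks: kthread_create() and kthread_run() report
failure through ERR_PTR(), never NULL, so a !ptr test can never fire,
and the old code also returned a made-up -1 instead of the real errno.
A short sketch of the correct check (start_worker() and worker_fn are
illustrative):

    #include <linux/kthread.h>
    #include <linux/err.h>

    static int start_worker(int (*worker_fn)(void *data))
    {
            struct task_struct *t;

            t = kthread_run(worker_fn, NULL, "worker");
            if (IS_ERR(t))              /* never NULL on failure */
                    return PTR_ERR(t);  /* e.g. -ENOMEM or -EINTR */
            return 0;
    }
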
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index a4217fe60dff..e09b1a0e2cfe 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1492,7 +1492,6 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
+ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
+                      compat_ulong_t, maxnode)
+ {
+-      long err = 0;
+       unsigned long __user *nm = NULL;
+       unsigned long nr_bits, alloc_size;
+       DECLARE_BITMAP(bm, MAX_NUMNODES);
+@@ -1501,14 +1500,13 @@ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
+       alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
+ 
+       if (nmask) {
+-              err = compat_get_bitmap(bm, nmask, nr_bits);
++              if (compat_get_bitmap(bm, nmask, nr_bits))
++                      return -EFAULT;
+               nm = compat_alloc_user_space(alloc_size);
+-              err |= copy_to_user(nm, bm, alloc_size);
++              if (copy_to_user(nm, bm, alloc_size))
++                      return -EFAULT;
+       }
+ 
+-      if (err)
+-              return -EFAULT;
+-
+       return sys_set_mempolicy(mode, nm, nr_bits+1);
+ }
+ 
+@@ -1516,7 +1514,6 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
+                      compat_ulong_t, mode, compat_ulong_t __user *, nmask,
+                      compat_ulong_t, maxnode, compat_ulong_t, flags)
+ {
+-      long err = 0;
+       unsigned long __user *nm = NULL;
+       unsigned long nr_bits, alloc_size;
+       nodemask_t bm;
+@@ -1525,14 +1522,13 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
+       alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
+ 
+       if (nmask) {
+-              err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
++              if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
++                      return -EFAULT;
+               nm = compat_alloc_user_space(alloc_size);
+-              err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
++              if (copy_to_user(nm, nodes_addr(bm), alloc_size))
++                      return -EFAULT;
+       }
+ 
+-      if (err)
+-              return -EFAULT;
+-
+       return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
+ }
+ 
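
The rewritten compat helpers also stop mixing error conventions:
compat_get_bitmap() returns a negative errno while copy_to_user()
returns a count of uncopied bytes, so OR-ing both into one variable
conflated the two and kept executing after a failure. A small sketch
of the fail-fast shape (both callbacks are stand-ins for the real
copy helpers):

    #include <errno.h>

    static int relay_bitmap(int (*fetch)(unsigned long *),
                            int (*publish)(const unsigned long *))
    {
            unsigned long bm[4];

            if (fetch(bm))          /* like compat_get_bitmap() */
                    return -EFAULT; /* stop at the first failure */
            if (publish(bm))        /* like copy_to_user() */
                    return -EFAULT;
            return 0;
    }
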
