commit:     9288fa6d94a9020316950d84f455e3e52c9b7d5a
Author:     Mike Pagano <mpagano@gentoo.org>
AuthorDate: Wed Jan 10 11:47:11 2018 +0000
Commit:     Mike Pagano <mpagano@gentoo.org>
CommitDate: Wed Jan 10 11:47:11 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9288fa6d

Linux patch 4.9.76

 0000_README             |   4 +
 1075_linux-4.9.76.patch | 787 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 791 insertions(+)

diff --git a/0000_README b/0000_README
index 50f031f..f7c3cd9 100644
--- a/0000_README
+++ b/0000_README
@@ -343,6 +343,10 @@ Patch:  1074_linux-4.9.75.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.75
 
+Patch:  1075_linux-4.9.76.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.76
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1075_linux-4.9.76.patch b/1075_linux-4.9.76.patch
new file mode 100644
index 0000000..ea4d1dd
--- /dev/null
+++ b/1075_linux-4.9.76.patch
@@ -0,0 +1,787 @@
+diff --git a/Makefile b/Makefile
+index acbc1b032db2..2637f0ed0a07 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 75
++SUBLEVEL = 76
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
+index 41faf17cd28d..0684fd2f42e8 100644
+--- a/arch/arc/include/asm/uaccess.h
++++ b/arch/arc/include/asm/uaccess.h
+@@ -673,6 +673,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
+               return 0;
+ 
+       __asm__ __volatile__(
++      "       mov     lp_count, %5            \n"
+       "       lp      3f                      \n"
+       "1:     ldb.ab  %3, [%2, 1]             \n"
+       "       breq.d  %3, 0, 3f               \n"
+@@ -689,8 +690,8 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
+       "       .word   1b, 4b                  \n"
+       "       .previous                       \n"
+       : "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
+-      : "g"(-EFAULT), "l"(count)
+-      : "memory");
++      : "g"(-EFAULT), "r"(count)
++      : "lp_count", "lp_start", "lp_end", "memory");
+ 
+       return res;
+ }
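
The ARC fix above follows a general inline-asm rule: any register the asm body writes behind the compiler's back must be named in the clobber list, and the old "l"(count) constraint only told GCC that lp_count was an input. A minimal stand-alone sketch of the same rule, using hypothetical x86-64 AT&T assembly rather than ARC:

  #include <stdio.h>

  int main(void)
  {
          unsigned long n = 5, total = 0;

          /* Both registers are declared read-write ("+r") and the flags
           * register is clobbered ("cc"), so GCC cannot cache anything
           * the loop silently modifies. */
          asm volatile("1:     add     %1, %0\n\t"
                       "       dec     %1\n\t"
                       "       jnz     1b"
                       : "+r"(total), "+r"(n)
                       :
                       : "cc");

          printf("%lu\n", total); /* 5+4+3+2+1 = 15 */
          return 0;
  }
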
+diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
+index 8be707e1b6c7..82dea145574e 100644
+--- a/arch/parisc/include/asm/ldcw.h
++++ b/arch/parisc/include/asm/ldcw.h
+@@ -11,6 +11,7 @@
+    for the semaphore.  */
+ 
+ #define __PA_LDCW_ALIGNMENT   16
++#define __PA_LDCW_ALIGN_ORDER 4
+ #define __ldcw_align(a) ({                                    \
+       unsigned long __ret = (unsigned long) &(a)->lock[0];    \
+       __ret = (__ret + __PA_LDCW_ALIGNMENT - 1)               \
+@@ -28,6 +29,7 @@
+    ldcd). */
+ 
+ #define __PA_LDCW_ALIGNMENT   4
++#define __PA_LDCW_ALIGN_ORDER 2
+ #define __ldcw_align(a) (&(a)->slock)
+ #define __LDCW        "ldcw,co"
+ 
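
__PA_LDCW_ALIGN_ORDER is simply log2 of __PA_LDCW_ALIGNMENT, which lets the assembly clear the low bits with a single depi. A userspace sketch (assumed constants) of the round-up-then-mask idiom behind __ldcw_align() and the new entry.S macro:

  #include <assert.h>
  #include <stdint.h>

  #define LDCW_ALIGNMENT   16
  #define LDCW_ALIGN_ORDER 4      /* 1 << 4 == 16 */

  static uintptr_t ldcw_align(uintptr_t addr)
  {
          /* add ALIGNMENT-1, then zero the low ALIGN_ORDER bits --
           * the same effect as "load32 addr+15; depi 0,31,4" above */
          addr += LDCW_ALIGNMENT - 1;
          return addr & ~(((uintptr_t)1 << LDCW_ALIGN_ORDER) - 1);
  }

  int main(void)
  {
          assert(ldcw_align(0x1001) == 0x1010);
          assert(ldcw_align(0x1010) == 0x1010);
          return 0;
  }
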
+diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
+index 4fcff2dcc9c3..e3d3e8e1d708 100644
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -35,6 +35,7 @@
+ #include <asm/pgtable.h>
+ #include <asm/signal.h>
+ #include <asm/unistd.h>
++#include <asm/ldcw.h>
+ #include <asm/thread_info.h>
+ 
+ #include <linux/linkage.h>
+@@ -46,6 +47,14 @@
+ #endif
+ 
+       .import         pa_tlb_lock,data
++      .macro  load_pa_tlb_lock reg
++#if __PA_LDCW_ALIGNMENT > 4
++      load32  PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
++      depi    0,31,__PA_LDCW_ALIGN_ORDER, \reg
++#else
++      load32  PA(pa_tlb_lock), \reg
++#endif
++      .endm
+ 
+       /* space_to_prot macro creates a prot id from a space id */
+ 
+@@ -457,7 +466,7 @@
+       .macro          tlb_lock        spc,ptp,pte,tmp,tmp1,fault
+ #ifdef CONFIG_SMP
+       cmpib,COND(=),n 0,\spc,2f
+-      load32          PA(pa_tlb_lock),\tmp
++      load_pa_tlb_lock \tmp
+ 1:    LDCW            0(\tmp),\tmp1
+       cmpib,COND(=)   0,\tmp1,1b
+       nop
+@@ -480,7 +489,7 @@
+       /* Release pa_tlb_lock lock. */
+       .macro          tlb_unlock1     spc,tmp
+ #ifdef CONFIG_SMP
+-      load32          PA(pa_tlb_lock),\tmp
++      load_pa_tlb_lock \tmp
+       tlb_unlock0     \spc,\tmp
+ #endif
+       .endm
+diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
+index adf7187f8951..2d40c4ff3f69 100644
+--- a/arch/parisc/kernel/pacache.S
++++ b/arch/parisc/kernel/pacache.S
+@@ -36,6 +36,7 @@
+ #include <asm/assembly.h>
+ #include <asm/pgtable.h>
+ #include <asm/cache.h>
++#include <asm/ldcw.h>
+ #include <linux/linkage.h>
+ 
+       .text
+@@ -333,8 +334,12 @@ ENDPROC_CFI(flush_data_cache_local)
+ 
+       .macro  tlb_lock        la,flags,tmp
+ #ifdef CONFIG_SMP
+-      ldil            L%pa_tlb_lock,%r1
+-      ldo             R%pa_tlb_lock(%r1),\la
++#if __PA_LDCW_ALIGNMENT > 4
++      load32          pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
++      depi            0,31,__PA_LDCW_ALIGN_ORDER, \la
++#else
++      load32          pa_tlb_lock, \la
++#endif
+       rsm             PSW_SM_I,\flags
+ 1:    LDCW            0(\la),\tmp
+       cmpib,<>,n      0,\tmp,3f
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index 7593787ed4c3..c3a532abac03 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -39,6 +39,7 @@
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+ #include <linux/fs.h>
++#include <linux/cpu.h>
+ #include <linux/module.h>
+ #include <linux/personality.h>
+ #include <linux/ptrace.h>
+@@ -180,6 +181,44 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
+       return 1;
+ }
+ 
++/*
++ * Idle thread support
++ *
++ * Detect when running on QEMU with SeaBIOS PDC Firmware and let
++ * QEMU idle the host too.
++ */
++
++int running_on_qemu __read_mostly;
++
++void __cpuidle arch_cpu_idle_dead(void)
++{
++      /* nop on real hardware, qemu will offline CPU. */
++      asm volatile("or %%r31,%%r31,%%r31\n":::);
++}
++
++void __cpuidle arch_cpu_idle(void)
++{
++      local_irq_enable();
++
++      /* nop on real hardware, qemu will idle sleep. */
++      asm volatile("or %%r10,%%r10,%%r10\n":::);
++}
++
++static int __init parisc_idle_init(void)
++{
++      const char *marker;
++
++      /* check QEMU/SeaBIOS marker in PAGE0 */
++      marker = (char *) &PAGE0->pad0;
++      running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
++
++      if (!running_on_qemu)
++              cpu_idle_poll_ctrl(1);
++
++      return 0;
++}
++arch_initcall(parisc_idle_init);
++
+ /*
+  * Copy architecture-specific thread state
+  */
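
The check above compares eight bytes because "SeaBIOS" is seven characters plus its terminating NUL. A trivial userspace sketch of the same probe (the PAGE0 layout is assumed, not reproduced):

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          char pad0[8] = "SeaBIOS";       /* 7 chars + NUL, as at PAGE0->pad0 */
          int running_on_qemu = (memcmp(pad0, "SeaBIOS", 8) == 0);

          printf("running_on_qemu=%d\n", running_on_qemu);
          return 0;
  }
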
+diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
+index 0f9cd90c11af..f06a9a0063f1 100644
+--- a/arch/s390/kernel/compat_linux.c
++++ b/arch/s390/kernel/compat_linux.c
+@@ -263,6 +263,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplis
+               return retval;
+       }
+ 
++      groups_sort(group_info);
+       retval = set_current_groups(group_info);
+       put_group_info(group_info);
+ 
+diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
+index 636c4b341f36..6bb7e92c6d50 100644
+--- a/arch/x86/entry/vsyscall/vsyscall_64.c
++++ b/arch/x86/entry/vsyscall/vsyscall_64.c
+@@ -66,6 +66,11 @@ static int __init vsyscall_setup(char *str)
+ }
+ early_param("vsyscall", vsyscall_setup);
+ 
++bool vsyscall_enabled(void)
++{
++      return vsyscall_mode != NONE;
++}
++
+ static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
+                             const char *message)
+ {
+diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
+index 6ba66ee79710..4865e10dbb55 100644
+--- a/arch/x86/include/asm/vsyscall.h
++++ b/arch/x86/include/asm/vsyscall.h
+@@ -12,12 +12,14 @@ extern void map_vsyscall(void);
+  * Returns true if handled.
+  */
+ extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
++extern bool vsyscall_enabled(void);
+ #else
+ static inline void map_vsyscall(void) {}
+static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
+ {
+       return false;
+ }
++static inline bool vsyscall_enabled(void) { return false; }
+ #endif
+ 
+ #endif /* _ASM_X86_VSYSCALL_H */
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index 017bda12caae..b74bb29db6b9 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -592,6 +592,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
+ #define F14H_MPB_MAX_SIZE 1824
+ #define F15H_MPB_MAX_SIZE 4096
+ #define F16H_MPB_MAX_SIZE 3458
++#define F17H_MPB_MAX_SIZE 3200
+ 
+       switch (family) {
+       case 0x14:
+@@ -603,6 +604,9 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
+       case 0x16:
+               max_size = F16H_MPB_MAX_SIZE;
+               break;
++      case 0x17:
++              max_size = F17H_MPB_MAX_SIZE;
++              break;
+       default:
+               max_size = F1XH_MPB_MAX_SIZE;
+               break;
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 1e779bca4f3e..f92bdb9f4e46 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -768,7 +768,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+       .state = 0,
+       .cr4 = ~0UL,    /* fail hard if we screw up cr4 shadow initialization */
+ };
+-EXPORT_SYMBOL_GPL(cpu_tlbstate);
++EXPORT_PER_CPU_SYMBOL(cpu_tlbstate);
+ 
+ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
+ {
+diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
+index d8376b4ad9f0..8f8e5e03d083 100644
+--- a/arch/x86/mm/kaiser.c
++++ b/arch/x86/mm/kaiser.c
+@@ -19,6 +19,7 @@
+ #include <asm/pgalloc.h>
+ #include <asm/desc.h>
+ #include <asm/cmdline.h>
++#include <asm/vsyscall.h>
+ 
+ int kaiser_enabled __read_mostly = 1;
+ EXPORT_SYMBOL(kaiser_enabled);        /* for inlined TLB flush functions */
+@@ -110,12 +111,13 @@ static inline unsigned long get_pa_from_mapping(unsigned long vaddr)
+  *
+  * Returns a pointer to a PTE on success, or NULL on failure.
+  */
+-static pte_t *kaiser_pagetable_walk(unsigned long address)
++static pte_t *kaiser_pagetable_walk(unsigned long address, bool user)
+ {
+       pmd_t *pmd;
+       pud_t *pud;
+       pgd_t *pgd = native_get_shadow_pgd(pgd_offset_k(address));
+       gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
++      unsigned long prot = _KERNPG_TABLE;
+ 
+       if (pgd_none(*pgd)) {
+               WARN_ONCE(1, "All shadow pgds should have been populated");
+@@ -123,6 +125,17 @@ static pte_t *kaiser_pagetable_walk(unsigned long address)
+       }
+       BUILD_BUG_ON(pgd_large(*pgd) != 0);
+ 
++      if (user) {
++              /*
++               * The vsyscall page is the only page that will have
++               *  _PAGE_USER set. Catch everything else.
++               */
++              BUG_ON(address != VSYSCALL_ADDR);
++
++              set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
++              prot = _PAGE_TABLE;
++      }
++
+       pud = pud_offset(pgd, address);
+       /* The shadow page tables do not use large mappings: */
+       if (pud_large(*pud)) {
+@@ -135,7 +148,7 @@ static pte_t *kaiser_pagetable_walk(unsigned long address)
+                       return NULL;
+               spin_lock(&shadow_table_allocation_lock);
+               if (pud_none(*pud)) {
+-                      set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
++                      set_pud(pud, __pud(prot | __pa(new_pmd_page)));
+                       __inc_zone_page_state(virt_to_page((void *)
+                                               new_pmd_page), NR_KAISERTABLE);
+               } else
+@@ -155,7 +168,7 @@ static pte_t *kaiser_pagetable_walk(unsigned long address)
+                       return NULL;
+               spin_lock(&shadow_table_allocation_lock);
+               if (pmd_none(*pmd)) {
+-                      set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
++                      set_pmd(pmd, __pmd(prot | __pa(new_pte_page)));
+                       __inc_zone_page_state(virt_to_page((void *)
+                                               new_pte_page), NR_KAISERTABLE);
+               } else
+@@ -191,7 +204,7 @@ static int kaiser_add_user_map(const void *__start_addr, unsigned long size,
+                       ret = -EIO;
+                       break;
+               }
+-              pte = kaiser_pagetable_walk(address);
++              pte = kaiser_pagetable_walk(address, flags & _PAGE_USER);
+               if (!pte) {
+                       ret = -ENOMEM;
+                       break;
+@@ -318,6 +331,19 @@ void __init kaiser_init(void)
+ 
+       kaiser_init_all_pgds();
+ 
++      /*
++       * Note that this sets _PAGE_USER and it needs to happen when the
++       * pagetable hierarchy gets created, i.e., early. Otherwise
++       * kaiser_pagetable_walk() will encounter initialized PTEs in the
++       * hierarchy and not set the proper permissions, leading to the
++       * pagefaults with page-protection violations when trying to read the
++       * vsyscall page. For example.
++       */
++      if (vsyscall_enabled())
++              kaiser_add_user_map_early((void *)VSYSCALL_ADDR,
++                                        PAGE_SIZE,
++                                         __PAGE_KERNEL_VSYSCALL);
++
+       for_each_possible_cpu(cpu) {
+               void *percpu_vaddr = __per_cpu_user_mapped_start +
+                                    per_cpu_offset(cpu);
+diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
+index e899ef51dc8e..cb1c3a3287b0 100644
+--- a/crypto/chacha20poly1305.c
++++ b/crypto/chacha20poly1305.c
+@@ -610,6 +610,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
+                                                   algt->mask));
+       if (IS_ERR(poly))
+               return PTR_ERR(poly);
++      poly_hash = __crypto_hash_alg_common(poly);
++
++      err = -EINVAL;
++      if (poly_hash->digestsize != POLY1305_DIGEST_SIZE)
++              goto out_put_poly;
+ 
+       err = -ENOMEM;
+       inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+@@ -618,7 +623,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
+ 
+       ctx = aead_instance_ctx(inst);
+       ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
+-      poly_hash = __crypto_hash_alg_common(poly);
+       err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
+                                     aead_crypto_instance(inst));
+       if (err)
+diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
+index ee9cfb99fe25..f8ec3d4ba4a8 100644
+--- a/crypto/pcrypt.c
++++ b/crypto/pcrypt.c
+@@ -254,6 +254,14 @@ static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
+       crypto_free_aead(ctx->child);
+ }
+ 
++static void pcrypt_free(struct aead_instance *inst)
++{
++      struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
++
++      crypto_drop_aead(&ctx->spawn);
++      kfree(inst);
++}
++
+ static int pcrypt_init_instance(struct crypto_instance *inst,
+                               struct crypto_alg *alg)
+ {
+@@ -319,6 +327,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
+       inst->alg.encrypt = pcrypt_aead_encrypt;
+       inst->alg.decrypt = pcrypt_aead_decrypt;
+ 
++      inst->free = pcrypt_free;
++
+       err = aead_register_instance(tmpl, inst);
+       if (err)
+               goto out_drop_aead;
+@@ -349,14 +359,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
+       return -EINVAL;
+ }
+ 
+-static void pcrypt_free(struct crypto_instance *inst)
+-{
+-      struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
+-
+-      crypto_drop_aead(&ctx->spawn);
+-      kfree(inst);
+-}
+-
+ static int pcrypt_cpumask_change_notify(struct notifier_block *self,
+                                       unsigned long val, void *data)
+ {
+@@ -469,7 +471,6 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
+ static struct crypto_template pcrypt_tmpl = {
+       .name = "pcrypt",
+       .create = pcrypt_create,
+-      .free = pcrypt_free,
+       .module = THIS_MODULE,
+ };
+ 
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 7d506cb73e54..4d30da269060 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -272,6 +272,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
+       int result, flags;
+       struct nbd_request request;
+       unsigned long size = blk_rq_bytes(req);
++      struct bio *bio;
+       u32 type;
+ 
+       if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+@@ -305,16 +306,20 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
+               return -EIO;
+       }
+ 
+-      if (type == NBD_CMD_WRITE) {
+-              struct req_iterator iter;
++      if (type != NBD_CMD_WRITE)
++              return 0;
++
++      flags = 0;
++      bio = req->bio;
++      while (bio) {
++              struct bio *next = bio->bi_next;
++              struct bvec_iter iter;
+               struct bio_vec bvec;
+-              /*
+-               * we are really probing at internals to determine
+-               * whether to set MSG_MORE or not...
+-               */
+-              rq_for_each_segment(bvec, req, iter) {
+-                      flags = 0;
+-                      if (!rq_iter_last(bvec, iter))
++
++              bio_for_each_segment(bvec, bio, iter) {
++                      bool is_last = !next && bio_iter_last(bvec, iter);
++
++                      if (is_last)
+                               flags = MSG_MORE;
+                      dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
+                               cmd, bvec.bv_len);
+@@ -325,7 +330,16 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
+                                       result);
+                               return -EIO;
+                       }
++                      /*
++                       * The completion might already have come in,
++                       * so break for the last one instead of letting
++                       * the iterator do it. This prevents use-after-free
++                       * of the bio.
++                       */
++                      if (is_last)
++                              break;
+               }
++              bio = next;
+       }
+       return 0;
+ }
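
A userspace sketch of the iteration rule the new comment spells out: sample the next pointer and compute is_last before sending, then break on the last chunk instead of letting the loop dereference memory the completion path may already have freed. The structures are hypothetical stand-ins for the bio chain:

  #include <stdio.h>
  #include <stddef.h>

  struct chunk {
          const char *data;
          struct chunk *next;
  };

  static void send_all(struct chunk *c)
  {
          while (c) {
                  struct chunk *next = c->next;   /* sample before sending */
                  int is_last = (next == NULL);

                  printf("send '%s'%s\n", c->data, is_last ? " (last)" : "");
                  if (is_last)
                          break;  /* c may be freed once the peer completes */
                  c = next;
          }
  }

  int main(void)
  {
          struct chunk tail = { "tail", NULL };
          struct chunk head = { "head", &tail };

          send_all(&head);
          return 0;
  }
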
+diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
+index 795c9d9c96a6..2051d926e303 100644
+--- a/drivers/bus/sunxi-rsb.c
++++ b/drivers/bus/sunxi-rsb.c
+@@ -178,6 +178,7 @@ static struct bus_type sunxi_rsb_bus = {
+       .match          = sunxi_rsb_device_match,
+       .probe          = sunxi_rsb_device_probe,
+       .remove         = sunxi_rsb_device_remove,
++      .uevent         = of_device_uevent_modalias,
+ };
+ 
+ static void sunxi_rsb_dev_release(struct device *dev)
+diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
+index c5aac25a5738..b365ad78ac27 100644
+--- a/drivers/crypto/n2_core.c
++++ b/drivers/crypto/n2_core.c
+@@ -1620,6 +1620,7 @@ static int queue_cache_init(void)
+                                         CWQ_ENTRY_SIZE, 0, NULL);
+       if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
+               kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
++              queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
+               return -ENOMEM;
+       }
+       return 0;
+@@ -1629,6 +1630,8 @@ static void queue_cache_destroy(void)
+ {
+       kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+       kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
++      queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
++      queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
+ }
+ 
+ static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
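
A stand-alone sketch of the destroy-and-NULL idiom this fix applies, with free() standing in for kmem_cache_destroy() (both accept a NULL pointer): clearing the cached pointer turns a second destroy, or a destroy after a failed re-init, into a no-op instead of a double free:

  #include <stdlib.h>

  static int *queue_cache[2];

  static void queue_cache_destroy(void)
  {
          free(queue_cache[0]);
          free(queue_cache[1]);
          queue_cache[0] = NULL;  /* stale pointers would double-free */
          queue_cache[1] = NULL;
  }

  int main(void)
  {
          queue_cache[0] = malloc(16);
          queue_cache[1] = malloc(16);

          queue_cache_destroy();
          queue_cache_destroy();  /* safe only because of the NULLing */
          return 0;
  }
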
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index cd834da5934a..59603a5728f7 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1609,7 +1609,7 @@ static int elantech_set_properties(struct elantech_data *etd)
+               case 5:
+                       etd->hw_version = 3;
+                       break;
+-              case 6 ... 14:
++              case 6 ... 15:
+                       etd->hw_version = 4;
+                       break;
+               default:
+diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
+index d3d975ae24b7..7f294f785ce6 100644
+--- a/drivers/iommu/arm-smmu-v3.c
++++ b/drivers/iommu/arm-smmu-v3.c
+@@ -1547,13 +1547,15 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
+       domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+       domain->geometry.aperture_end = (1UL << ias) - 1;
+       domain->geometry.force_aperture = true;
+-      smmu_domain->pgtbl_ops = pgtbl_ops;
+ 
+       ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
+-      if (ret < 0)
++      if (ret < 0) {
+               free_io_pgtable_ops(pgtbl_ops);
++              return ret;
++      }
+ 
+-      return ret;
++      smmu_domain->pgtbl_ops = pgtbl_ops;
++      return 0;
+ }
+ 
+static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
+@@ -1580,7 +1582,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
+ 
+ static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
+ {
+-      int i;
++      int i, j;
+       struct arm_smmu_master_data *master = fwspec->iommu_priv;
+       struct arm_smmu_device *smmu = master->smmu;
+ 
+@@ -1588,6 +1590,13 @@ static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
+               u32 sid = fwspec->ids[i];
+               __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
+ 
++              /* Bridged PCI devices may end up with duplicated IDs */
++              for (j = 0; j < i; j++)
++                      if (fwspec->ids[j] == sid)
++                              break;
++              if (j < i)
++                      continue;
++
+               arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
+       }
+ 
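
A small sketch of the duplicate-skip scan added above: before acting on ids[i], look back through ids[0..i-1] and skip any value already handled. Quadratic, but fwspec ID lists are short; the sample IDs are invented:

  #include <stdio.h>

  int main(void)
  {
          unsigned int ids[] = { 7, 3, 7, 9, 3 };
          int i, j, n = sizeof(ids) / sizeof(ids[0]);

          for (i = 0; i < n; i++) {
                  for (j = 0; j < i; j++)
                          if (ids[j] == ids[i])
                                  break;
                  if (j < i)
                          continue;       /* duplicate SID: already installed */
                  printf("install STE for sid %u\n", ids[i]);
          }
          return 0;
  }
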
+diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
+index b121bf4ed73a..3b8911cd3a19 100644
+--- a/drivers/mtd/nand/pxa3xx_nand.c
++++ b/drivers/mtd/nand/pxa3xx_nand.c
+@@ -950,6 +950,7 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
+ 
+       switch (command) {
+       case NAND_CMD_READ0:
++      case NAND_CMD_READOOB:
+       case NAND_CMD_PAGEPROG:
+               info->use_ecc = 1;
+               break;
+diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
+index 62469c60be23..75f942ae5176 100644
+--- a/fs/nfsd/auth.c
++++ b/fs/nfsd/auth.c
+@@ -59,6 +59,9 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
+                               gi->gid[i] = exp->ex_anon_gid;
+                       else
+                               gi->gid[i] = rqgi->gid[i];
++
++                      /* Each thread allocates its own gi, no race */
++                      groups_sort(gi);
+               }
+       } else {
+               gi = get_group_info(rqgi);
+diff --git a/include/linux/cred.h b/include/linux/cred.h
+index f0e70a1bb3ac..cf1a5d0c4eb4 100644
+--- a/include/linux/cred.h
++++ b/include/linux/cred.h
+@@ -82,6 +82,7 @@ extern int set_current_groups(struct group_info *);
+ extern void set_groups(struct cred *, struct group_info *);
+ extern int groups_search(const struct group_info *, kgid_t);
+ extern bool may_setgroups(void);
++extern void groups_sort(struct group_info *);
+ 
+ /*
+  * The security context of a task
+diff --git a/include/linux/fscache.h b/include/linux/fscache.h
+index 115bb81912cc..94a8aae8f9e2 100644
+--- a/include/linux/fscache.h
++++ b/include/linux/fscache.h
+@@ -764,7 +764,7 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie,
+ {
+       if (fscache_cookie_valid(cookie) && PageFsCache(page))
+               return __fscache_maybe_release_page(cookie, page, gfp);
+-      return false;
++      return true;
+ }
+ 
+ /**
+diff --git a/kernel/acct.c b/kernel/acct.c
+index 74963d192c5d..37f1dc696fbd 100644
+--- a/kernel/acct.c
++++ b/kernel/acct.c
+@@ -99,7 +99,7 @@ static int check_free_space(struct bsd_acct_struct *acct)
+ {
+       struct kstatfs sbuf;
+ 
+-      if (time_is_before_jiffies(acct->needcheck))
++      if (time_is_after_jiffies(acct->needcheck))
+               goto out;
+ 
+       /* May block */
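
The one-word acct fix above inverts a wraparound-safe time comparison: time_is_after_jiffies(t) is true while the deadline t is still ahead, which is exactly when the expensive statfs should be skipped. A userspace sketch with the macro reduced to its signed-subtraction core:

  #include <stdio.h>

  /* wraparound-safe: true if a is later than b (cf. include/linux/jiffies.h) */
  #define time_after(a, b)        ((long)((b) - (a)) < 0)

  int main(void)
  {
          unsigned long jiffies = 1000;   /* "now" */
          unsigned long needcheck = 1500; /* next free-space check due then */

          /* time_is_after_jiffies(needcheck) == time_after(needcheck, jiffies) */
          if (time_after(needcheck, jiffies))
                  printf("too early, skip the check\n");
          else
                  printf("do the free-space check\n");
          return 0;
  }
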
+diff --git a/kernel/groups.c b/kernel/groups.c
+index 2fcadd66a8fd..94bde5210e3d 100644
+--- a/kernel/groups.c
++++ b/kernel/groups.c
+@@ -77,7 +77,7 @@ static int groups_from_user(struct group_info *group_info,
+ }
+ 
+ /* a simple Shell sort */
+-static void groups_sort(struct group_info *group_info)
++void groups_sort(struct group_info *group_info)
+ {
+       int base, max, stride;
+       int gidsetsize = group_info->ngroups;
+@@ -103,6 +103,7 @@ static void groups_sort(struct group_info *group_info)
+               stride /= 3;
+       }
+ }
++EXPORT_SYMBOL(groups_sort);
+ 
+ /* a simple bsearch */
+ int groups_search(const struct group_info *group_info, kgid_t grp)
+@@ -134,7 +135,6 @@ int groups_search(const struct group_info *group_info, kgid_t grp)
+ void set_groups(struct cred *new, struct group_info *group_info)
+ {
+       put_group_info(new->group_info);
+-      groups_sort(group_info);
+       get_group_info(group_info);
+       new->group_info = group_info;
+ }
+@@ -218,6 +218,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
+               return retval;
+       }
+ 
++      groups_sort(group_info);
+       retval = set_current_groups(group_info);
+       put_group_info(group_info);
+ 
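
Since set_groups() no longer sorts, every caller that fills a group_info must now call groups_sort() once before publishing it, as the hunks above and below do. A stand-alone port of the kernel's Shell sort, using plain ints in place of kgid_t, to show the gap sequence involved:

  #include <stdio.h>

  /* same algorithm as kernel/groups.c:groups_sort(), minus kgid_t */
  static void groups_sort(int *gid, int gidsetsize)
  {
          int base, max, stride;

          /* largest stride of the 3n+1 gap sequence below gidsetsize */
          for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
                  ;
          stride /= 3;

          while (stride) {
                  max = gidsetsize - stride;
                  for (base = 0; base < max; base++) {
                          int left = base;
                          int index = base + stride;
                          int tmp = gid[index];

                          while (left >= 0 && gid[left] > tmp) {
                                  gid[index] = gid[left];
                                  index = left;
                                  left -= stride;
                          }
                          gid[index] = tmp;
                  }
                  stride /= 3;
          }
  }

  int main(void)
  {
          int gids[] = { 65534, 100, 10, 4, 24 };
          int i, n = sizeof(gids) / sizeof(gids[0]);

          groups_sort(gids, n);   /* sort once, then publish */
          for (i = 0; i < n; i++)
                  printf("%d ", gids[i]);
          printf("\n");
          return 0;
  }
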
+diff --git a/kernel/signal.c b/kernel/signal.c
+index e48668c3c972..7ebe236a5364 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -72,7 +72,7 @@ static int sig_task_ignored(struct task_struct *t, int sig, bool force)
+       handler = sig_handler(t, sig);
+ 
+       if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
+-                      handler == SIG_DFL && !force)
++          handler == SIG_DFL && !(force && sig_kernel_only(sig)))
+               return 1;
+ 
+       return sig_handler_ignored(handler, sig);
+@@ -88,13 +88,15 @@ static int sig_ignored(struct task_struct *t, int sig, bool force)
+       if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
+               return 0;
+ 
+-      if (!sig_task_ignored(t, sig, force))
+-              return 0;
+-
+       /*
+-       * Tracers may want to know about even ignored signals.
++       * Tracers may want to know about even ignored signal unless it
++       * is SIGKILL which can't be reported anyway but can be ignored
++       * by SIGNAL_UNKILLABLE task.
+        */
+-      return !t->ptrace;
++      if (t->ptrace && sig != SIGKILL)
++              return 0;
++
++      return sig_task_ignored(t, sig, force);
+ }
+ 
+ /*
+@@ -917,9 +919,9 @@ static void complete_signal(int sig, struct task_struct *p, int group)
+        * then start taking the whole group down immediately.
+        */
+       if (sig_fatal(p, sig) &&
+-          !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
++          !(signal->flags & SIGNAL_GROUP_EXIT) &&
+           !sigismember(&t->real_blocked, sig) &&
+-          (sig == SIGKILL || !t->ptrace)) {
++          (sig == SIGKILL || !p->ptrace)) {
+               /*
+                * This signal will be fatal to the whole group.
+                */
+diff --git a/kernel/uid16.c b/kernel/uid16.c
+index cc40793464e3..dcffcce9d75e 100644
+--- a/kernel/uid16.c
++++ b/kernel/uid16.c
+@@ -190,6 +190,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
+               return retval;
+       }
+ 
++      groups_sort(group_info);
+       retval = set_current_groups(group_info);
+       put_group_info(group_info);
+ 
+diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
+index 25d9a9cf7b66..624c322af3ab 100644
+--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
++++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
+@@ -231,6 +231,7 @@ static int gssx_dec_linux_creds(struct xdr_stream *xdr,
+                       goto out_free_groups;
+               creds->cr_group_info->gid[i] = kgid;
+       }
++      groups_sort(creds->cr_group_info);
+ 
+       return 0;
+ out_free_groups:
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 153082598522..6a08bc451247 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -481,6 +481,7 @@ static int rsc_parse(struct cache_detail *cd,
+                               goto out;
+                       rsci.cred.cr_group_info->gid[i] = kgid;
+               }
++              groups_sort(rsci.cred.cr_group_info);
+ 
+               /* mech name */
+               len = qword_get(&mesg, buf, mlen);
+diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
+index 64af4f034de6..738a243c68a2 100644
+--- a/net/sunrpc/svcauth_unix.c
++++ b/net/sunrpc/svcauth_unix.c
+@@ -520,6 +520,7 @@ static int unix_gid_parse(struct cache_detail *cd,
+               ug.gi->gid[i] = kgid;
+       }
+ 
++      groups_sort(ug.gi);
+       ugp = unix_gid_lookup(cd, uid);
+       if (ugp) {
+               struct cache_head *ch;
+@@ -819,6 +820,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
+               kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
+               cred->cr_group_info->gid[i] = kgid;
+       }
++      groups_sort(cred->cr_group_info);
+       if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
+               *authp = rpc_autherr_badverf;
+               return SVC_DENIED;
