commit:     f304182d52f7091718f845a500bb9ac45fca90d1
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Aug  6 19:15:56 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Aug  6 19:15:56 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f304182d

Linux patch 4.9.188

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1187_linux-4.9.188.patch | 1207 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1211 insertions(+)

diff --git a/0000_README b/0000_README
index ddc9ef6..6e9f4e5 100644
--- a/0000_README
+++ b/0000_README
@@ -791,6 +791,10 @@ Patch:  1186_linux-4.9.187.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.187
 
+Patch:  1187_linux-4.9.188.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.188
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1187_linux-4.9.188.patch b/1187_linux-4.9.188.patch
new file mode 100644
index 0000000..fe6b76b
--- /dev/null
+++ b/1187_linux-4.9.188.patch
@@ -0,0 +1,1207 @@
+diff --git a/Makefile b/Makefile
+index 65ed5dc69ec9..b6b54e6f67e8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 187
++SUBLEVEL = 188
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+@@ -400,6 +400,7 @@ KBUILD_AFLAGS_MODULE  := -DMODULE
+ KBUILD_CFLAGS_MODULE  := -DMODULE
+ KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
+ GCC_PLUGINS_CFLAGS :=
++CLANG_FLAGS :=
+ 
+ # Read KERNELRELEASE from include/config/kernel.release (if it exists)
+ KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
+@@ -506,7 +507,7 @@ endif
+ 
+ ifeq ($(cc-name),clang)
+ ifneq ($(CROSS_COMPILE),)
+-CLANG_FLAGS   := --target=$(notdir $(CROSS_COMPILE:%-=%))
++CLANG_FLAGS   += --target=$(notdir $(CROSS_COMPILE:%-=%))
+ GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
+ CLANG_FLAGS   += --prefix=$(GCC_TOOLCHAIN_DIR)
+ GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
+diff --git a/arch/arm/boot/dts/rk3288-veyron-mickey.dts b/arch/arm/boot/dts/rk3288-veyron-mickey.dts
+index f36f6f459225..365382ab9ebd 100644
+--- a/arch/arm/boot/dts/rk3288-veyron-mickey.dts
++++ b/arch/arm/boot/dts/rk3288-veyron-mickey.dts
+@@ -161,10 +161,6 @@
+       };
+ };
+ 
+-&emmc {
+-      /delete-property/mmc-hs200-1_8v;
+-};
+-
+ &i2c2 {
+       status = "disabled";
+ };
+diff --git a/arch/arm/boot/dts/rk3288-veyron-minnie.dts b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
+index f72d616d1bf8..9647d9b6b299 100644
+--- a/arch/arm/boot/dts/rk3288-veyron-minnie.dts
++++ b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
+@@ -125,10 +125,6 @@
+       power-supply = <&backlight_regulator>;
+ };
+ 
+-&emmc {
+-      /delete-property/mmc-hs200-1_8v;
+-};
+-
+ &gpio_keys {
+       pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>;
+ 
+diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
+index 17ec2e2d7a60..30f1384f619b 100644
+--- a/arch/arm/boot/dts/rk3288.dtsi
++++ b/arch/arm/boot/dts/rk3288.dtsi
+@@ -210,6 +210,7 @@
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+               clock-frequency = <24000000>;
++              arm,no-tick-in-suspend;
+       };
+ 
+       timer: timer@ff810000 {
+diff --git a/arch/arm/mach-rpc/dma.c b/arch/arm/mach-rpc/dma.c
+index 6d3517dc4772..82aac38fa2cf 100644
+--- a/arch/arm/mach-rpc/dma.c
++++ b/arch/arm/mach-rpc/dma.c
+@@ -131,7 +131,7 @@ static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
+       } while (1);
+ 
+       idma->state = ~DMA_ST_AB;
+-      disable_irq(irq);
++      disable_irq_nosync(irq);
+ 
+       return IRQ_HANDLED;
+ }
+@@ -174,6 +174,9 @@ static void iomd_enable_dma(unsigned int chan, dma_t *dma)
+                               DMA_FROM_DEVICE : DMA_TO_DEVICE);
+               }
+ 
++              idma->dma_addr = idma->dma.sg->dma_address;
++              idma->dma_len = idma->dma.sg->length;
++
+               iomd_writeb(DMA_CR_C, dma_base + CR);
+               idma->state = DMA_ST_AB;
+       }
+diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
+index 8ac0e5994ed2..7c6f75c2aa4d 100644
+--- a/arch/mips/lantiq/irq.c
++++ b/arch/mips/lantiq/irq.c
+@@ -160,8 +160,9 @@ static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
+                       if (edge)
+                               irq_set_handler(d->hwirq, handle_edge_irq);
+ 
+-                      ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
+-                              (val << (i * 4)), LTQ_EIU_EXIN_C);
++                      ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
++                                  (~(7 << (i * 4)))) | (val << (i * 4)),
++                                  LTQ_EIU_EXIN_C);
+               }
+       }
+ 
+diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+index d86e68d3c794..1912b2671f10 100644
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -15,6 +15,7 @@
+ #include "error.h"
+ #include "../string.h"
+ #include "../voffset.h"
++#include <asm/bootparam_utils.h>
+ 
+ /*
+  * WARNING!!
+diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
+index 2728e1b7e4a6..a8789aa647b4 100644
+--- a/arch/x86/boot/compressed/misc.h
++++ b/arch/x86/boot/compressed/misc.h
+@@ -19,7 +19,6 @@
+ #include <asm/page.h>
+ #include <asm/boot.h>
+ #include <asm/bootparam.h>
+-#include <asm/bootparam_utils.h>
+ 
+ #define BOOT_BOOT_H
+ #include "../ctype.h"
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 2188b5af8167..f39fd349cef6 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -50,7 +50,7 @@ static inline void generic_apic_probe(void)
+ 
+ #ifdef CONFIG_X86_LOCAL_APIC
+ 
+-extern unsigned int apic_verbosity;
++extern int apic_verbosity;
+ extern int local_apic_timer_c2_ok;
+ 
+ extern int disable_apic;
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 83b5b2990b49..222cb69e1219 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1309,25 +1309,29 @@ enum {
+ #define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
+ #define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
+ 
++asmlinkage void __noreturn kvm_spurious_fault(void);
++
+ /*
+  * Hardware virtualization extension instructions may fault if a
+  * reboot turns off virtualization while processes are running.
+- * Trap the fault and ignore the instruction if that happens.
++ * Usually after catching the fault we just panic; during reboot
++ * instead the instruction is ignored.
+  */
+-asmlinkage void kvm_spurious_fault(void);
+-
+-#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)    \
+-      "666: " insn "\n\t" \
+-      "668: \n\t"                           \
+-      ".pushsection .fixup, \"ax\" \n" \
+-      "667: \n\t" \
+-      cleanup_insn "\n\t"                   \
+-      "cmpb $0, kvm_rebooting \n\t"         \
+-      "jne 668b \n\t"                       \
+-      __ASM_SIZE(push) " $666b \n\t"        \
+-      "jmp kvm_spurious_fault \n\t"         \
+-      ".popsection \n\t" \
+-      _ASM_EXTABLE(666b, 667b)
++#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)            \
++      "666: \n\t"                                                     \
++      insn "\n\t"                                                     \
++      "jmp    668f \n\t"                                              \
++      "667: \n\t"                                                     \
++      "call   kvm_spurious_fault \n\t"                                \
++      "668: \n\t"                                                     \
++      ".pushsection .fixup, \"ax\" \n\t"                              \
++      "700: \n\t"                                                     \
++      cleanup_insn "\n\t"                                             \
++      "cmpb   $0, kvm_rebooting\n\t"                                  \
++      "je     667b \n\t"                                              \
++      "jmp    668b \n\t"                                              \
++      ".popsection \n\t"                                              \
++      _ASM_EXTABLE(666b, 700b)
+ 
+ #define __kvm_handle_fault_on_reboot(insn)            \
+       ____kvm_handle_fault_on_reboot(insn, "")
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 4f2af1ee09cb..cc9a6f680225 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -183,7 +183,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
+ /*
+  * Debug level, exported for io_apic.c
+  */
+-unsigned int apic_verbosity;
++int apic_verbosity;
+ 
+ int pic_mode;
+ 
+diff --git a/arch/x86/math-emu/fpu_emu.h b/arch/x86/math-emu/fpu_emu.h
+index afbc4d805d66..df5aee5402c4 100644
+--- a/arch/x86/math-emu/fpu_emu.h
++++ b/arch/x86/math-emu/fpu_emu.h
+@@ -176,7 +176,7 @@ static inline void reg_copy(FPU_REG const *x, FPU_REG *y)
+ #define setexponentpos(x,y) { (*(short *)&((x)->exp)) = \
+   ((y) + EXTENDED_Ebias) & 0x7fff; }
+ #define exponent16(x)         (*(short *)&((x)->exp))
+-#define setexponent16(x,y)  { (*(short *)&((x)->exp)) = (y); }
++#define setexponent16(x,y)  { (*(short *)&((x)->exp)) = (u16)(y); }
+ #define addexponent(x,y)    { (*(short *)&((x)->exp)) += (y); }
+ #define stdexp(x)           { (*(short *)&((x)->exp)) += EXTENDED_Ebias; }
+ 
+diff --git a/arch/x86/math-emu/reg_constant.c b/arch/x86/math-emu/reg_constant.c
+index 00548354912f..382093c5072b 100644
+--- a/arch/x86/math-emu/reg_constant.c
++++ b/arch/x86/math-emu/reg_constant.c
+@@ -17,7 +17,7 @@
+ #include "control_w.h"
+ 
+ #define MAKE_REG(s, e, l, h) { l, h, \
+-              ((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
++              (u16)((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
+ 
+ FPU_REG const CONST_1 = MAKE_REG(POS, 0, 0x00000000, 0x80000000);
+ #if 0
+diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
+index 1680768d392c..d7db45bdfb3b 100644
+--- a/arch/x86/mm/gup.c
++++ b/arch/x86/mm/gup.c
+@@ -97,6 +97,20 @@ static inline int pte_allows_gup(unsigned long pteval, int write)
+       return 1;
+ }
+ 
++/*
++ * Return the compund head page with ref appropriately incremented,
++ * or NULL if that failed.
++ */
++static inline struct page *try_get_compound_head(struct page *page, int refs)
++{
++      struct page *head = compound_head(page);
++      if (WARN_ON_ONCE(page_ref_count(head) < 0))
++              return NULL;
++      if (unlikely(!page_cache_add_speculative(head, refs)))
++              return NULL;
++      return head;
++}
++
+ /*
+  * The performance critical leaf functions are made noinline otherwise gcc
+  * inlines everything into a single function which results in too much
+@@ -112,7 +126,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
+       ptep = pte_offset_map(&pmd, addr);
+       do {
+               pte_t pte = gup_get_pte(ptep);
+-              struct page *page;
++              struct page *head, *page;
+ 
+               /* Similar to the PMD case, NUMA hinting must take slow path */
+               if (pte_protnone(pte)) {
+@@ -138,7 +152,21 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
+               }
+               VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+               page = pte_page(pte);
+-              get_page(page);
++
++              head = try_get_compound_head(page, 1);
++              if (!head) {
++                      put_dev_pagemap(pgmap);
++                      pte_unmap(ptep);
++                      return 0;
++              }
++
++              if (unlikely(pte_val(pte) != pte_val(*ptep))) {
++                      put_page(head);
++                      put_dev_pagemap(pgmap);
++                      pte_unmap(ptep);
++                      return 0;
++              }
++
+               put_dev_pagemap(pgmap);
+               SetPageReferenced(page);
+               pages[*nr] = page;
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 29632a6dd1c6..8056759073b0 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -581,6 +581,12 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
+ 
+       if (mm) {
+               down_write(&mm->mmap_sem);
++              if (!mmget_still_valid(mm)) {
++                      if (allocate == 0)
++                              goto free_range;
++                      goto err_no_vma;
++              }
++
+               vma = proc->vma;
+               if (vma && mm != proc->vma_vm_mm) {
+                       pr_err("%d: vma mm and task mm mismatch\n",
+diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
+index f37a6ef4f544..e4fe24be3d7a 100644
+--- a/drivers/dma/sh/rcar-dmac.c
++++ b/drivers/dma/sh/rcar-dmac.c
+@@ -1111,7 +1111,7 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+       struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+ 
+       /* Someone calling slave DMA on a generic channel? */
+-      if (rchan->mid_rid < 0 || !sg_len) {
++      if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) {
+               dev_warn(chan->device->dev,
+                        "%s: bad parameter: len=%d, id=%d\n",
+                        __func__, sg_len, rchan->mid_rid);
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index a3251faa3ed8..d3675819f561 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -817,9 +817,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+       }
+ 
+       if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
+-              irqflags |= IRQF_TRIGGER_RISING;
++              irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
++                      IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
+       if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
+-              irqflags |= IRQF_TRIGGER_FALLING;
++              irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
++                      IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
+       irqflags |= IRQF_ONESHOT;
+       irqflags |= IRQF_SHARED;
+ 
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index 8d59a5905ee8..7ccf7225f75a 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -1172,6 +1172,8 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
+        * mlx4_ib_vma_close().
+        */
+       down_write(&owning_mm->mmap_sem);
++      if (!mmget_still_valid(owning_mm))
++              goto skip_mm;
+       for (i = 0; i < HW_BAR_COUNT; i++) {
+               vma = context->hw_bar_info[i].vma;
+               if (!vma)
+@@ -1190,7 +1192,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
+               /* context going to be destroyed, should not access ops any more */
+               context->hw_bar_info[i].vma->vm_ops = NULL;
+       }
+-
++skip_mm:
+       up_write(&owning_mm->mmap_sem);
+       mmput(owning_mm);
+       put_task_struct(owning_process);
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index b1daf5c16117..f94df0e6a0f2 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -1307,6 +1307,8 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
+        * mlx5_ib_vma_close.
+        */
+       down_write(&owning_mm->mmap_sem);
++      if (!mmget_still_valid(owning_mm))
++              goto skip_mm;
+       list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
+                                list) {
+               vma = vma_private->vma;
+@@ -1321,6 +1323,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
+               list_del(&vma_private->list);
+               kfree(vma_private);
+       }
++skip_mm:
+       up_write(&owning_mm->mmap_sem);
+       mmput(owning_mm);
+       put_task_struct(owning_process);
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index f89489b28575..a7bc89f5dae7 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -1421,7 +1421,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+               }
+ 
+               MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
+-              MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+               memcpy(rss_key, ucmd.rx_hash_key, len);
+               break;
+       }
+diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
+index a37b9b6a315a..2eef811764ad 100644
+--- a/drivers/misc/eeprom/at24.c
++++ b/drivers/misc/eeprom/at24.c
+@@ -777,7 +777,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
+       at24->nvmem_config.name = dev_name(&client->dev);
+       at24->nvmem_config.dev = &client->dev;
+       at24->nvmem_config.read_only = !writable;
+-      at24->nvmem_config.root_only = true;
++      at24->nvmem_config.root_only = !(chip.flags & AT24_FLAG_IRUGO);
+       at24->nvmem_config.owner = THIS_MODULE;
+       at24->nvmem_config.compat = true;
+       at24->nvmem_config.base_dev = &client->dev;
+diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
+index e10a00d0d44d..d9c7fd0cabaf 100644
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -1864,8 +1864,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
+                                * delayed. Allowing the transfer to take place
+                                * avoids races and keeps things simple.
+                                */
+-                              if ((err != -ETIMEDOUT) &&
+-                                  (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
++                              if (err != -ETIMEDOUT) {
+                                       state = STATE_SENDING_DATA;
+                                       continue;
+                               }
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index b2eeecb26939..289560b0f643 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -4701,8 +4701,12 @@ int be_update_queues(struct be_adapter *adapter)
+       struct net_device *netdev = adapter->netdev;
+       int status;
+ 
+-      if (netif_running(netdev))
++      if (netif_running(netdev)) {
++              /* device cannot transmit now, avoid dev_watchdog timeouts */
++              netif_carrier_off(netdev);
++
+               be_close(netdev);
++      }
+ 
+       be_cancel_worker(adapter);
+ 
+diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
+index af82edc7fa5c..9b899af86cd5 100644
+--- a/drivers/perf/arm_pmu.c
++++ b/drivers/perf/arm_pmu.c
+@@ -804,8 +804,8 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
+               cpu_pm_pmu_setup(armpmu, cmd);
+               break;
+       case CPU_PM_EXIT:
+-              cpu_pm_pmu_setup(armpmu, cmd);
+       case CPU_PM_ENTER_FAILED:
++              cpu_pm_pmu_setup(armpmu, cmd);
+               armpmu->start(armpmu);
+               break;
+       default:
+diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
+index f32fc704cb7e..28c45db45aba 100644
+--- a/drivers/rapidio/devices/rio_mport_cdev.c
++++ b/drivers/rapidio/devices/rio_mport_cdev.c
+@@ -1743,6 +1743,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
+ 
+       if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
+               return -EFAULT;
++      dev_info.name[sizeof(dev_info.name) - 1] = '\0';
+ 
+       rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
+                  dev_info.comptag, dev_info.destid, dev_info.hopcount);
+@@ -1874,6 +1875,7 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
+ 
+       if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
+               return -EFAULT;
++      dev_info.name[sizeof(dev_info.name) - 1] = '\0';
+ 
+       mport = priv->md->mport;
+ 
+diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
+index e453d2a7d7f9..f40d606f86c9 100644
+--- a/drivers/s390/block/dasd_alias.c
++++ b/drivers/s390/block/dasd_alias.c
+@@ -382,6 +382,20 @@ suborder_not_supported(struct dasd_ccw_req *cqr)
+       char msg_format;
+       char msg_no;
+ 
++      /*
++       * intrc values ENODEV, ENOLINK and EPERM
++       * will be optained from sleep_on to indicate that no
++       * IO operation can be started
++       */
++      if (cqr->intrc == -ENODEV)
++              return 1;
++
++      if (cqr->intrc == -ENOLINK)
++              return 1;
++
++      if (cqr->intrc == -EPERM)
++              return 1;
++
+       sense = dasd_get_sense(&cqr->irb);
+       if (!sense)
+               return 0;
+@@ -446,12 +460,8 @@ static int read_unit_address_configuration(struct dasd_device *device,
+       lcu->flags &= ~NEED_UAC_UPDATE;
+       spin_unlock_irqrestore(&lcu->lock, flags);
+ 
+-      do {
+-              rc = dasd_sleep_on(cqr);
+-              if (rc && suborder_not_supported(cqr))
+-                      return -EOPNOTSUPP;
+-      } while (rc && (cqr->retries > 0));
+-      if (rc) {
++      rc = dasd_sleep_on(cqr);
++      if (rc && !suborder_not_supported(cqr)) {
+               spin_lock_irqsave(&lcu->lock, flags);
+               lcu->flags |= NEED_UAC_UPDATE;
+               spin_unlock_irqrestore(&lcu->lock, flags);
+diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
+index abe460eac712..cc62d8cc8cfd 100644
+--- a/drivers/s390/scsi/zfcp_erp.c
++++ b/drivers/s390/scsi/zfcp_erp.c
+@@ -10,6 +10,7 @@
+ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+ 
+ #include <linux/kthread.h>
++#include <linux/bug.h>
+ #include "zfcp_ext.h"
+ #include "zfcp_reqlist.h"
+ 
+@@ -244,6 +245,12 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
+       struct zfcp_erp_action *erp_action;
+       struct zfcp_scsi_dev *zfcp_sdev;
+ 
++      if (WARN_ON_ONCE(need != ZFCP_ERP_ACTION_REOPEN_LUN &&
++                       need != ZFCP_ERP_ACTION_REOPEN_PORT &&
++                       need != ZFCP_ERP_ACTION_REOPEN_PORT_FORCED &&
++                       need != ZFCP_ERP_ACTION_REOPEN_ADAPTER))
++              return NULL;
++
+       switch (need) {
+       case ZFCP_ERP_ACTION_REOPEN_LUN:
+               zfcp_sdev = sdev_to_zfcp(sdev);
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index 5d04b362837d..9fdb39f377db 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -365,8 +365,8 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+       /* Convert the size to actually allocated. */
+       size = 1UL << (order + XEN_PAGE_SHIFT);
+ 
+-      if (((dev_addr + size - 1 <= dma_mask)) ||
+-          range_straddles_page_boundary(phys, size))
++      if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
++                   range_straddles_page_boundary(phys, size)))
+               xen_destroy_contiguous_region(phys, order);
+ 
+       xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
+diff --git a/fs/adfs/super.c b/fs/adfs/super.c
+index c9fdfb112933..e42c30001509 100644
+--- a/fs/adfs/super.c
++++ b/fs/adfs/super.c
+@@ -368,6 +368,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
+       struct buffer_head *bh;
+       struct object_info root_obj;
+       unsigned char *b_data;
++      unsigned int blocksize;
+       struct adfs_sb_info *asb;
+       struct inode *root;
+       int ret = -EINVAL;
+@@ -419,8 +420,10 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
+               goto error_free_bh;
+       }
+ 
++      blocksize = 1 << dr->log2secsize;
+       brelse(bh);
+-      if (sb_set_blocksize(sb, 1 << dr->log2secsize)) {
++
++      if (sb_set_blocksize(sb, blocksize)) {
+               bh = sb_bread(sb, ADFS_DISCRECORD / sb->s_blocksize);
+               if (!bh) {
+                       adfs_error(sb, "couldn't read superblock on "
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index a45f26ac5da7..d9e49705a289 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -5835,68 +5835,21 @@ static int changed_extent(struct send_ctx *sctx,
+ {
+       int ret = 0;
+ 
+-      if (sctx->cur_ino != sctx->cmp_key->objectid) {
+-
+-              if (result == BTRFS_COMPARE_TREE_CHANGED) {
+-                      struct extent_buffer *leaf_l;
+-                      struct extent_buffer *leaf_r;
+-                      struct btrfs_file_extent_item *ei_l;
+-                      struct btrfs_file_extent_item *ei_r;
+-
+-                      leaf_l = sctx->left_path->nodes[0];
+-                      leaf_r = sctx->right_path->nodes[0];
+-                      ei_l = btrfs_item_ptr(leaf_l,
+-                                            sctx->left_path->slots[0],
+-                                            struct btrfs_file_extent_item);
+-                      ei_r = btrfs_item_ptr(leaf_r,
+-                                            sctx->right_path->slots[0],
+-                                            struct btrfs_file_extent_item);
+-
+-                      /*
+-                       * We may have found an extent item that has changed
+-                       * only its disk_bytenr field and the corresponding
+-                       * inode item was not updated. This case happens due to
+-                       * very specific timings during relocation when a leaf
+-                       * that contains file extent items is COWed while
+-                       * relocation is ongoing and its in the stage where it
+-                       * updates data pointers. So when this happens we can
+-                       * safely ignore it since we know it's the same extent,
+-                       * but just at different logical and physical locations
+-                       * (when an extent is fully replaced with a new one, we
+-                       * know the generation number must have changed too,
+-                       * since snapshot creation implies committing the current
+-                       * transaction, and the inode item must have been updated
+-                       * as well).
+-                       * This replacement of the disk_bytenr happens at
+-                       * relocation.c:replace_file_extents() through
+-                       * relocation.c:btrfs_reloc_cow_block().
+-                       */
+-                      if (btrfs_file_extent_generation(leaf_l, ei_l) ==
+-                          btrfs_file_extent_generation(leaf_r, ei_r) &&
+-                          btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
+-                          btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
+-                          btrfs_file_extent_compression(leaf_l, ei_l) ==
+-                          btrfs_file_extent_compression(leaf_r, ei_r) &&
+-                          btrfs_file_extent_encryption(leaf_l, ei_l) ==
+-                          btrfs_file_extent_encryption(leaf_r, ei_r) &&
+-                          btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
+-                          btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
+-                          btrfs_file_extent_type(leaf_l, ei_l) ==
+-                          btrfs_file_extent_type(leaf_r, ei_r) &&
+-                          btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
+-                          btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
+-                          btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
+-                          btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
+-                          btrfs_file_extent_offset(leaf_l, ei_l) ==
+-                          btrfs_file_extent_offset(leaf_r, ei_r) &&
+-                          btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
+-                          btrfs_file_extent_num_bytes(leaf_r, ei_r))
+-                              return 0;
+-              }
+-
+-              inconsistent_snapshot_error(sctx, result, "extent");
+-              return -EIO;
+-      }
++      /*
++       * We have found an extent item that changed without the inode item
++       * having changed. This can happen either after relocation (where the
++       * disk_bytenr of an extent item is replaced at
++       * relocation.c:replace_file_extents()) or after deduplication into a
++       * file in both the parent and send snapshots (where an extent item can
++       * get modified or replaced with a new one). Note that deduplication
++       * updates the inode item, but it only changes the iversion (sequence
++       * field in the inode item) of the inode, so if a file is deduplicated
++       * the same amount of times in both the parent and send snapshots, its
++       * iversion becames the same in both snapshots, whence the inode item is
++       * the same on both snapshots.
++       */
++      if (sctx->cur_ino != sctx->cmp_key->objectid)
++              return 0;
+ 
+       if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
+               if (result != BTRFS_COMPARE_TREE_DELETED)
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 94b61afe996c..70aa22a8a9cc 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -5072,8 +5072,7 @@ static inline int btrfs_chunk_max_errors(struct map_lookup *map)
+ 
+       if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
+                        BTRFS_BLOCK_GROUP_RAID10 |
+-                       BTRFS_BLOCK_GROUP_RAID5 |
+-                       BTRFS_BLOCK_GROUP_DUP)) {
++                       BTRFS_BLOCK_GROUP_RAID5)) {
+               max_errors = 1;
+       } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
+               max_errors = 2;
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index 622d5dd9f616..9bd0d928057b 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -476,7 +476,12 @@ static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
+                                          long long release_count,
+                                          long long ordered_count)
+ {
+-      smp_mb__before_atomic();
++      /*
++       * Makes sure operations that setup readdir cache (update page
++       * cache and i_size) are strongly ordered w.r.t. the following
++       * atomic64_set() operations.
++       */
++      smp_mb();
+       atomic64_set(&ci->i_complete_seq[0], release_count);
+       atomic64_set(&ci->i_complete_seq[1], ordered_count);
+ }
+diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
+index 75267cdd5dfd..81144a8c0927 100644
+--- a/fs/ceph/xattr.c
++++ b/fs/ceph/xattr.c
+@@ -74,7 +74,7 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
+       const char *ns_field = " pool_namespace=";
+       char buf[128];
+       size_t len, total_len = 0;
+-      int ret;
++      ssize_t ret;
+ 
+       pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
+ 
+@@ -98,11 +98,8 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
+       if (pool_ns)
+               total_len += strlen(ns_field) + pool_ns->len;
+ 
+-      if (!size) {
+-              ret = total_len;
+-      } else if (total_len > size) {
+-              ret = -ERANGE;
+-      } else {
++      ret = total_len;
++      if (size >= total_len) {
+               memcpy(val, buf, len);
+               ret = len;
+               if (pool_name) {
+@@ -757,8 +754,11 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
+       vxattr = ceph_match_vxattr(inode, name);
+       if (vxattr) {
+               err = -ENODATA;
+-              if (!(vxattr->exists_cb && !vxattr->exists_cb(ci)))
++              if (!(vxattr->exists_cb && !vxattr->exists_cb(ci))) {
+                       err = vxattr->getxattr_cb(ci, value, size);
++                      if (size && size < err)
++                              err = -ERANGE;
++              }
+               return err;
+       }
+ 
+diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
+index 822629126e89..ff9b5cf8ff01 100644
+--- a/fs/coda/psdev.c
++++ b/fs/coda/psdev.c
+@@ -187,8 +187,11 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
+       if (req->uc_opcode == CODA_OPEN_BY_FD) {
+               struct coda_open_by_fd_out *outp =
+                       (struct coda_open_by_fd_out *)req->uc_data;
+-              if (!outp->oh.result)
++              if (!outp->oh.result) {
+                       outp->fh = fget(outp->fd);
++                      if (!outp->fh)
++                              return -EBADF;
++              }
+       }
+ 
+         wake_up(&req->uc_sleep);
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 5138e781737a..4b207b10db03 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1057,6 +1057,24 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
+                                       count = -EINTR;
+                                       goto out_mm;
+                               }
++                              /*
++                               * Avoid to modify vma->vm_flags
++                               * without locked ops while the
++                               * coredump reads the vm_flags.
++                               */
++                              if (!mmget_still_valid(mm)) {
++                                      /*
++                                       * Silently return "count"
++                                       * like if get_task_mm()
++                                       * failed. FIXME: should this
++                                       * function have returned
++                                       * -ESRCH if get_task_mm()
++                                       * failed like if
++                                       * get_proc_task() fails?
++                                       */
++                                      up_write(&mm->mmap_sem);
++                                      goto out_mm;
++                              }
+                               for (vma = mm->mmap; vma; vma = vma->vm_next) {
+                                       vma->vm_flags &= ~VM_SOFTDIRTY;
+                                       vma_set_page_prot(vma);
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index 784d667475ae..8bf425a103f0 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -479,6 +479,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
+        * taking the mmap_sem for writing.
+        */
+       down_write(&mm->mmap_sem);
++      if (!mmget_still_valid(mm))
++              goto skip_mm;
+       prev = NULL;
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+               cond_resched();
+@@ -501,6 +503,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
+               vma->vm_flags = new_flags;
+               vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+       }
++skip_mm:
+       up_write(&mm->mmap_sem);
+       mmput(mm);
+ wakeup:
+@@ -802,6 +805,9 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
+               goto out;
+ 
+       down_write(&mm->mmap_sem);
++      if (!mmget_still_valid(mm))
++              goto out_unlock;
++
+       vma = find_vma_prev(mm, start, &prev);
+       if (!vma)
+               goto out_unlock;
+@@ -947,6 +953,9 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
+               goto out;
+ 
+       down_write(&mm->mmap_sem);
++      if (!mmget_still_valid(mm))
++              goto out_unlock;
++
+       vma = find_vma_prev(mm, start, &prev);
+       if (!vma)
+               goto out_unlock;
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index ca2b4c4aec42..719eb97217a3 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -309,7 +309,10 @@ void acpi_set_irq_model(enum acpi_irq_model_id model,
+ #ifdef CONFIG_X86_IO_APIC
+ extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
+ #else
+-#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
++static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
++{
++      return -1;
++}
+ #endif
+ /*
+  * This function undoes the effect of one call to acpi_register_gsi().
+diff --git a/include/linux/coda.h b/include/linux/coda.h
+index d30209b9cef8..0ca0c83fdb1c 100644
+--- a/include/linux/coda.h
++++ b/include/linux/coda.h
+@@ -58,8 +58,7 @@ Mellon the rights to redistribute these changes without encumbrance.
+ #ifndef _CODA_HEADER_
+ #define _CODA_HEADER_
+ 
+-#if defined(__linux__)
+ typedef unsigned long long u_quad_t;
+-#endif
++
+ #include <uapi/linux/coda.h>
+ #endif 
+diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
+index 5b8721efa948..fe1466daf291 100644
+--- a/include/linux/coda_psdev.h
++++ b/include/linux/coda_psdev.h
+@@ -19,6 +19,17 @@ struct venus_comm {
+       struct mutex        vc_mutex;
+ };
+ 
++/* messages between coda filesystem in kernel and Venus */
++struct upc_req {
++      struct list_head        uc_chain;
++      caddr_t                 uc_data;
++      u_short                 uc_flags;
++      u_short                 uc_inSize;  /* Size is at most 5000 bytes */
++      u_short                 uc_outSize;
++      u_short                 uc_opcode;  /* copied from data to save lookup */
++      int                     uc_unique;
++      wait_queue_head_t       uc_sleep;   /* process' wait queue */
++};
+ 
+ static inline struct venus_comm *coda_vcp(struct super_block *sb)
+ {
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 3050de0dac96..0020ee1cab37 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -54,6 +54,22 @@ extern void __chk_io_ptr(const volatile void __iomem *);
+ 
+ #ifdef __KERNEL__
+ 
++/*
++ * Minimal backport of compiler_attributes.h to add support for __copy
++ * to v4.9.y so that we can use it in init/exit_module to avoid
++ * -Werror=missing-attributes errors on GCC 9.
++ */
++#ifndef __has_attribute
++# define __has_attribute(x) __GCC4_has_attribute_##x
++# define __GCC4_has_attribute___copy__                0
++#endif
++
++#if __has_attribute(__copy__)
++# define __copy(symbol)                 __attribute__((__copy__(symbol)))
++#else
++# define __copy(symbol)
++#endif
++
+ #ifdef __GNUC__
+ #include <linux/compiler-gcc.h>
+ #endif
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 478466081265..ade072a6fd24 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1192,6 +1192,30 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+               unsigned long size, struct zap_details *);
+ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
+               unsigned long start, unsigned long end);
++/*
++ * This has to be called after a get_task_mm()/mmget_not_zero()
++ * followed by taking the mmap_sem for writing before modifying the
++ * vmas or anything the coredump pretends not to change from under it.
++ *
++ * It also has to be called when mmgrab() is used in the context of
++ * the process, but then the mm_count refcount is transferred outside
++ * the context of the process to run down_write() on that pinned mm.
++ *
++ * NOTE: find_extend_vma() called from GUP context is the only place
++ * that can modify the "mm" (notably the vm_start/end) under mmap_sem
++ * for reading and outside the context of the process, so it is also
++ * the only case that holds the mmap_sem for reading that must call
++ * this function. Generally if the mmap_sem is hold for reading
++ * there's no need of this check after get_task_mm()/mmget_not_zero().
++ *
++ * This function can be obsoleted and the check can be removed, after
++ * the coredump code will hold the mmap_sem for writing before
++ * invoking the ->core_dump methods.
++ */
++static inline bool mmget_still_valid(struct mm_struct *mm)
++{
++      return likely(!mm->core_state);
++}
+ 
+ /**
+  * mm_walk - callbacks for walk_page_range
+diff --git a/include/linux/module.h b/include/linux/module.h
+index fd9e121c7b3f..99f330ae13da 100644
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -129,13 +129,13 @@ extern void cleanup_module(void);
+ #define module_init(initfn)                                   \
+      static inline initcall_t __maybe_unused __inittest(void)                \
+       { return initfn; }                                      \
+-      int init_module(void) __attribute__((alias(#initfn)));
++      int init_module(void) __copy(initfn) __attribute__((alias(#initfn)));
+ 
+ /* This is only required if you want to be unloadable. */
+ #define module_exit(exitfn)                                   \
+      static inline exitcall_t __maybe_unused __exittest(void)                \
+       { return exitfn; }                                      \
+-      void cleanup_module(void) __attribute__((alias(#exitfn)));
+      void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn)));
+ 
+ #endif
+ 
+diff --git a/include/uapi/linux/coda_psdev.h b/include/uapi/linux/coda_psdev.h
+index 79d05981fc4b..e2c44d2f7d5b 100644
+--- a/include/uapi/linux/coda_psdev.h
++++ b/include/uapi/linux/coda_psdev.h
+@@ -6,19 +6,6 @@
+ #define CODA_PSDEV_MAJOR 67
+ #define MAX_CODADEVS  5          /* how many do we allow */
+ 
+-
+-/* messages between coda filesystem in kernel and Venus */
+-struct upc_req {
+-      struct list_head    uc_chain;
+-      caddr_t             uc_data;
+-      u_short             uc_flags;
+-      u_short             uc_inSize;  /* Size is at most 5000 bytes */
+-      u_short             uc_outSize;
+-      u_short             uc_opcode;  /* copied from data to save lookup */
+-      int                 uc_unique;
+-      wait_queue_head_t   uc_sleep;   /* process' wait queue */
+-};
+-
+ #define CODA_REQ_ASYNC  0x1
+ #define CODA_REQ_READ   0x2
+ #define CODA_REQ_WRITE  0x4
+diff --git a/ipc/mqueue.c b/ipc/mqueue.c
+index d5491a880751..3f7dc5f341f7 100644
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -369,7 +369,6 @@ static void mqueue_evict_inode(struct inode *inode)
+ {
+       struct mqueue_inode_info *info;
+       struct user_struct *user;
+-      unsigned long mq_bytes, mq_treesize;
+       struct ipc_namespace *ipc_ns;
+       struct msg_msg *msg, *nmsg;
+       LIST_HEAD(tmp_msg);
+@@ -392,16 +391,18 @@ static void mqueue_evict_inode(struct inode *inode)
+               free_msg(msg);
+       }
+ 
+-      /* Total amount of bytes accounted for the mqueue */
+-      mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
+-              min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
+-              sizeof(struct posix_msg_tree_node);
+-
+-      mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
+-                                info->attr.mq_msgsize);
+-
+       user = info->user;
+       if (user) {
++              unsigned long mq_bytes, mq_treesize;
++
++              /* Total amount of bytes accounted for the mqueue */
++              mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
++                      min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
++                      sizeof(struct posix_msg_tree_node);
++
++              mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
++                                        info->attr.mq_msgsize);
++
+               spin_lock(&mq_lock);
+               user->mq_bytes -= mq_bytes;
+               /*
+diff --git a/kernel/module.c b/kernel/module.c
+index 2325c9821f2a..fb9e07aec49e 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -3351,8 +3351,7 @@ static bool finished_loading(const char *name)
+       sched_annotate_sleep();
+       mutex_lock(&module_mutex);
+       mod = find_module_all(name, strlen(name), true);
+-      ret = !mod || mod->state == MODULE_STATE_LIVE
+-              || mod->state == MODULE_STATE_GOING;
++      ret = !mod || mod->state == MODULE_STATE_LIVE;
+       mutex_unlock(&module_mutex);
+ 
+       return ret;
+@@ -3515,8 +3514,7 @@ again:
+       mutex_lock(&module_mutex);
+       old = find_module_all(mod->name, strlen(mod->name), true);
+       if (old != NULL) {
+-              if (old->state == MODULE_STATE_COMING
+-                  || old->state == MODULE_STATE_UNFORMED) {
++              if (old->state != MODULE_STATE_LIVE) {
+                       /* Wait in case it fails to load. */
+                       mutex_unlock(&module_mutex);
+                       err = wait_event_interruptible(module_wq,
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 0043aef0ed8d..77109b9cf733 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1631,6 +1631,11 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
+       return  keep_regs;
+ }
+ 
++static struct ftrace_ops *
++ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
++static struct ftrace_ops *
++ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
++
+ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
+                                    int filter_hash,
+                                    bool inc)
+@@ -1759,15 +1764,17 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
+                       }
+ 
+                       /*
+-                       * If the rec had TRAMP enabled, then it needs to
+-                       * be cleared. As TRAMP can only be enabled iff
+-                       * there is only a single ops attached to it.
+-                       * In otherwords, always disable it on decrementing.
+-                       * In the future, we may set it if rec count is
+-                       * decremented to one, and the ops that is left
+-                       * has a trampoline.
++                       * The TRAMP needs to be set only if rec count
++                       * is decremented to one, and the ops that is
++                       * left has a trampoline. As TRAMP can only be
++                       * enabled if there is only a single ops attached
++                       * to it.
+                        */
+-                      rec->flags &= ~FTRACE_FL_TRAMP;
++                      if (ftrace_rec_count(rec) == 1 &&
++                          ftrace_find_tramp_ops_any(rec))
++                              rec->flags |= FTRACE_FL_TRAMP;
++                      else
++                              rec->flags &= ~FTRACE_FL_TRAMP;
+ 
+                       /*
+                        * flags will be cleared in ftrace_check_record()
+@@ -1960,11 +1967,6 @@ static void print_ip_ins(const char *fmt, const unsigned char *p)
+               printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
+ }
+ 
+-static struct ftrace_ops *
+-ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
+-static struct ftrace_ops *
+-ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
+-
+ enum ftrace_bug_type ftrace_bug_type;
+ const void *ftrace_expected;
+ 
+diff --git a/mm/cma.c b/mm/cma.c
+index 4ea0f32761c1..7cb569a188c4 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -268,6 +268,12 @@ int __init cma_declare_contiguous(phys_addr_t base,
+        */
+       alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
+                         max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
++      if (fixed && base & (alignment - 1)) {
++              ret = -EINVAL;
++              pr_err("Region at %pa must be aligned to %pa bytes\n",
++                      &base, &alignment);
++              goto err;
++      }
+       base = ALIGN(base, alignment);
+       size = ALIGN(size, alignment);
+       limit &= ~(alignment - 1);
+@@ -298,6 +304,13 @@ int __init cma_declare_contiguous(phys_addr_t base,
+       if (limit == 0 || limit > memblock_end)
+               limit = memblock_end;
+ 
++      if (base + size > limit) {
++              ret = -EINVAL;
++              pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
++                      &size, &base, &limit);
++              goto err;
++      }
++
+       /* Reserve memory */
+       if (fixed) {
+               if (memblock_is_region_reserved(base, size) ||
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index e0cfc3a54b6a..8217ee5d66ef 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1004,6 +1004,9 @@ static void collapse_huge_page(struct mm_struct *mm,
+        * handled by the anon_vma lock + PG_lock.
+        */
+       down_write(&mm->mmap_sem);
++      result = SCAN_ANY_PROCESS;
++      if (!mmget_still_valid(mm))
++              goto out;
+       result = hugepage_vma_revalidate(mm, address, &vma);
+       if (result)
+               goto out;
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 3f2314ad6acd..19368fbba42a 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2448,7 +2448,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
+       vma = find_vma_prev(mm, addr, &prev);
+       if (vma && (vma->vm_start <= addr))
+               return vma;
+-      if (!prev || expand_stack(prev, addr))
++      /* don't alter vm_end if the coredump is running */
++      if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
+               return NULL;
+       if (prev->vm_flags & VM_LOCKED)
+               populate_vma_page_range(prev, addr, prev->vm_end, NULL);
+@@ -2474,6 +2475,9 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
+               return vma;
+       if (!(vma->vm_flags & VM_GROWSDOWN))
+               return NULL;
++      /* don't alter vm_start if the coredump is running */
++      if (!mmget_still_valid(mm))
++              return NULL;
+       start = vma->vm_start;
+       if (expand_stack(vma, addr))
+               return NULL;
+diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
+index c483de590ba3..af9cc839856f 100644
+--- a/security/selinux/ss/policydb.c
++++ b/security/selinux/ss/policydb.c
+@@ -266,6 +266,8 @@ static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
+       return v;
+ }
+ 
++static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap);
++
+ /*
+  * Initialize a policy database structure.
+  */
+@@ -313,8 +315,10 @@ static int policydb_init(struct policydb *p)
+ out:
+       hashtab_destroy(p->filename_trans);
+       hashtab_destroy(p->range_tr);
+-      for (i = 0; i < SYM_NUM; i++)
++      for (i = 0; i < SYM_NUM; i++) {
++              hashtab_map(p->symtab[i].table, destroy_f[i], NULL);
+               hashtab_destroy(p->symtab[i].table);
++      }
+       return rc;
+ }
+ 
+diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
+index dd4ed7c3c062..d84c28eac262 100644
+--- a/tools/objtool/elf.c
++++ b/tools/objtool/elf.c
+@@ -305,7 +305,7 @@ static int read_symbols(struct elf *elf)
+                       if (sym->type != STT_FUNC)
+                               continue;
+                       sym->pfunc = sym->cfunc = sym;
+-                      coldstr = strstr(sym->name, ".cold.");
++                      coldstr = strstr(sym->name, ".cold");
+                       if (!coldstr)
+                               continue;
+ 
