commit:     678f732c84b8e5770563eb32bf4f3f87041ed174
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug 16 12:14:29 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Oct 29 13:59:02 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=678f732c

Linux patch 4.14.139

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1138_linux-4.14.139.patch | 2094 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2098 insertions(+)

diff --git a/0000_README b/0000_README
index 2b98c17..4659ab2 100644
--- a/0000_README
+++ b/0000_README
@@ -595,6 +595,10 @@ Patch:  1137_linux-4.14.138.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.14.138
 
+Patch:  1138_linux-4.14.139.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.14.139
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1138_linux-4.14.139.patch b/1138_linux-4.14.139.patch
new file mode 100644
index 0000000..ca1dda2
--- /dev/null
+++ b/1138_linux-4.14.139.patch
@@ -0,0 +1,2094 @@
+diff --git a/Makefile b/Makefile
+index 82ae13348266..3ccf48b2714a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 138
++SUBLEVEL = 139
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S
+index cd350dee4df3..efcd400b2abb 100644
+--- a/arch/arm/mach-davinci/sleep.S
++++ b/arch/arm/mach-davinci/sleep.S
+@@ -37,6 +37,7 @@
+ #define DEEPSLEEP_SLEEPENABLE_BIT     BIT(31)
+ 
+       .text
++      .arch   armv5te
+ /*
+  * Move DaVinci into deep sleep state
+  *
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index 47d45733a346..af1f065dc9f3 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -58,6 +58,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
+       return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
+ }
+ 
++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
++{
++      return kvm_arch_vcpu_runnable(vcpu);
++}
++
+ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+ {
+       return false;
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 9f3eb334c818..94af073476ce 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1077,6 +1077,7 @@ struct kvm_x86_ops {
+       int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
+                             uint32_t guest_irq, bool set);
+       void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
++      bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
+ 
+       int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
+       void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 3a7e79f6cc77..093e7f567e69 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -4637,6 +4637,11 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
+               kvm_vcpu_wake_up(vcpu);
+ }
+ 
++static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
++{
++      return false;
++}
++
+ static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
+ {
+       unsigned long flags;
+@@ -5746,6 +5751,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+ 
+       .pmu_ops = &amd_pmu_ops,
+       .deliver_posted_interrupt = svm_deliver_avic_intr,
++      .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
+       .update_pi_irte = svm_update_pi_irte,
+       .setup_mce = svm_setup_mce,
+ };
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index ae484edcf7a3..f467d85b0352 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -9431,6 +9431,11 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+       return max_irr;
+ }
+ 
++static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
++{
++      return pi_test_on(vcpu_to_pi_desc(vcpu));
++}
++
+ static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+ {
+       if (!kvm_vcpu_apicv_active(vcpu))
+@@ -12756,6 +12761,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
+       .hwapic_isr_update = vmx_hwapic_isr_update,
+       .sync_pir_to_irr = vmx_sync_pir_to_irr,
+       .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
++      .dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
+ 
+       .set_tss_addr = vmx_set_tss_addr,
+       .get_tdp_level = get_ept_level,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index a8526042d176..a620936d97cf 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8711,6 +8711,22 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+       return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
+ }
+ 
++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
++{
++      if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
++              return true;
++
++      if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
++              kvm_test_request(KVM_REQ_SMI, vcpu) ||
++               kvm_test_request(KVM_REQ_EVENT, vcpu))
++              return true;
++
+      if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu))
++              return true;
++
++      return false;
++}
++
+ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+ {
+       return vcpu->arch.preempted_in_kernel;
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index b162f92fd55c..27cab342a0b2 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -260,13 +260,14 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
+ 
+       pmd = pmd_offset(pud, address);
+       pmd_k = pmd_offset(pud_k, address);
+-      if (!pmd_present(*pmd_k))
+-              return NULL;
+ 
+-      if (!pmd_present(*pmd))
++      if (pmd_present(*pmd) != pmd_present(*pmd_k))
+               set_pmd(pmd, *pmd_k);
++
++      if (!pmd_present(*pmd_k))
++              return NULL;
+       else
+-              BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
++              BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
+ 
+       return pmd_k;
+ }
+@@ -286,17 +287,13 @@ void vmalloc_sync_all(void)
+               spin_lock(&pgd_lock);
+               list_for_each_entry(page, &pgd_list, lru) {
+                       spinlock_t *pgt_lock;
+-                      pmd_t *ret;
+ 
+                       /* the pgt_lock only for Xen */
+                       pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+ 
+                       spin_lock(pgt_lock);
+-                      ret = vmalloc_sync_one(page_address(page), address);
++                      vmalloc_sync_one(page_address(page), address);
+                       spin_unlock(pgt_lock);
+-
+-                      if (!ret)
+-                              break;
+               }
+               spin_unlock(&pgd_lock);
+       }
+diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
+index ca414910710e..b0a7afd4e7d3 100644
+--- a/drivers/acpi/arm64/iort.c
++++ b/drivers/acpi/arm64/iort.c
+@@ -506,8 +506,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
+ 
+       /* Move to ITS specific data */
+       its = (struct acpi_iort_its_group *)node->node_data;
+-      if (idx > its->its_count) {
+-              dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
++      if (idx >= its->its_count) {
++              dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
+                       idx, its->its_count);
+               return -ENXIO;
+       }
+diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
+index 1aad373da50e..8fbdfaacc222 100644
+--- a/drivers/block/drbd/drbd_receiver.c
++++ b/drivers/block/drbd/drbd_receiver.c
+@@ -5237,7 +5237,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
+       unsigned int key_len;
+       char secret[SHARED_SECRET_MAX]; /* 64 byte */
+       unsigned int resp_size;
+-      SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
++      struct shash_desc *desc;
+       struct packet_info pi;
+       struct net_conf *nc;
+       int err, rv;
+@@ -5250,6 +5250,13 @@ static int drbd_do_auth(struct drbd_connection *connection)
+       memcpy(secret, nc->shared_secret, key_len);
+       rcu_read_unlock();
+ 
++      desc = kmalloc(sizeof(struct shash_desc) +
++                     crypto_shash_descsize(connection->cram_hmac_tfm),
++                     GFP_KERNEL);
++      if (!desc) {
++              rv = -1;
++              goto fail;
++      }
+       desc->tfm = connection->cram_hmac_tfm;
+       desc->flags = 0;
+ 
+@@ -5392,7 +5399,10 @@ static int drbd_do_auth(struct drbd_connection *connection)
+       kfree(peers_ch);
+       kfree(response);
+       kfree(right_response);
+-      shash_desc_zero(desc);
++      if (desc) {
++              shash_desc_zero(desc);
++              kfree(desc);
++      }
+ 
+       return rv;
+ }
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index bd447de4a5b8..87d7c42affbc 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -857,7 +857,7 @@ static void loop_unprepare_queue(struct loop_device *lo)
+ 
+ static int loop_kthread_worker_fn(void *worker_ptr)
+ {
+-      current->flags |= PF_LESS_THROTTLE;
++      current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO;
+       return kthread_worker_fn(worker_ptr);
+ }
+ 
+diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
+index 8456492124f0..d1bdd8f62247 100644
+--- a/drivers/cpufreq/pasemi-cpufreq.c
++++ b/drivers/cpufreq/pasemi-cpufreq.c
+@@ -145,10 +145,18 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
+       int err = -ENODEV;
+ 
+       cpu = of_get_cpu_node(policy->cpu, NULL);
++      if (!cpu)
++              goto out;
+ 
++      max_freqp = of_get_property(cpu, "clock-frequency", NULL);
+       of_node_put(cpu);
+-      if (!cpu)
++      if (!max_freqp) {
++              err = -EINVAL;
+               goto out;
++      }
++
++      /* we need the freq in kHz */
++      max_freq = *max_freqp / 1000;
+ 
+       dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
+       if (!dn)
+@@ -185,16 +193,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
+       }
+ 
+       pr_debug("init cpufreq on CPU %d\n", policy->cpu);
+-
+-      max_freqp = of_get_property(cpu, "clock-frequency", NULL);
+-      if (!max_freqp) {
+-              err = -EINVAL;
+-              goto out_unmap_sdcpwr;
+-      }
+-
+-      /* we need the freq in kHz */
+-      max_freq = *max_freqp / 1000;
+-
+       pr_debug("max clock-frequency is at %u kHz\n", max_freq);
+       pr_debug("initializing frequency table\n");
+ 
+@@ -212,9 +210,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ 
+       return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
+ 
+-out_unmap_sdcpwr:
+-      iounmap(sdcpwr_mapbase);
+-
+ out_unmap_sdcasr:
+       iounmap(sdcasr_mapbase);
+ out:
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+index 52313524a4dd..2ab97ecd9a08 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+@@ -63,6 +63,19 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
+                                  unsigned int authsize)
+ {
++      switch (authsize) {
++      case 16:
++      case 15:
++      case 14:
++      case 13:
++      case 12:
++      case 8:
++      case 4:
++              break;
++      default:
++              return -EINVAL;
++      }
++
+       return 0;
+ }
+ 
+@@ -109,6 +122,7 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
+       memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+       INIT_LIST_HEAD(&rctx->cmd.entry);
+       rctx->cmd.engine = CCP_ENGINE_AES;
++      rctx->cmd.u.aes.authsize = crypto_aead_authsize(tfm);
+       rctx->cmd.u.aes.type = ctx->u.aes.type;
+       rctx->cmd.u.aes.mode = ctx->u.aes.mode;
+       rctx->cmd.u.aes.action = encrypt;
+diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
+index 73e49840305b..1e2e42106dee 100644
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -178,14 +178,18 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
+       return 0;
+ }
+ 
+-static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
+-                          struct scatterlist *sg, unsigned int sg_offset,
+-                          unsigned int len)
++static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
++                         struct scatterlist *sg, unsigned int sg_offset,
++                         unsigned int len)
+ {
+       WARN_ON(!wa->address);
+ 
++      if (len > (wa->length - wa_offset))
++              return -EINVAL;
++
+       scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
+                                0);
++      return 0;
+ }
+ 
+ static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
+@@ -205,8 +209,11 @@ static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
+                                  unsigned int len)
+ {
+       u8 *p, *q;
++      int     rc;
+ 
+-      ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
++      rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
++      if (rc)
++              return rc;
+ 
+       p = wa->address + wa_offset;
+       q = p + len - 1;
+@@ -509,7 +516,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
+               return ret;
+ 
+       dm_offset = CCP_SB_BYTES - aes->key_len;
+-      ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++      ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++      if (ret)
++              goto e_key;
+       ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+                            CCP_PASSTHRU_BYTESWAP_256BIT);
+       if (ret) {
+@@ -528,7 +537,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
+               goto e_key;
+ 
+       dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
+-      ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++      ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++      if (ret)
++              goto e_ctx;
+       ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+                            CCP_PASSTHRU_BYTESWAP_256BIT);
+       if (ret) {
+@@ -556,8 +567,10 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
+                               goto e_src;
+                       }
+ 
+-                      ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
+-                                      aes->cmac_key_len);
++                      ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
++                                            aes->cmac_key_len);
++                      if (ret)
++                              goto e_src;
+                       ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+                                            CCP_PASSTHRU_BYTESWAP_256BIT);
+                       if (ret) {
+@@ -612,6 +625,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ 
+       unsigned long long *final;
+       unsigned int dm_offset;
++      unsigned int authsize;
+       unsigned int jobid;
+       unsigned int ilen;
+       bool in_place = true; /* Default value */
+@@ -633,6 +647,21 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+       if (!aes->key) /* Gotta have a key SGL */
+               return -EINVAL;
+ 
++      /* Zero defaults to 16 bytes, the maximum size */
++      authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
++      switch (authsize) {
++      case 16:
++      case 15:
++      case 14:
++      case 13:
++      case 12:
++      case 8:
++      case 4:
++              break;
++      default:
++              return -EINVAL;
++      }
++
+       /* First, decompose the source buffer into AAD & PT,
+        * and the destination buffer into AAD, CT & tag, or
+        * the input into CT & tag.
+@@ -647,7 +676,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+               p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
+       } else {
+               /* Input length for decryption includes tag */
+-              ilen = aes->src_len - AES_BLOCK_SIZE;
++              ilen = aes->src_len - authsize;
+               p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
+       }
+ 
+@@ -669,7 +698,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+               return ret;
+ 
+       dm_offset = CCP_SB_BYTES - aes->key_len;
+-      ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++      ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++      if (ret)
++              goto e_key;
+       ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+                            CCP_PASSTHRU_BYTESWAP_256BIT);
+       if (ret) {
+@@ -688,7 +719,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+               goto e_key;
+ 
+       dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
+-      ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++      ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++      if (ret)
++              goto e_ctx;
+ 
+       ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+                            CCP_PASSTHRU_BYTESWAP_256BIT);
+@@ -752,8 +785,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+               while (src.sg_wa.bytes_left) {
+                       ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
+                       if (!src.sg_wa.bytes_left) {
+-                              unsigned int nbytes = aes->src_len
+-                                                    % AES_BLOCK_SIZE;
++                              unsigned int nbytes = ilen % AES_BLOCK_SIZE;
+ 
+                               if (nbytes) {
+                                       op.eom = 1;
+@@ -780,7 +812,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+               goto e_dst;
+       }
+ 
+-      ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++      ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++      if (ret)
++              goto e_dst;
+ 
+       ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+                            CCP_PASSTHRU_BYTESWAP_256BIT);
+@@ -823,17 +857,19 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ 
+       if (aes->action == CCP_AES_ACTION_ENCRYPT) {
+               /* Put the ciphered tag after the ciphertext. */
+-              ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
++              ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
+       } else {
+               /* Does this ciphered tag match the input? */
+-              ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
++              ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
+                                          DMA_BIDIRECTIONAL);
+               if (ret)
+                       goto e_tag;
+-              ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
++              ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
++              if (ret)
++                      goto e_tag;
+ 
+               ret = crypto_memneq(tag.address, final_wa.address,
+-                                  AES_BLOCK_SIZE) ? -EBADMSG : 0;
++                                  authsize) ? -EBADMSG : 0;
+               ccp_dm_free(&tag);
+       }
+ 
+@@ -841,11 +877,11 @@ e_tag:
+       ccp_dm_free(&final_wa);
+ 
+ e_dst:
+-      if (aes->src_len && !in_place)
++      if (ilen > 0 && !in_place)
+               ccp_free_data(&dst, cmd_q);
+ 
+ e_src:
+-      if (aes->src_len)
++      if (ilen > 0)
+               ccp_free_data(&src, cmd_q);
+ 
+ e_aad:
+@@ -925,7 +961,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+               return ret;
+ 
+       dm_offset = CCP_SB_BYTES - aes->key_len;
+-      ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++      ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++      if (ret)
++              goto e_key;
+       ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+                            CCP_PASSTHRU_BYTESWAP_256BIT);
+       if (ret) {
+@@ -946,7 +984,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+       if (aes->mode != CCP_AES_MODE_ECB) {
+               /* Load the AES context - convert to LE */
+               dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
+-              ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++              ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++              if (ret)
++                      goto e_ctx;
+               ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+                                    CCP_PASSTHRU_BYTESWAP_256BIT);
+               if (ret) {
+@@ -1124,8 +1164,12 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
+                * big endian to little endian.
+                */
+               dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
+-              ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+-              ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
++              ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
++              if (ret)
++                      goto e_key;
++              ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
++              if (ret)
++                      goto e_key;
+       } else {
+               /* Version 5 CCPs use a 512-bit space for the key: each portion
+                * occupies 256 bits, or one entire slot, and is zero-padded.
+@@ -1134,9 +1178,13 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
+ 
+               dm_offset = CCP_SB_BYTES;
+               pad = dm_offset - xts->key_len;
+-              ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
+-              ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len,
+-                              xts->key_len);
++              ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
++              if (ret)
++                      goto e_key;
++              ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
++                                    xts->key_len, xts->key_len);
++              if (ret)
++                      goto e_key;
+       }
+       ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+                            CCP_PASSTHRU_BYTESWAP_256BIT);
+@@ -1155,7 +1203,9 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
+       if (ret)
+               goto e_key;
+ 
+-      ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
++      ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
++      if (ret)
++              goto e_ctx;
+       ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+                            CCP_PASSTHRU_BYTESWAP_NOOP);
+       if (ret) {
+@@ -1298,12 +1348,18 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+       dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */
+ 
+       len_singlekey = des3->key_len / 3;
+-      ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
+-                      des3->key, 0, len_singlekey);
+-      ccp_set_dm_area(&key, dm_offset + len_singlekey,
+-                      des3->key, len_singlekey, len_singlekey);
+-      ccp_set_dm_area(&key, dm_offset,
+-                      des3->key, 2 * len_singlekey, len_singlekey);
++      ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
++                            des3->key, 0, len_singlekey);
++      if (ret)
++              goto e_key;
++      ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
++                            des3->key, len_singlekey, len_singlekey);
++      if (ret)
++              goto e_key;
++      ret = ccp_set_dm_area(&key, dm_offset,
++                            des3->key, 2 * len_singlekey, len_singlekey);
++      if (ret)
++              goto e_key;
+ 
+       /* Copy the key to the SB */
+       ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+@@ -1331,7 +1387,10 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ 
+               /* Load the context into the LSB */
+               dm_offset = CCP_SB_BYTES - des3->iv_len;
+-              ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len);
++              ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
++                                    des3->iv_len);
++              if (ret)
++                      goto e_ctx;
+ 
+               if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
+                       load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
+@@ -1615,8 +1674,10 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+               }
+       } else {
+               /* Restore the context */
+-              ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
+-                              sb_count * CCP_SB_BYTES);
++              ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
++                                    sb_count * CCP_SB_BYTES);
++              if (ret)
++                      goto e_ctx;
+       }
+ 
+       ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+@@ -1938,7 +1999,9 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
+               if (ret)
+                       return ret;
+ 
+-              ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
++              ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
++              if (ret)
++                      goto e_mask;
+               ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
+                                    CCP_PASSTHRU_BYTESWAP_NOOP);
+               if (ret) {
+diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
+index 6e4ed5a9c6fd..42c4ff75281b 100644
+--- a/drivers/firmware/Kconfig
++++ b/drivers/firmware/Kconfig
+@@ -156,7 +156,7 @@ config DMI_SCAN_MACHINE_NON_EFI_FALLBACK
+ 
+ config ISCSI_IBFT_FIND
+       bool "iSCSI Boot Firmware Table Attributes"
+-      depends on X86 && ACPI
++      depends on X86 && ISCSI_IBFT
+       default n
+       help
+         This option enables the kernel to find the region of memory
+@@ -167,7 +167,8 @@ config ISCSI_IBFT_FIND
+ config ISCSI_IBFT
+       tristate "iSCSI Boot Firmware Table Attributes module"
+       select ISCSI_BOOT_SYSFS
+-      depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL
++      select ISCSI_IBFT_FIND if X86
++      depends on ACPI && SCSI && SCSI_LOWLEVEL
+       default n
+       help
+         This option enables support for detection and exposing of iSCSI
+diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
+index 132b9bae4b6a..220bbc91cebd 100644
+--- a/drivers/firmware/iscsi_ibft.c
++++ b/drivers/firmware/iscsi_ibft.c
+@@ -93,6 +93,10 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
+ MODULE_LICENSE("GPL");
+ MODULE_VERSION(IBFT_ISCSI_VERSION);
+ 
++#ifndef CONFIG_ISCSI_IBFT_FIND
++struct acpi_table_ibft *ibft_addr;
++#endif
++
+ struct ibft_hdr {
+       u8 id;
+       u8 version;
+diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
+index c21e10c780ac..af40189cdb60 100644
+--- a/drivers/gpu/drm/drm_framebuffer.c
++++ b/drivers/gpu/drm/drm_framebuffer.c
+@@ -773,7 +773,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb)
+       struct drm_device *dev = fb->dev;
+       struct drm_atomic_state *state;
+       struct drm_plane *plane;
+-      struct drm_connector *conn;
++      struct drm_connector *conn __maybe_unused;
+       struct drm_connector_state *conn_state;
+       int i, ret = 0;
+       unsigned plane_mask;
+diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
+index 2ff2ee7f3b78..03c592753fc3 100644
+--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
++++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
+@@ -422,8 +422,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
+       else
+               txesc2_div = 10;
+ 
+-      I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK);
+-      I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK);
++      I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
++      I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
+ }
+ 
+ /* Program BXT Mipi clocks and dividers */
+diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
+index d03203a82e8f..51f7bcd799fa 100644
+--- a/drivers/hid/hid-sony.c
++++ b/drivers/hid/hid-sony.c
+@@ -578,10 +578,14 @@ static void sony_set_leds(struct sony_sc *sc);
+ static inline void sony_schedule_work(struct sony_sc *sc,
+                                     enum sony_worker which)
+ {
++      unsigned long flags;
++
+       switch (which) {
+       case SONY_WORKER_STATE:
+-              if (!sc->defer_initialization)
++              spin_lock_irqsave(&sc->lock, flags);
++              if (!sc->defer_initialization && sc->state_worker_initialized)
+                       schedule_work(&sc->state_worker);
++              spin_unlock_irqrestore(&sc->lock, flags);
+               break;
+       case SONY_WORKER_HOTPLUG:
+               if (sc->hotplug_worker_initialized)
+@@ -2488,13 +2492,18 @@ static inline void sony_init_output_report(struct sony_sc *sc,
+ 
+ static inline void sony_cancel_work_sync(struct sony_sc *sc)
+ {
++      unsigned long flags;
++
+       if (sc->hotplug_worker_initialized)
+               cancel_work_sync(&sc->hotplug_worker);
+-      if (sc->state_worker_initialized)
++      if (sc->state_worker_initialized) {
++              spin_lock_irqsave(&sc->lock, flags);
++              sc->state_worker_initialized = 0;
++              spin_unlock_irqrestore(&sc->lock, flags);
+               cancel_work_sync(&sc->state_worker);
++      }
+ }
+ 
+-
+ static int sony_input_configured(struct hid_device *hdev,
+                                       struct hid_input *hidinput)
+ {
+diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
+index ca9941fa741b..7e14143ed119 100644
+--- a/drivers/hwmon/nct6775.c
++++ b/drivers/hwmon/nct6775.c
+@@ -769,7 +769,7 @@ static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 };
+ static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 };
+ static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 };
+ static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a };
+-static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c };
++static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b };
+ static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c };
+ static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d };
+ 
+@@ -3592,6 +3592,7 @@ static int nct6775_probe(struct platform_device *pdev)
+               data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME;
+               data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME;
+               data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME;
++              data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H;
+               data->REG_PWM[0] = NCT6106_REG_PWM;
+               data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT;
+               data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT;
+diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
+index 2876c18ed841..38ffbdb0a85f 100644
+--- a/drivers/hwmon/nct7802.c
++++ b/drivers/hwmon/nct7802.c
+@@ -768,7 +768,7 @@ static struct attribute *nct7802_in_attrs[] = {
+       &sensor_dev_attr_in3_alarm.dev_attr.attr,
+       &sensor_dev_attr_in3_beep.dev_attr.attr,
+ 
+-      &sensor_dev_attr_in4_input.dev_attr.attr,       /* 17 */
++      &sensor_dev_attr_in4_input.dev_attr.attr,       /* 16 */
+       &sensor_dev_attr_in4_min.dev_attr.attr,
+       &sensor_dev_attr_in4_max.dev_attr.attr,
+       &sensor_dev_attr_in4_alarm.dev_attr.attr,
+@@ -794,9 +794,9 @@ static umode_t nct7802_in_is_visible(struct kobject *kobj,
+ 
+       if (index >= 6 && index < 11 && (reg & 0x03) != 0x03)   /* VSEN1 */
+               return 0;
+-      if (index >= 11 && index < 17 && (reg & 0x0c) != 0x0c)  /* VSEN2 */
++      if (index >= 11 && index < 16 && (reg & 0x0c) != 0x0c)  /* VSEN2 */
+               return 0;
+-      if (index >= 17 && (reg & 0x30) != 0x30)                /* VSEN3 */
++      if (index >= 16 && (reg & 0x30) != 0x30)                /* VSEN3 */
+               return 0;
+ 
+       return attr->mode;
+diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
+index b1dd17cbce58..f8f298c33b28 100644
+--- a/drivers/iio/adc/max9611.c
++++ b/drivers/iio/adc/max9611.c
+@@ -86,7 +86,7 @@
+ #define MAX9611_TEMP_MAX_POS          0x7f80
+ #define MAX9611_TEMP_MAX_NEG          0xff80
+ #define MAX9611_TEMP_MIN_NEG          0xd980
+-#define MAX9611_TEMP_MASK             GENMASK(7, 15)
++#define MAX9611_TEMP_MASK             GENMASK(15, 7)
+ #define MAX9611_TEMP_SHIFT            0x07
+ #define MAX9611_TEMP_RAW(_r)          ((_r) >> MAX9611_TEMP_SHIFT)
+ #define MAX9611_TEMP_SCALE_NUM                1000000
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 7c8d4baf647b..7db53eab7012 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -185,6 +185,7 @@ static const char * const smbus_pnp_ids[] = {
+       "LEN2055", /* E580 */
+       "SYN3052", /* HP EliteBook 840 G4 */
+       "SYN3221", /* HP 15-ay000 */
++      "SYN323d", /* HP Spectre X360 13-w013dx */
+       NULL
+ };
+ 
+diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
+index ad0e64fdba34..76f6a4f628b3 100644
+--- a/drivers/misc/Makefile
++++ b/drivers/misc/Makefile
+@@ -69,8 +69,7 @@ KCOV_INSTRUMENT_lkdtm_rodata.o       := n
+ 
+ OBJCOPYFLAGS :=
+ OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \
+-                      --set-section-flags .text=alloc,readonly \
+-                      --rename-section .text=.rodata
++      --rename-section .text=.rodata,alloc,readonly,load
+ targets += lkdtm_rodata.o lkdtm_rodata_objcopy.o
+ $(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o FORCE
+       $(call if_changed,objcopy)
+diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
+index fbd29f00fca0..d76fea1098e2 100644
+--- a/drivers/mmc/host/cavium.c
++++ b/drivers/mmc/host/cavium.c
+@@ -374,6 +374,7 @@ static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
+ {
+       data->bytes_xfered = data->blocks * data->blksz;
+       data->error = 0;
++      dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
+       return 1;
+ }
+ 
+@@ -1046,7 +1047,8 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
+               mmc->max_segs = 1;
+ 
+       /* DMA size field can address up to 8 MB */
+-      mmc->max_seg_size = 8 * 1024 * 1024;
++      mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
++                                dma_get_max_seg_size(host->dev));
+       mmc->max_req_size = mmc->max_seg_size;
+       /* External DMA is in 512 byte blocks */
+       mmc->max_blk_size = 512;
+diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
+index 602c19e23f05..786d852a70d5 100644
+--- a/drivers/net/can/rcar/rcar_canfd.c
++++ b/drivers/net/can/rcar/rcar_canfd.c
+@@ -1512,10 +1512,11 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
+ 
+       /* All packets processed */
+       if (num_pkts < quota) {
+-              napi_complete_done(napi, num_pkts);
+-              /* Enable Rx FIFO interrupts */
+-              rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+-                                 RCANFD_RFCC_RFIE);
++              if (napi_complete_done(napi, num_pkts)) {
++                      /* Enable Rx FIFO interrupts */
++                      rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
++                                         RCANFD_RFCC_RFIE);
++              }
+       }
+       return num_pkts;
+ }
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+index 1ca76e03e965..d68c79f9a4b9 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+@@ -594,16 +594,16 @@ static int peak_usb_ndo_stop(struct net_device *netdev)
+       dev->state &= ~PCAN_USB_STATE_STARTED;
+       netif_stop_queue(netdev);
+ 
++      close_candev(netdev);
++
++      dev->can.state = CAN_STATE_STOPPED;
++
+       /* unlink all pending urbs and free used memory */
+       peak_usb_unlink_all_urbs(dev);
+ 
+       if (dev->adapter->dev_stop)
+               dev->adapter->dev_stop(dev);
+ 
+-      close_candev(netdev);
+-
+-      dev->can.state = CAN_STATE_STOPPED;
+-
+       /* can set bus off now */
+       if (dev->adapter->dev_set_bus) {
+               int err = dev->adapter->dev_set_bus(dev, 0);
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+index 53d6bb045e9e..773fc15ac3ab 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+@@ -852,7 +852,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
+                       goto err_out;
+ 
+               /* allocate command buffer once for all for the interface */
+-              pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE,
++              pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE,
+                                               GFP_KERNEL);
+               if (!pdev->cmd_buffer_addr)
+                       goto err_out_1;
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+index bbdd6058cd2f..d85fdc6949c6 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+@@ -500,7 +500,7 @@ static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
+       u8 *buffer;
+       int err;
+ 
+-      buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
++      buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index e9e466cae322..534c0ea7b232 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -778,7 +778,7 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
+ 
+       for (i = 0; i < n_profiles; i++) {
+               /* the tables start at element 3 */
+-              static int pos = 3;
++              int pos = 3;
+ 
+               /* The EWRD profiles officially go from 2 to 4, but we
+                * save them in sar_profiles[1-3] (because we don't
+@@ -912,6 +912,22 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
+       return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+ }
+ 
++static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
++{
++      /*
++       * The GEO_TX_POWER_LIMIT command is not supported on earlier
++       * firmware versions.  Unfortunately, we don't have a TLV API
++       * flag to rely on, so rely on the major version which is in
++       * the first byte of ucode_ver.  This was implemented
++       * initially on version 38 and then backported to 36, 29 and
++       * 17.
++       */
++      return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
++             IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
++             IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
++             IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
++}
++
+ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
+ {
+       struct iwl_geo_tx_power_profiles_resp *resp;
+@@ -927,6 +943,9 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
+               .data = { &geo_cmd },
+       };
+ 
++      if (!iwl_mvm_sar_geo_support(mvm))
++              return -EOPNOTSUPP;
++
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+       if (ret) {
+               IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
+@@ -952,13 +971,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+       int ret, i, j;
+       u16 cmd_wide_id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
+ 
+-      /*
+-       * This command is not supported on earlier firmware versions.
+-       * Unfortunately, we don't have a TLV API flag to rely on, so
+-       * rely on the major version which is in the first byte of
+-       * ucode_ver.
+-       */
+-      if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
++      if (!iwl_mvm_sar_geo_support(mvm))
+               return 0;
+ 
+       ret = iwl_mvm_sar_get_wgds_table(mvm);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+index 4704137a26e0..c3a2e6b6da65 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+@@ -401,6 +401,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
+                                        DMA_TO_DEVICE);
+       }
+ 
++      meta->tbs = 0;
++
+       if (trans->cfg->use_tfh) {
+               struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+ 
+diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
+index a76bd797e454..597af4e66325 100644
+--- a/drivers/net/wireless/marvell/mwifiex/main.h
++++ b/drivers/net/wireless/marvell/mwifiex/main.h
+@@ -122,6 +122,7 @@ enum {
+ 
+ #define MWIFIEX_MAX_TOTAL_SCAN_TIME   (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
+ 
++#define WPA_GTK_OUI_OFFSET                            2
+ #define RSN_GTK_OUI_OFFSET                            2
+ 
+ #define MWIFIEX_OUI_NOT_PRESENT                       0
+diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
+index 29284f9a0646..67c334221077 100644
+--- a/drivers/net/wireless/marvell/mwifiex/scan.c
++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
+@@ -181,7 +181,8 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
+       u8 ret = MWIFIEX_OUI_NOT_PRESENT;
+ 
+       if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) {
+-              iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
++              iebody = (struct ie_body *)((u8 *)bss_desc->bcn_wpa_ie->data +
++                                          WPA_GTK_OUI_OFFSET);
+               oui = &mwifiex_wpa_oui[cipher][0];
+               ret = mwifiex_search_oui_in_ie(iebody, oui);
+               if (ret)
+diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
+index ab8dd81fbc2b..1a40c73961b8 100644
+--- a/drivers/s390/cio/qdio_main.c
++++ b/drivers/s390/cio/qdio_main.c
+@@ -1577,13 +1577,13 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
+               rc = qdio_kick_outbound_q(q, phys_aob);
+       } else if (need_siga_sync(q)) {
+               rc = qdio_siga_sync_q(q);
++      } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
++                 get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
++                 state == SLSB_CU_OUTPUT_PRIMED) {
++              /* The previous buffer is not processed yet, tack on. */
++              qperf_inc(q, fast_requeue);
+       } else {
+-              /* try to fast requeue buffers */
+-              get_buf_state(q, prev_buf(bufnr), &state, 0);
+-              if (state != SLSB_CU_OUTPUT_PRIMED)
+-                      rc = qdio_kick_outbound_q(q, 0);
+-              else
+-                      qperf_inc(q, fast_requeue);
++              rc = qdio_kick_outbound_q(q, 0);
+       }
+ 
+       /* in case of SIGA errors we must process the error immediately */
+diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
+index 1419eaea03d8..5a9e457caef3 100644
+--- a/drivers/s390/cio/vfio_ccw_cp.c
++++ b/drivers/s390/cio/vfio_ccw_cp.c
+@@ -119,8 +119,10 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
+                                 sizeof(*pa->pa_iova_pfn) +
+                                 sizeof(*pa->pa_pfn),
+                                 GFP_KERNEL);
+-      if (unlikely(!pa->pa_iova_pfn))
++      if (unlikely(!pa->pa_iova_pfn)) {
++              pa->pa_nr = 0;
+               return -ENOMEM;
++      }
+       pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
+ 
+       ret = pfn_array_pin(pa, mdev);
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index 09c6a16fab93..41f5f6410163 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -53,6 +53,7 @@
+ #define ALUA_FAILOVER_TIMEOUT         60
+ #define ALUA_FAILOVER_RETRIES         5
+ #define ALUA_RTPG_DELAY_MSECS         5
++#define ALUA_RTPG_RETRY_DELAY         2
+ 
+ /* device handler flags */
+ #define ALUA_OPTIMIZE_STPG            0x01
+@@ -677,7 +678,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
+       case SCSI_ACCESS_STATE_TRANSITIONING:
+               if (time_before(jiffies, pg->expiry)) {
+                       /* State transition, retry */
+-                      pg->interval = 2;
++                      pg->interval = ALUA_RTPG_RETRY_DELAY;
+                       err = SCSI_DH_RETRY;
+               } else {
+                       struct alua_dh_data *h;
+@@ -802,6 +803,8 @@ static void alua_rtpg_work(struct work_struct *work)
+                               spin_lock_irqsave(&pg->lock, flags);
+                               pg->flags &= ~ALUA_PG_RUNNING;
+                               pg->flags |= ALUA_PG_RUN_RTPG;
++                              if (!pg->interval)
++                                      pg->interval = ALUA_RTPG_RETRY_DELAY;
+                               spin_unlock_irqrestore(&pg->lock, flags);
+                               queue_delayed_work(kaluad_wq, &pg->rtpg_work,
+                                                  pg->interval * HZ);
+@@ -813,6 +816,8 @@ static void alua_rtpg_work(struct work_struct *work)
+               spin_lock_irqsave(&pg->lock, flags);
+               if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
+                       pg->flags &= ~ALUA_PG_RUNNING;
++                      if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
++                              pg->interval = ALUA_RTPG_RETRY_DELAY;
+                       pg->flags |= ALUA_PG_RUN_RTPG;
+                       spin_unlock_irqrestore(&pg->lock, flags);
+                       queue_delayed_work(kaluad_wq, &pg->rtpg_work,
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index a06b24a61622..34612add3829 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -4876,8 +4876,8 @@ static int ibmvfc_remove(struct vio_dev *vdev)
+ 
+       spin_lock_irqsave(vhost->host->host_lock, flags);
+       ibmvfc_purge_requests(vhost, DID_ERROR);
+-      ibmvfc_free_event_pool(vhost);
+       spin_unlock_irqrestore(vhost->host->host_lock, flags);
++      ibmvfc_free_event_pool(vhost);
+ 
+       ibmvfc_free_mem(vhost);
+       spin_lock(&ibmvfc_driver_lock);
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 73acd3e9ded7..8595d83229b7 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -2976,6 +2976,7 @@ megasas_fw_crash_buffer_show(struct device *cdev,
+       u32 size;
+       unsigned long buff_addr;
+       unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
++      unsigned long chunk_left_bytes;
+       unsigned long src_addr;
+       unsigned long flags;
+       u32 buff_offset;
+@@ -3001,6 +3002,8 @@ megasas_fw_crash_buffer_show(struct device *cdev,
+       }
+ 
+       size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
++      chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
++      size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
+       size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
+ 
+       src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
+diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
+index 5c2cec298816..c6ce34161281 100644
+--- a/drivers/tty/tty_ldsem.c
++++ b/drivers/tty/tty_ldsem.c
+@@ -139,8 +139,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
+ 
+       list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
+               tsk = waiter->task;
+-              smp_mb();
+-              waiter->task = NULL;
++              smp_store_release(&waiter->task, NULL);
+               wake_up_process(tsk);
+               put_task_struct(tsk);
+       }
+@@ -235,7 +234,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
+       for (;;) {
+               set_current_state(TASK_UNINTERRUPTIBLE);
+ 
+-              if (!waiter.task)
++              if (!smp_load_acquire(&waiter.task))
+                       break;
+               if (!timeout)
+                       break;
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 492977f78fde..62b2a7105f02 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -1811,8 +1811,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
+       return 0;
+ 
+  error:
+-      if (as && as->usbm)
+-              dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count);
+       kfree(isopkt);
+       kfree(dr);
+       if (as)
+diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
+index 425c2edfd6ea..544e03452877 100644
+--- a/drivers/usb/host/xhci-rcar.c
++++ b/drivers/usb/host/xhci-rcar.c
+@@ -231,10 +231,15 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
+        * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
+        * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
+        * xhci_gen_setup().
++       *
++       * And, since the firmware/internal CPU control the USBSTS.STS_HALT
++       * and the process speed is down when the roothub port enters U3,
++       * long delay for the handshake of STS_HALT is neeed in xhci_suspend().
+        */
+       if (xhci_rcar_is_gen2(hcd->self.controller) ||
+-                      xhci_rcar_is_gen3(hcd->self.controller))
+-              xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
++                      xhci_rcar_is_gen3(hcd->self.controller)) {
++              xhci->quirks |= XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND;
++      }
+ 
+       xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+       return xhci_rcar_download_firmware(hcd);
+diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
+index be5881303681..43bee6dad5c9 100644
+--- a/drivers/usb/misc/iowarrior.c
++++ b/drivers/usb/misc/iowarrior.c
+@@ -870,19 +870,20 @@ static void iowarrior_disconnect(struct usb_interface *interface)
+       dev = usb_get_intfdata(interface);
+       mutex_lock(&iowarrior_open_disc_lock);
+       usb_set_intfdata(interface, NULL);
++      /* prevent device read, write and ioctl */
++      dev->present = 0;
+ 
+       minor = dev->minor;
++      mutex_unlock(&iowarrior_open_disc_lock);
++      /* give back our minor - this will call close() locks need to be dropped at this point*/
+ 
+-      /* give back our minor */
+       usb_deregister_dev(interface, &iowarrior_class);
+ 
+       mutex_lock(&dev->mutex);
+ 
+       /* prevent device read, write and ioctl */
+-      dev->present = 0;
+ 
+       mutex_unlock(&dev->mutex);
+-      mutex_unlock(&iowarrior_open_disc_lock);
+ 
+       if (dev->opened) {
+               /* There is a process that holds a filedescriptor to the device ,
+diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
+index 8ee98bc6c468..081570677f24 100644
+--- a/drivers/usb/misc/yurex.c
++++ b/drivers/usb/misc/yurex.c
+@@ -96,7 +96,6 @@ static void yurex_delete(struct kref *kref)
+ 
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ 
+-      usb_put_dev(dev->udev);
+       if (dev->cntl_urb) {
+               usb_kill_urb(dev->cntl_urb);
+               kfree(dev->cntl_req);
+@@ -112,6 +111,7 @@ static void yurex_delete(struct kref *kref)
+                               dev->int_buffer, dev->urb->transfer_dma);
+               usb_free_urb(dev->urb);
+       }
++      usb_put_dev(dev->udev);
+       kfree(dev);
+ }
+ 
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index fd2d199dd413..0e1c36c92f60 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -166,7 +166,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
+       if (tcon == NULL)
+               return 0;
+ 
+-      if (smb2_command == SMB2_TREE_CONNECT)
++      if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
+               return 0;
+ 
+       if (tcon->tidStatus == CifsExiting) {
+@@ -834,7 +834,12 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
+       else
+               req->SecurityMode = 0;
+ 
++#ifdef CONFIG_CIFS_DFS_UPCALL
++      req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
++#else
+       req->Capabilities = 0;
++#endif /* DFS_UPCALL */
++
+       req->Channel = 0; /* MBZ */
+ 
+       sess_data->iov[0].iov_base = (char *)req;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 27deee5c8fa8..6409ff4876cb 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2954,7 +2954,6 @@ static int _nfs4_do_setattr(struct inode *inode,
+       };
+       struct rpc_cred *delegation_cred = NULL;
+       unsigned long timestamp = jiffies;
+-      fmode_t fmode;
+       bool truncate;
+       int status;
+ 
+@@ -2962,11 +2961,12 @@ static int _nfs4_do_setattr(struct inode *inode,
+ 
+       /* Servers should only apply open mode checks for file size changes */
+       truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
+-      fmode = truncate ? FMODE_WRITE : FMODE_READ;
++      if (!truncate)
++              goto zero_stateid;
+ 
+-      if (nfs4_copy_delegation_stateid(inode, fmode, &arg->stateid, &delegation_cred)) {
++      if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
+               /* Use that stateid */
+-      } else if (truncate && ctx != NULL) {
++      } else if (ctx != NULL && ctx->state) {
+               struct nfs_lock_context *l_ctx;
+               if (!nfs4_valid_open_stateid(ctx->state))
+                       return -EBADF;
+@@ -2978,8 +2978,10 @@ static int _nfs4_do_setattr(struct inode *inode,
+               nfs_put_lock_context(l_ctx);
+               if (status == -EIO)
+                       return -EBADF;
+-      } else
++      } else {
++zero_stateid:
+               nfs4_stateid_copy(&arg->stateid, &zero_stateid);
++      }
+       if (delegation_cred)
+               msg.rpc_cred = delegation_cred;
+ 
+diff --git a/include/linux/ccp.h b/include/linux/ccp.h
+index 7e9c991c95e0..43ed9e77cf81 100644
+--- a/include/linux/ccp.h
++++ b/include/linux/ccp.h
+@@ -173,6 +173,8 @@ struct ccp_aes_engine {
+       enum ccp_aes_mode mode;
+       enum ccp_aes_action action;
+ 
++      u32 authsize;
++
+       struct scatterlist *key;
+       u32 key_len;            /* In bytes */
+ 
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 026615e242d8..c8b9d3519c8e 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -808,6 +808,7 @@ void kvm_arch_check_processor_compat(void *rtn);
+ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
+ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
+ 
+ #ifndef __KVM_HAVE_ARCH_VM_ALLOC
+ static inline struct kvm *kvm_arch_alloc_vm(void)
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 7994e569644e..9de2c8cdcc51 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1613,6 +1613,8 @@ static inline void tcp_init_send_head(struct sock *sk)
+       sk->sk_send_head = NULL;
+ }
+ 
++static inline void tcp_init_send_head(struct sock *sk);
++
+ /* write queue abstraction */
+ static inline void tcp_write_queue_purge(struct sock *sk)
+ {
+@@ -1621,6 +1623,7 @@ static inline void tcp_write_queue_purge(struct sock *sk)
+       tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
+       while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
+               sk_wmem_free_skb(sk, skb);
++      tcp_init_send_head(sk);
+       sk_mem_reclaim(sk);
+       tcp_clear_all_retrans_hints(tcp_sk(sk));
+       tcp_init_send_head(sk);
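
The tcp.h hunk makes tcp_write_queue_purge() reset the cached send-head pointer as the queue is emptied, so it can never be left pointing at a freed skb. The underlying pattern, reduced to a standalone sketch (not the kernel's skb machinery):

    #include <stdlib.h>

    struct skb { struct skb *next; };

    struct sock {
            struct skb *queue_head; /* singly-linked write queue */
            struct skb *send_head;  /* next unsent packet; points into the queue */
    };

    static void write_queue_purge(struct sock *sk)
    {
            struct skb *skb;

            while ((skb = sk->queue_head) != NULL) {
                    sk->queue_head = skb->next;
                    free(skb);
            }
            sk->send_head = NULL;   /* otherwise it dangles at freed memory */
    }

    int main(void)
    {
            struct sock sk = { 0 };
            struct skb *pkt = calloc(1, sizeof(*pkt));

            sk.queue_head = pkt;
            sk.send_head = pkt;
            write_queue_purge(&sk);
            return 0;
    }
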
+diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
+index 392bac18398b..33a07c3badf0 100644
+--- a/include/sound/compress_driver.h
++++ b/include/sound/compress_driver.h
+@@ -186,10 +186,7 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
+       if (snd_BUG_ON(!stream))
+               return;
+ 
+-      if (stream->direction == SND_COMPRESS_PLAYBACK)
+-              stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+-      else
+-              stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
++      stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+ 
+       wake_up(&stream->runtime->sleep);
+ }
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 3d4eb6f840eb..ea4f3f7a0c6f 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -10474,7 +10474,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
+               goto err_unlock;
+       }
+ 
+-      perf_install_in_context(ctx, event, cpu);
++      perf_install_in_context(ctx, event, event->cpu);
+       perf_unpin_context(ctx);
+       mutex_unlock(&ctx->mutex);
+ 
+diff --git a/lib/test_firmware.c b/lib/test_firmware.c
+index f978aebe60c5..2e5e18bbfd28 100644
+--- a/lib/test_firmware.c
++++ b/lib/test_firmware.c
+@@ -895,8 +895,11 @@ static int __init test_firmware_init(void)
+               return -ENOMEM;
+ 
+       rc = __test_firmware_config_init();
+-      if (rc)
++      if (rc) {
++              kfree(test_fw_config);
++              pr_err("could not init firmware test config: %d\n", rc);
+               return rc;
++      }
+ 
+       rc = misc_register(&test_fw_misc_device);
+       if (rc) {
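
The test_firmware fix above frees the just-allocated config when the nested init step fails, instead of leaking it on the error return. A generic sketch of unwinding a partial init (names invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    struct config { char *name; };

    static struct config *cfg;

    static int config_init(void) { return -1; /* simulate failure */ }

    static int module_init_fn(void)
    {
            int rc;

            cfg = calloc(1, sizeof(*cfg));
            if (!cfg)
                    return -1;

            rc = config_init();
            if (rc) {
                    free(cfg);      /* undo the allocation this function made */
                    cfg = NULL;
                    fprintf(stderr, "could not init config: %d\n", rc);
                    return rc;
            }
            return 0;
    }

    int main(void) { return module_init_fn() ? 1 : 0; }
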
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 6c906f6f16cc..0b8852d80f44 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1765,6 +1765,12 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
+       if (!addr)
+               return NULL;
+ 
++      /*
++       * First make sure the mappings are removed from all page-tables
++       * before they are freed.
++       */
++      vmalloc_sync_all();
++
+       /*
+        * In this function, newly allocated vm_struct has VM_UNINITIALIZED
+        * flag. It means that vm_struct is not fully initialized.
+@@ -2314,6 +2320,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
+ /*
+  * Implement a stub for vmalloc_sync_all() if the architecture chose not to
+  * have one.
++ *
++ * The purpose of this function is to make sure the vmalloc area
++ * mappings are identical in all page-tables in the system.
+  */
+ void __weak vmalloc_sync_all(void)
+ {
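
vmalloc_sync_all() above is a __weak stub, so an architecture that needs real synchronization can supply its own definition and every caller transparently gets the strong version. A compact userspace demonstration of weak linkage with GCC/Clang on ELF (arch_sync is a made-up symbol):

    #include <stdio.h>

    /* Weak reference: if no other object file defines arch_sync(), its
     * address resolves to NULL instead of causing a link error. */
    extern void arch_sync(void) __attribute__((weak));

    int main(void)
    {
            if (arch_sync)
                    arch_sync();    /* an "arch" object file could provide this */
            else
                    printf("no override, using no-op default\n");
            return 0;
    }
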
+diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
+index 37fb9552e858..341d1bd637af 100644
+--- a/net/ipv4/netfilter/ipt_rpfilter.c
++++ b/net/ipv4/netfilter/ipt_rpfilter.c
+@@ -96,6 +96,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
+       flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
+       flow.flowi4_tos = RT_TOS(iph->tos);
+       flow.flowi4_scope = RT_SCOPE_UNIVERSE;
++      flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
+ 
+       return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert;
+ }
+diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
+index 40eb16bd9786..d535768bea0f 100644
+--- a/net/ipv6/netfilter/ip6t_rpfilter.c
++++ b/net/ipv6/netfilter/ip6t_rpfilter.c
+@@ -58,7 +58,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
+       if (rpfilter_addr_linklocal(&iph->saddr)) {
+               lookup_flags |= RT6_LOOKUP_F_IFACE;
+               fl6.flowi6_oif = dev->ifindex;
+-      } else if ((flags & XT_RPFILTER_LOOSE) == 0)
++      /* Set flowi6_oif for vrf devices to lookup route in l3mdev domain. */
++      } else if (netif_is_l3_master(dev) || netif_is_l3_slave(dev) ||
++                (flags & XT_RPFILTER_LOOSE) == 0)
+               fl6.flowi6_oif = dev->ifindex;
+ 
+       rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags);
+@@ -73,7 +75,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
+               goto out;
+       }
+ 
+-      if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
++      if (rt->rt6i_idev->dev == dev ||
++          l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex ||
++          (flags & XT_RPFILTER_LOOSE))
+               ret = true;
+  out:
+       ip6_rt_put(rt);
+diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
+index bb886e7db47f..f783d1377d9a 100644
+--- a/net/mac80211/driver-ops.c
++++ b/net/mac80211/driver-ops.c
+@@ -169,11 +169,16 @@ int drv_conf_tx(struct ieee80211_local *local,
+       if (!check_sdata_in_driver(sdata))
+               return -EIO;
+ 
+-      if (WARN_ONCE(params->cw_min == 0 ||
+-                    params->cw_min > params->cw_max,
+-                    "%s: invalid CW_min/CW_max: %d/%d\n",
+-                    sdata->name, params->cw_min, params->cw_max))
++      if (params->cw_min == 0 || params->cw_min > params->cw_max) {
++              /*
++               * If we can't configure hardware anyway, don't warn. We may
++               * never have initialized the CW parameters.
++               */
++              WARN_ONCE(local->ops->conf_tx,
++                        "%s: invalid CW_min/CW_max: %d/%d\n",
++                        sdata->name, params->cw_min, params->cw_max);
+               return -EINVAL;
++      }
+ 
+       trace_drv_conf_tx(local, sdata, ac, params);
+       if (local->ops->conf_tx)
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 960a57f7c983..d91db72b9e9e 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -1867,6 +1867,16 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
+               }
+       }
+ 
++      /* WMM specification requires all 4 ACIs. */
++      for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
++              if (params[ac].cw_min == 0) {
++                      sdata_info(sdata,
++                                 "AP has invalid WMM params (missing AC %d), using defaults\n",
++                                 ac);
++                      return false;
++              }
++      }
++
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+               mlme_dbg(sdata,
+                        "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n",
+diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
+index 733d3e4a30d8..2cee032af46d 100644
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -530,7 +530,7 @@ static int nfnetlink_bind(struct net *net, int group)
+       ss = nfnetlink_get_subsys(type << 8);
+       rcu_read_unlock();
+       if (!ss)
+-              request_module("nfnetlink-subsys-%d", type);
++              request_module_nowait("nfnetlink-subsys-%d", type);
+       return 0;
+ }
+ #endif
+diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
+index 24f2f7567ddb..010a565b4000 100644
+--- a/net/netfilter/nft_hash.c
++++ b/net/netfilter/nft_hash.c
+@@ -131,7 +131,7 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
+       priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
+ 
+       priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
+-      if (priv->modulus <= 1)
++      if (priv->modulus < 1)
+               return -ERANGE;
+ 
+       if (priv->offset + priv->modulus - 1 < priv->offset)
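
The nft_hash hunk relaxes the lower bound to permit a modulus of 1 while keeping the existing wraparound guard, priv->offset + priv->modulus - 1 < priv->offset. That guard pattern, runnable in isolation (a sketch, not the nftables code):

    #include <stdint.h>
    #include <stdio.h>

    /* Returns 1 when [offset, offset + modulus - 1] fits in u32 without
     * wrapping; unsigned overflow makes the sum smaller than offset. */
    static int range_ok(uint32_t offset, uint32_t modulus)
    {
            if (modulus < 1)
                    return 0;
            return offset + modulus - 1 >= offset;
    }

    int main(void)
    {
            printf("%d\n", range_ok(0xFFFFFFF0u, 0x20u)); /* 0: wraps around */
            printf("%d\n", range_ok(100, 1));             /* 1: single value */
            return 0;
    }
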
+diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install
+index 067459760a7b..3524dbc31316 100755
+--- a/scripts/sphinx-pre-install
++++ b/scripts/sphinx-pre-install
+@@ -301,7 +301,7 @@ sub give_redhat_hints()
+       #
+       # Checks valid for RHEL/CentOS version 7.x.
+       #
+-      if (! $system_release =~ /Fedora/) {
++      if (!($system_release =~ /Fedora/)) {
+               $map{"virtualenv"} = "python-virtualenv";
+       }
+ 
+diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
+index 555df64d46ff..2e2d18468491 100644
+--- a/sound/core/compress_offload.c
++++ b/sound/core/compress_offload.c
+@@ -575,10 +575,7 @@ snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
+               stream->metadata_set = false;
+               stream->next_track = false;
+ 
+-              if (stream->direction == SND_COMPRESS_PLAYBACK)
+-                      stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+-              else
+-                      stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
++              stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+       } else {
+               return -EPERM;
+       }
+@@ -694,8 +691,17 @@ static int snd_compr_start(struct snd_compr_stream *stream)
+ {
+       int retval;
+ 
+-      if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
++      switch (stream->runtime->state) {
++      case SNDRV_PCM_STATE_SETUP:
++              if (stream->direction != SND_COMPRESS_CAPTURE)
++                      return -EPERM;
++              break;
++      case SNDRV_PCM_STATE_PREPARED:
++              break;
++      default:
+               return -EPERM;
++      }
++
+       retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
+       if (!retval)
+               stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
+@@ -706,9 +712,15 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
+ {
+       int retval;
+ 
+-      if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
+-                      stream->runtime->state == SNDRV_PCM_STATE_SETUP)
++      switch (stream->runtime->state) {
++      case SNDRV_PCM_STATE_OPEN:
++      case SNDRV_PCM_STATE_SETUP:
++      case SNDRV_PCM_STATE_PREPARED:
+               return -EPERM;
++      default:
++              break;
++      }
++
+       retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
+       if (!retval) {
+               snd_compr_drain_notify(stream);
+@@ -796,9 +808,17 @@ static int snd_compr_drain(struct snd_compr_stream *stream)
+ {
+       int retval;
+ 
+-      if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
+-                      stream->runtime->state == SNDRV_PCM_STATE_SETUP)
++      switch (stream->runtime->state) {
++      case SNDRV_PCM_STATE_OPEN:
++      case SNDRV_PCM_STATE_SETUP:
++      case SNDRV_PCM_STATE_PREPARED:
++      case SNDRV_PCM_STATE_PAUSED:
+               return -EPERM;
++      case SNDRV_PCM_STATE_XRUN:
++              return -EPIPE;
++      default:
++              break;
++      }
+ 
+       retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
+       if (retval) {
+@@ -818,6 +838,10 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
+       if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
+               return -EPERM;
+ 
++      /* next track doesn't have any meaning for capture streams */
++      if (stream->direction == SND_COMPRESS_CAPTURE)
++              return -EPERM;
++
+       /* you can signal next track if this is intended to be a gapless stream
+        * and current track metadata is set
+        */
+@@ -835,9 +859,23 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream)
+ static int snd_compr_partial_drain(struct snd_compr_stream *stream)
+ {
+       int retval;
+-      if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
+-                      stream->runtime->state == SNDRV_PCM_STATE_SETUP)
++
++      switch (stream->runtime->state) {
++      case SNDRV_PCM_STATE_OPEN:
++      case SNDRV_PCM_STATE_SETUP:
++      case SNDRV_PCM_STATE_PREPARED:
++      case SNDRV_PCM_STATE_PAUSED:
++              return -EPERM;
++      case SNDRV_PCM_STATE_XRUN:
++              return -EPIPE;
++      default:
++              break;
++      }
++
++      /* partial drain doesn't have any meaning for capture streams */
++      if (stream->direction == SND_COMPRESS_CAPTURE)
+               return -EPERM;
++
+       /* stream can be drained only when next track has been signalled */
+       if (stream->next_track == false)
+               return -EPERM;
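
The compress-offload changes above replace two-state if checks with switch statements, so each trigger enumerates exactly the states it must reject, and an XRUN now yields -EPIPE instead of a generic -EPERM. The guard shape as a self-contained sketch (state names shortened, not the ALSA API):

    #include <errno.h>
    #include <stdio.h>

    enum state { ST_OPEN, ST_SETUP, ST_PREPARED, ST_RUNNING, ST_PAUSED, ST_XRUN };

    static int do_drain(enum state s)
    {
            switch (s) {
            case ST_OPEN:
            case ST_SETUP:
            case ST_PREPARED:
            case ST_PAUSED:
                    return -EPERM;  /* nothing queued yet, or explicitly paused */
            case ST_XRUN:
                    return -EPIPE;  /* overrun/underrun: caller must recover first */
            default:
                    return 0;       /* RUNNING: drain is allowed */
            }
    }

    int main(void)
    {
            printf("drain in XRUN -> %d\n", do_drain(ST_XRUN));
            printf("drain in RUNNING -> %d\n", do_drain(ST_RUNNING));
            return 0;
    }
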
+diff --git a/sound/firewire/packets-buffer.c b/sound/firewire/packets-buffer.c
+index ea1506679c66..3b09b8ef3a09 100644
+--- a/sound/firewire/packets-buffer.c
++++ b/sound/firewire/packets-buffer.c
+@@ -37,7 +37,7 @@ int iso_packets_buffer_init(struct iso_packets_buffer *b, struct fw_unit *unit,
+       packets_per_page = PAGE_SIZE / packet_size;
+       if (WARN_ON(!packets_per_page)) {
+               err = -EINVAL;
+-              goto error;
++              goto err_packets;
+       }
+       pages = DIV_ROUND_UP(count, packets_per_page);
+ 
+diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
+index a12e594d4e3b..a41c1bec7c88 100644
+--- a/sound/pci/hda/hda_controller.c
++++ b/sound/pci/hda/hda_controller.c
+@@ -609,11 +609,9 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
+       }
+       runtime->private_data = azx_dev;
+ 
+-      if (chip->gts_present)
+-              azx_pcm_hw.info = azx_pcm_hw.info |
+-                      SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
+-
+       runtime->hw = azx_pcm_hw;
++      if (chip->gts_present)
++              runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
+       runtime->hw.channels_min = hinfo->channels_min;
+       runtime->hw.channels_max = hinfo->channels_max;
+       runtime->hw.formats = hinfo->formats;
+@@ -626,6 +624,13 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
+                                    20,
+                                    178000000);
+ 
++      /* by some reason, the playback stream stalls on PulseAudio with
++       * tsched=1 when a capture stream triggers.  Until we figure out the
++       * real cause, disable tsched mode by telling the PCM info flag.
++       */
++      if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
++              runtime->hw.info |= SNDRV_PCM_INFO_BATCH;
++
+       if (chip->align_buffer_size)
+               /* constrain buffer sizes to be multiple of 128
+                  bytes. This is more efficient in terms of memory
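
The hda_controller fix above stops OR-ing the link-sync flag into the global azx_pcm_hw template, where it would have leaked into every subsequently opened stream, and sets it on the per-runtime copy instead. The copy-then-customize pattern in miniature (flag values invented):

    #include <stdio.h>

    struct pcm_hw { unsigned int info; };

    static const struct pcm_hw pcm_hw_template = { .info = 0x1 };

    struct runtime { struct pcm_hw hw; };

    static void open_stream(struct runtime *rt, int wants_extra_flag)
    {
            rt->hw = pcm_hw_template;       /* copy the shared template first */
            if (wants_extra_flag)
                    rt->hw.info |= 0x100;   /* mutate only this stream's copy */
    }

    int main(void)
    {
            struct runtime a, b;

            open_stream(&a, 1);
            open_stream(&b, 0);
            printf("a=%#x b=%#x template=%#x\n", a.hw.info, b.hw.info,
                   pcm_hw_template.info);  /* template stays pristine */
            return 0;
    }
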
+diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
+index 53c3cd28bc99..8a9dd4767b1e 100644
+--- a/sound/pci/hda/hda_controller.h
++++ b/sound/pci/hda/hda_controller.h
+@@ -40,7 +40,7 @@
+ /* 14 unused */
+ #define AZX_DCAPS_CTX_WORKAROUND (1 << 15)    /* X-Fi workaround */
+ #define AZX_DCAPS_POSFIX_LPIB (1 << 16)       /* Use LPIB as default */
+-/* 17 unused */
++#define AZX_DCAPS_AMD_WORKAROUND (1 << 17)    /* AMD-specific workaround */
+ #define AZX_DCAPS_NO_64BIT    (1 << 18)       /* No 64bit address */
+ #define AZX_DCAPS_SYNC_WRITE  (1 << 19)       /* sync each cmd write */
+ #define AZX_DCAPS_OLD_SSYNC   (1 << 20)       /* Old SSYNC reg for ICH */
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index d349f69ef03c..4631579e1e18 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -78,6 +78,7 @@ enum {
+       POS_FIX_VIACOMBO,
+       POS_FIX_COMBO,
+       POS_FIX_SKL,
++      POS_FIX_FIFO,
+ };
+ 
+ /* Defines for ATI HD Audio support in SB450 south bridge */
+@@ -149,7 +150,7 @@ module_param_array(model, charp, NULL, 0444);
+ MODULE_PARM_DESC(model, "Use the given board model.");
+ module_param_array(position_fix, int, NULL, 0444);
+ MODULE_PARM_DESC(position_fix, "DMA pointer read method."
+-               "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+).");
++               "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+, 6 = FIFO).");
+ module_param_array(bdl_pos_adj, int, NULL, 0644);
+ MODULE_PARM_DESC(bdl_pos_adj, "BDL position adjustment offset.");
+ module_param_array(probe_mask, int, NULL, 0444);
+@@ -350,6 +351,11 @@ enum {
+ #define AZX_DCAPS_PRESET_ATI_HDMI_NS \
+       (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF)
+ 
++/* quirks for AMD SB */
++#define AZX_DCAPS_PRESET_AMD_SB \
++      (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_AMD_WORKAROUND |\
++       AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME)
++
+ /* quirks for Nvidia */
+ #define AZX_DCAPS_PRESET_NVIDIA \
+       (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
+@@ -917,6 +923,49 @@ static unsigned int azx_via_get_position(struct azx *chip,
+       return bound_pos + mod_dma_pos;
+ }
+ 
++#define AMD_FIFO_SIZE 32
++
++/* get the current DMA position with FIFO size correction */
++static unsigned int azx_get_pos_fifo(struct azx *chip, struct azx_dev *azx_dev)
++{
++      struct snd_pcm_substream *substream = azx_dev->core.substream;
++      struct snd_pcm_runtime *runtime = substream->runtime;
++      unsigned int pos, delay;
++
++      pos = snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
++      if (!runtime)
++              return pos;
++
++      runtime->delay = AMD_FIFO_SIZE;
++      delay = frames_to_bytes(runtime, AMD_FIFO_SIZE);
++      if (azx_dev->insufficient) {
++              if (pos < delay) {
++                      delay = pos;
++                      runtime->delay = bytes_to_frames(runtime, pos);
++              } else {
++                      azx_dev->insufficient = 0;
++              }
++      }
++
++      /* correct the DMA position for capture stream */
++      if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
++              if (pos < delay)
++                      pos += azx_dev->core.bufsize;
++              pos -= delay;
++      }
++
++      return pos;
++}
++
++static int azx_get_delay_from_fifo(struct azx *chip, struct azx_dev *azx_dev,
++                                 unsigned int pos)
++{
++      struct snd_pcm_substream *substream = azx_dev->core.substream;
++
++      /* just read back the calculated value in the above */
++      return substream->runtime->delay;
++}
++
+ static unsigned int azx_skl_get_dpib_pos(struct azx *chip,
+                                        struct azx_dev *azx_dev)
+ {
+@@ -1484,6 +1533,7 @@ static int check_position_fix(struct azx *chip, int fix)
+       case POS_FIX_VIACOMBO:
+       case POS_FIX_COMBO:
+       case POS_FIX_SKL:
++      case POS_FIX_FIFO:
+               return fix;
+       }
+ 
+@@ -1500,6 +1550,10 @@ static int check_position_fix(struct azx *chip, int fix)
+               dev_dbg(chip->card->dev, "Using VIACOMBO position fix\n");
+               return POS_FIX_VIACOMBO;
+       }
++      if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND) {
++              dev_dbg(chip->card->dev, "Using FIFO position fix\n");
++              return POS_FIX_FIFO;
++      }
+       if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) {
+               dev_dbg(chip->card->dev, "Using LPIB position fix\n");
+               return POS_FIX_LPIB;
+@@ -1520,6 +1574,7 @@ static void assign_position_fix(struct azx *chip, int fix)
+               [POS_FIX_VIACOMBO] = azx_via_get_position,
+               [POS_FIX_COMBO] = azx_get_pos_lpib,
+               [POS_FIX_SKL] = azx_get_pos_skl,
++              [POS_FIX_FIFO] = azx_get_pos_fifo,
+       };
+ 
+       chip->get_position[0] = chip->get_position[1] = callbacks[fix];
+@@ -1534,6 +1589,9 @@ static void assign_position_fix(struct azx *chip, int fix)
+                       azx_get_delay_from_lpib;
+       }
+ 
++      if (fix == POS_FIX_FIFO)
++              chip->get_delay[0] = chip->get_delay[1] =
++                      azx_get_delay_from_fifo;
+ }
+ 
+ /*
+@@ -2516,6 +2574,9 @@ static const struct pci_device_id azx_ids[] = {
+       /* AMD Hudson */
+       { PCI_DEVICE(0x1022, 0x780d),
+         .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
++      /* AMD, X370 & co */
++      { PCI_DEVICE(0x1022, 0x1457),
++        .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
+       /* AMD Stoney */
+       { PCI_DEVICE(0x1022, 0x157a),
+         .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
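
azx_get_pos_fifo() above subtracts a fixed FIFO depth from the raw LPIB position and, for capture streams, wraps the result around the ring buffer. The arithmetic alone, as a runnable sketch (the 32-byte FIFO and 4096-byte buffer are illustrative values):

    #include <stdio.h>

    /* Correct a capture stream's raw DMA position for a hardware FIFO:
     * 'delay' bytes are still in flight, so the usable position trails
     * the raw pointer, wrapping at the ring-buffer size. */
    static unsigned int capture_pos(unsigned int raw_pos, unsigned int delay,
                                    unsigned int bufsize)
    {
            if (raw_pos < delay)
                    raw_pos += bufsize;  /* borrow a full lap before subtracting */
            return raw_pos - delay;
    }

    int main(void)
    {
            printf("%u\n", capture_pos(10, 32, 4096));   /* 4074: wrapped */
            printf("%u\n", capture_pos(100, 32, 4096));  /* 68 */
            return 0;
    }
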
+diff --git a/sound/sound_core.c b/sound/sound_core.c
+index 99b73c675743..20d4e2e1bacf 100644
+--- a/sound/sound_core.c
++++ b/sound/sound_core.c
+@@ -287,7 +287,8 @@ retry:
+                               goto retry;
+                       }
+                       spin_unlock(&sound_loader_lock);
+-                      return -EBUSY;
++                      r = -EBUSY;
++                      goto fail;
+               }
+       }
+ 
+diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
+index a19690a17291..c8c86a0c9b79 100644
+--- a/tools/perf/arch/s390/util/machine.c
++++ b/tools/perf/arch/s390/util/machine.c
+@@ -6,8 +6,9 @@
+ #include "machine.h"
+ #include "api/fs/fs.h"
+ #include "debug.h"
++#include "symbol.h"
+ 
+-int arch__fix_module_text_start(u64 *start, const char *name)
++int arch__fix_module_text_start(u64 *start, u64 *size, const char *name)
+ {
+       u64 m_start = *start;
+       char path[PATH_MAX];
+@@ -17,7 +18,35 @@ int arch__fix_module_text_start(u64 *start, const char *name)
+       if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
+               pr_debug2("Using module %s start:%#lx\n", path, m_start);
+               *start = m_start;
++      } else {
++              /* Successful read of the modules segment text start address.
++               * Calculate difference between module start address
++               * in memory and module text segment start address.
++               * For example module load address is 0x3ff8011b000
++               * (from /proc/modules) and module text segment start
++               * address is 0x3ff8011b870 (from file above).
++               *
++               * Adjust the module size and subtract the GOT table
++               * size located at the beginning of the module.
++               */
++              *size -= (*start - m_start);
+       }
+ 
+       return 0;
+ }
++
++/* On s390 kernel text segment start is located at very low memory addresses,
++ * for example 0x10000. Modules are located at very high memory addresses,
++ * for example 0x3ff xxxx xxxx. The gap between end of kernel text segment
++ * and beginning of first module's text segment is very big.
++ * Therefore do not fill this gap and do not assign it to the kernel dso map.
++ */
++void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
++{
++      if (strchr(p->name, '[') == NULL && strchr(c->name, '['))
++              /* Last kernel symbol mapped to end of page */
++              p->end = roundup(p->end, page_size);
++      else
++              p->end = c->start;
++      pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end);
++}
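
The s390 helper above rounds the last kernel symbol's end up to a page boundary rather than stretching it to the first module, which sits far away in the address space. The roundup() arithmetic it relies on, stand-alone (4 KiB page size assumed for the example):

    #include <stdio.h>

    /* Round x up to the next multiple of y (y > 0), like the kernel's roundup(). */
    #define roundup(x, y)  ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
            unsigned long page_size = 4096;
            unsigned long sym_end = 0x10123;

            printf("%#lx\n", roundup(sym_end, page_size));  /* 0x11000 */
            return 0;
    }
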
+diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
+index c0065923a525..e1ac51aaedcf 100644
+--- a/tools/perf/builtin-probe.c
++++ b/tools/perf/builtin-probe.c
+@@ -712,6 +712,16 @@ __cmd_probe(int argc, const char **argv)
+ 
+               ret = perf_add_probe_events(params.events, params.nevents);
+               if (ret < 0) {
++
++                      /*
++                       * When perf_add_probe_events() fails it calls
++                       * cleanup_perf_probe_events(pevs, npevs), i.e.
++                       * cleanup_perf_probe_events(params.events, params.nevents), which
++                       * will call clear_perf_probe_event(), so set nevents to zero
++                       * to avoid cleanup_params() to call clear_perf_probe_event() again
++                       * on the same pevs.
++                       */
++                      params.nevents = 0;
+                       pr_err_with_code("  Error: Failed to add events.", ret);
+                       return ret;
+               }
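
The builtin-probe fix zeroes params.nevents after a failed perf_add_probe_events(), because the callee has already freed the events and the common exit path would otherwise free them a second time. A distilled model of that double-free guard (all names invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct event { char *name; };

    /* Frees each event's contents but does not reset the caller's count;
     * calling this twice with the same count double-frees. */
    static void cleanup_events(struct event *evs, int n)
    {
            for (int i = 0; i < n; i++)
                    free(evs[i].name);
    }

    struct params { struct event evs[4]; int nevents; };

    static int add_events(struct params *p)
    {
            cleanup_events(p->evs, p->nevents);  /* failing callee cleans up */
            return -1;
    }

    int main(void)
    {
            struct params p = { .nevents = 1 };

            p.evs[0].name = malloc(8);
            if (add_events(&p) < 0)
                    p.nevents = 0;  /* callee freed them; make teardown a no-op */

            cleanup_events(p.evs, p.nevents);  /* common exit path */
            printf("no double free\n");
            return 0;
    }
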
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index 26437143c940..c892a28e7b04 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -3081,7 +3081,7 @@ int perf_event__process_feature(struct perf_tool *tool,
+               return 0;
+ 
+       ff.buf  = (void *)fe->data;
+-      ff.size = event->header.size - sizeof(event->header);
++      ff.size = event->header.size - sizeof(*fe);
+       ff.ph = &session->header;
+ 
+       if (feat_ops[feat].process(&ff, NULL))
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index d246080cd85e..5145c6a84c47 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -1233,6 +1233,7 @@ static int machine__set_modules_path(struct machine *machine)
+       return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
+ }
+ int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
++                              u64 *size __maybe_unused,
+                               const char *name __maybe_unused)
+ {
+       return 0;
+@@ -1244,7 +1245,7 @@ static int machine__create_module(void *arg, const char *name, u64 start,
+       struct machine *machine = arg;
+       struct map *map;
+ 
+-      if (arch__fix_module_text_start(&start, name) < 0)
++      if (arch__fix_module_text_start(&start, &size, name) < 0)
+               return -1;
+ 
+       map = machine__findnew_module_map(machine, start, name);
+diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
+index 13041b036a5b..ec339cdf854c 100644
+--- a/tools/perf/util/machine.h
++++ b/tools/perf/util/machine.h
+@@ -213,7 +213,7 @@ struct symbol *machine__find_kernel_function_by_name(struct machine *machine,
+ 
+ struct map *machine__findnew_module_map(struct machine *machine, u64 start,
+                                       const char *filename);
+-int arch__fix_module_text_start(u64 *start, const char *name);
++int arch__fix_module_text_start(u64 *start, u64 *size, const char *name);
+ 
+ int __machine__load_kallsyms(struct machine *machine, const char *filename,
+                            enum map_type type, bool no_kcore);
+diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
+index 3936f69f385c..27bffcb213eb 100644
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -93,6 +93,11 @@ static int prefix_underscores_count(const char *str)
+       return tail - str;
+ }
+ 
++void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
++{
++      p->end = c->start;
++}
++
+ const char * __weak arch__normalize_symbol_name(const char *name)
+ {
+       return name;
+@@ -219,7 +224,7 @@ void symbols__fixup_end(struct rb_root *symbols)
+               curr = rb_entry(nd, struct symbol, rb_node);
+ 
+               if (prev->end == prev->start && prev->end != curr->start)
+-                      prev->end = curr->start;
++                      arch__symbols__fixup_end(prev, curr);
+       }
+ 
+       /* Last entry */
+diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
+index 698c65e603a8..95ac21be8481 100644
+--- a/tools/perf/util/symbol.h
++++ b/tools/perf/util/symbol.h
+@@ -351,6 +351,7 @@ const char *arch__normalize_symbol_name(const char *name);
+ #define SYMBOL_A 0
+ #define SYMBOL_B 1
+ 
++void arch__symbols__fixup_end(struct symbol *p, struct symbol *c);
+ int arch__compare_symbol_names(const char *namea, const char *nameb);
+ int arch__compare_symbol_names_n(const char *namea, const char *nameb,
+                                unsigned int n);
+diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
+index 1dbcd3c8dee0..177436c8e026 100644
+--- a/tools/perf/util/thread.c
++++ b/tools/perf/util/thread.c
+@@ -160,14 +160,24 @@ struct comm *thread__comm(const struct thread *thread)
+ 
+ struct comm *thread__exec_comm(const struct thread *thread)
+ {
+-      struct comm *comm, *last = NULL;
++      struct comm *comm, *last = NULL, *second_last = NULL;
+ 
+       list_for_each_entry(comm, &thread->comm_list, list) {
+               if (comm->exec)
+                       return comm;
++              second_last = last;
+               last = comm;
+       }
+ 
++      /*
++       * 'last' with no start time might be the parent's comm of a synthesized
++       * thread (created by processing a synthesized fork event). For a main
++       * thread, that is very probably wrong. Prefer a later comm to avoid
++       * that case.
++       */
++      if (second_last && !last->start && thread->pid_ == thread->tid)
++              return second_last;
++
+       return last;
+ }
+ 
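
thread__exec_comm() above now remembers the second-to-last comm so a trailing entry with no start time, likely inherited via a synthesized fork event, can be skipped on the main thread. The last-two-entries bookkeeping, reduced to a sketch (not the perf data structures):

    #include <stdio.h>

    struct comm { const char *str; int start; struct comm *next; };

    static const char *pick_comm(struct comm *head)
    {
            struct comm *last = NULL, *second_last = NULL;

            for (struct comm *c = head; c; c = c->next) {
                    second_last = last;
                    last = c;
            }

            /* A trailing entry with no start time is suspect; prefer the
             * one before it when there is one. */
            if (second_last && last->start == 0)
                    return second_last->str;
            return last ? last->str : NULL;
    }

    int main(void)
    {
            struct comm b = { "inherited", 0, NULL };
            struct comm a = { "real-name", 5, &b };

            printf("%s\n", pick_comm(&a));  /* real-name */
            return 0;
    }
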
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index dbbfcd082513..89fd40e57cae 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2314,6 +2314,29 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
+ #endif
+ }
+ 
++/*
++ * Unlike kvm_arch_vcpu_runnable, this function is called outside
++ * a vcpu_load/vcpu_put pair.  However, for most architectures
++ * kvm_arch_vcpu_runnable does not require vcpu_load.
++ */
++bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
++{
++      return kvm_arch_vcpu_runnable(vcpu);
++}
++
++static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
++{
++      if (kvm_arch_dy_runnable(vcpu))
++              return true;
++
++#ifdef CONFIG_KVM_ASYNC_PF
++      if (!list_empty_careful(&vcpu->async_pf.done))
++              return true;
++#endif
++
++      return false;
++}
++
+ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
+ {
+       struct kvm *kvm = me->kvm;
+@@ -2343,7 +2366,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
+                               continue;
+                       if (vcpu == me)
+                               continue;
+-                      if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
++                      if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
+                               continue;
+                       if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
+                               continue;
