commit:     9e2ff11175e4ea0e03f37daf4db1c759be5dc3ce
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jan 17 16:22:00 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jan 17 16:22:00 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9e2ff111

Linux patch 4.9.252

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |   4 +
 1251_linux-4.9.252.patch | 788 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 792 insertions(+)

diff --git a/0000_README b/0000_README
index 9c820fe..266222d 100644
--- a/0000_README
+++ b/0000_README
@@ -1047,6 +1047,10 @@ Patch:  1250_linux-4.9.251.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.251
 
+Patch:  1251_linux-4.9.252.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.252
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1251_linux-4.9.252.patch b/1251_linux-4.9.252.patch
new file mode 100644
index 0000000..dee0597
--- /dev/null
+++ b/1251_linux-4.9.252.patch
@@ -0,0 +1,788 @@
+diff --git a/Makefile b/Makefile
+index 8ebbb60f2078a..2213fe336705f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 251
++SUBLEVEL = 252
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
+index f989145480c8f..bf236b7af8c1a 100644
+--- a/arch/arm/mach-omap2/omap_device.c
++++ b/arch/arm/mach-omap2/omap_device.c
+@@ -224,10 +224,12 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
+               break;
+       case BUS_NOTIFY_BIND_DRIVER:
+               od = to_omap_device(pdev);
+-              if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
+-                  pm_runtime_status_suspended(dev)) {
++              if (od) {
+                       od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
+-                      pm_runtime_set_active(dev);
++                      if (od->_state == OMAP_DEVICE_STATE_ENABLED &&
++                          pm_runtime_status_suspended(dev)) {
++                              pm_runtime_set_active(dev);
++                      }
+               }
+               break;
+       case BUS_NOTIFY_ADD_DEVICE:
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 0ad81fa13688f..10d80456f38f1 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -450,6 +450,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+ {
+       u64 pmcr, val;
+ 
++      /* No PMU available, PMCR_EL0 may UNDEF... */
++      if (!kvm_arm_support_pmu_v3())
++              return;
++
+       pmcr = read_sysreg(pmcr_el0);
+       /*
+        * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
+diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
+index 6b8b2d57fdc8c..e588028922a83 100644
+--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
++++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
+@@ -411,9 +411,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+       if (pte_val(*ptep) & _PAGE_HASHPTE)
+               flush_hash_entry(mm, ptep, addr);
+       __asm__ __volatile__("\
+-              stw%U0%X0 %2,%0\n\
++              stw%X0 %2,%0\n\
+               eieio\n\
+-              stw%U0%X0 %L2,%1"
++              stw%X1 %L2,%1"
+       : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+       : "r" (pte) : "memory");
+ 
+diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
+index 1263c22d60d85..330fe178c0c5e 100644
+--- a/arch/powerpc/include/asm/nohash/pgtable.h
++++ b/arch/powerpc/include/asm/nohash/pgtable.h
+@@ -155,9 +155,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+               flush_hash_entry(mm, ptep, addr);
+ #endif
+       __asm__ __volatile__("\
+-              stw%U0%X0 %2,%0\n\
++              stw%X0 %2,%0\n\
+               eieio\n\
+-              stw%U0%X0 %L2,%1"
++              stw%X1 %L2,%1"
+       : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+       : "r" (pte) : "memory");
+ 
+diff --git a/block/genhd.c b/block/genhd.c
+index fcd6d4fae657c..9c1adfd768d2c 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -159,14 +159,17 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
+               part = rcu_dereference(ptbl->part[piter->idx]);
+               if (!part)
+                       continue;
++              get_device(part_to_dev(part));
++              piter->part = part;
+               if (!part_nr_sects_read(part) &&
+                   !(piter->flags & DISK_PITER_INCL_EMPTY) &&
+                   !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
+-                    piter->idx == 0))
++                    piter->idx == 0)) {
++                      put_device(part_to_dev(part));
++                      piter->part = NULL;
+                       continue;
++              }
+ 
+-              get_device(part_to_dev(part));
+-              piter->part = part;
+               piter->idx += inc;
+               break;
+       }
+diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
+index 39dd30b6ef86e..894102fd5a069 100644
+--- a/drivers/block/Kconfig
++++ b/drivers/block/Kconfig
+@@ -530,6 +530,7 @@ config BLK_DEV_RBD
+ config BLK_DEV_RSXX
+       tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
+       depends on PCI
++      select CRC32
+       help
+         Device driver for IBM's high speed PCIe SSD
+         storage device: Flash Adapter 900GB Full Height.
+diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
+index 0b5bf135b0907..59f16807921ad 100644
+--- a/drivers/cpufreq/powernow-k8.c
++++ b/drivers/cpufreq/powernow-k8.c
+@@ -887,9 +887,9 @@ static int get_transition_latency(struct powernow_k8_data *data)
+ 
+ /* Take a frequency, and issue the fid/vid transition command */
+ static int transition_frequency_fidvid(struct powernow_k8_data *data,
+-              unsigned int index)
++              unsigned int index,
++              struct cpufreq_policy *policy)
+ {
+-      struct cpufreq_policy *policy;
+       u32 fid = 0;
+       u32 vid = 0;
+       int res;
+@@ -921,9 +921,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
+       freqs.old = find_khz_freq_from_fid(data->currfid);
+       freqs.new = find_khz_freq_from_fid(fid);
+ 
+-      policy = cpufreq_cpu_get(smp_processor_id());
+-      cpufreq_cpu_put(policy);
+-
+       cpufreq_freq_transition_begin(policy, &freqs);
+       res = transition_fid_vid(data, fid, vid);
+       cpufreq_freq_transition_end(policy, &freqs, res);
+@@ -978,7 +975,7 @@ static long powernowk8_target_fn(void *arg)
+ 
+       powernow_k8_acpi_pst_values(data, newstate);
+ 
+-      ret = transition_frequency_fidvid(data, newstate);
++      ret = transition_frequency_fidvid(data, newstate, pol);
+ 
+       if (ret) {
+               pr_err("transition frequency failed\n");
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index ef99ef0bb1ca2..f00652585ee31 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -2357,7 +2357,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+               has_dre = false;
+ 
+       if (!has_dre)
+-              xdev->common.copy_align = fls(width - 1);
++              xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
+ 
+       if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
+           of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
+@@ -2630,7 +2630,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
+       }
+ 
+       /* Register the DMA engine with the core */
+-      dma_async_device_register(&xdev->common);
++      err = dma_async_device_register(&xdev->common);
++      if (err) {
++              dev_err(xdev->dev, "failed to register the dma device\n");
++              goto error;
++      }
+ 
+       err = of_dma_controller_register(node, of_dma_xilinx_xlate,
+                                        xdev);
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index 4548d89abcdc3..ff8168c60b35a 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -882,7 +882,7 @@ eb_vma_misplaced(struct i915_vma *vma)
+               return !only_mappable_for_reloc(entry->flags);
+ 
+       if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
+-          (vma->node.start + vma->node.size - 1) >> 32)
++          (vma->node.start + vma->node.size + 4095) >> 32)
+               return true;
+ 
+       return false;
+diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
+index ce125ec23d2a5..88ba1a65c2830 100644
+--- a/drivers/iommu/intel_irq_remapping.c
++++ b/drivers/iommu/intel_irq_remapping.c
+@@ -1350,6 +1350,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
+               irq_data = irq_domain_get_irq_data(domain, virq + i);
+               irq_cfg = irqd_cfg(irq_data);
+               if (!irq_data || !irq_cfg) {
++                      if (!i)
++                              kfree(data);
+                       ret = -EINVAL;
+                       goto out_free_data;
+               }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+index 8cd7227fbdfce..3dd0bc8804c1a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+@@ -930,6 +930,7 @@ err_destroy_groups:
+       ft->g[ft->num_groups] = NULL;
+       mlx5e_destroy_groups(ft);
+       kvfree(in);
++      kfree(ft->g);
+ 
+       return err;
+ }
+diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
+index 4e9fe75d70675..069f933b0add2 100644
+--- a/drivers/net/wan/Kconfig
++++ b/drivers/net/wan/Kconfig
+@@ -295,6 +295,7 @@ config SLIC_DS26522
+       tristate "Slic Maxim ds26522 card support"
+       depends on SPI
+       depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
++      select BITREVERSE
+       help
+         This module initializes and configures the slic maxim card
+         in T1 or E1 mode.
+diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
+index 6dfedc8bd6a3d..7df13a684d2df 100644
+--- a/drivers/net/wireless/ath/wil6210/Kconfig
++++ b/drivers/net/wireless/ath/wil6210/Kconfig
+@@ -1,6 +1,7 @@
+ config WIL6210
+       tristate "Wilocity 60g WiFi card wil6210 support"
+       select WANT_DEV_COREDUMP
++      select CRC32
+       depends on CFG80211
+       depends on PCI
+       default n
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index da3834fe5e570..9bb6a574ab2fe 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -1606,7 +1606,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
+               return -ENODEV;
+       }
+ 
+-      master = spi_alloc_master(dev, sizeof(struct driver_data));
++      master = devm_spi_alloc_master(dev, sizeof(*drv_data));
+       if (!master) {
+               dev_err(&pdev->dev, "cannot alloc spi_master\n");
+               pxa_ssp_free(ssp);
+@@ -1788,7 +1788,6 @@ out_error_clock_enabled:
+       free_irq(ssp->irq, drv_data);
+ 
+ out_error_master_alloc:
+-      spi_master_put(master);
+       pxa_ssp_free(ssp);
+       return status;
+ }
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index e738b4621cbba..ecd707f74ddcb 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1736,6 +1736,10 @@ void transport_generic_request_failure(struct se_cmd *cmd,
+       case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+       case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+       case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
++      case TCM_TOO_MANY_TARGET_DESCS:
++      case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
++      case TCM_TOO_MANY_SEGMENT_DESCS:
++      case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
+               break;
+       case TCM_OUT_OF_RESOURCES:
+               sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+@@ -2886,6 +2890,26 @@ static const struct sense_info sense_info_table[] = {
+               .key = ILLEGAL_REQUEST,
+               .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
+       },
++      [TCM_TOO_MANY_TARGET_DESCS] = {
++              .key = ILLEGAL_REQUEST,
++              .asc = 0x26,
++              .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
++      },
++      [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
++              .key = ILLEGAL_REQUEST,
++              .asc = 0x26,
++              .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
++      },
++      [TCM_TOO_MANY_SEGMENT_DESCS] = {
++              .key = ILLEGAL_REQUEST,
++              .asc = 0x26,
++              .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
++      },
++      [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
++              .key = ILLEGAL_REQUEST,
++              .asc = 0x26,
++              .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
++      },
+       [TCM_PARAMETER_LIST_LENGTH_ERROR] = {
+               .key = ILLEGAL_REQUEST,
+               .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
+diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
+index 18848ba8d2ba0..84e3bf1132fd5 100644
+--- a/drivers/target/target_core_xcopy.c
++++ b/drivers/target/target_core_xcopy.c
+@@ -52,64 +52,87 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
+       return 0;
+ }
+ 
+-static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
+-                                      bool src)
++/**
++ * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers
++ *
++ * @se_dev: device being considered for match
++ * @dev_wwn: XCOPY requested NAA dev_wwn
++ * @return: 1 on match, 0 on no-match
++ */
++static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
++                                            const unsigned char *dev_wwn)
+ {
+-      struct se_device *se_dev;
+-      unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
++      unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+       int rc;
+ 
+-      if (src)
+-              dev_wwn = &xop->dst_tid_wwn[0];
+-      else
+-              dev_wwn = &xop->src_tid_wwn[0];
+-
+-      mutex_lock(&g_device_mutex);
+-      list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
+-
+-              if (!se_dev->dev_attrib.emulate_3pc)
+-                      continue;
++      if (!se_dev->dev_attrib.emulate_3pc) {
++              pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev);
++              return 0;
++      }
+ 
+-              memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
+-              target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
++      memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
++      target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
+ 
+-              rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
+-              if (rc != 0)
+-                      continue;
++      rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
++      if (rc != 0) {
++              pr_debug("XCOPY: skip non-matching: %*ph\n",
++                       XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn);
++              return 0;
++      }
++      pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
+ 
+-              if (src) {
+-                      xop->dst_dev = se_dev;
+-                      pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from 
located"
+-                              " se_dev\n", xop->dst_dev);
+-              } else {
+-                      xop->src_dev = se_dev;
+-                      pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from 
located"
+-                              " se_dev\n", xop->src_dev);
+-              }
++      return 1;
++}
+ 
+-              rc = target_depend_item(&se_dev->dev_group.cg_item);
+-              if (rc != 0) {
+-                      pr_err("configfs_depend_item attempt failed:"
+-                              " %d for se_dev: %p\n", rc, se_dev);
+-                      mutex_unlock(&g_device_mutex);
+-                      return rc;
++static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
++                                      const unsigned char *dev_wwn,
++                                      struct se_device **_found_dev,
++                                      struct percpu_ref **_found_lun_ref)
++{
++      struct se_dev_entry *deve;
++      struct se_node_acl *nacl;
++      struct se_lun *this_lun = NULL;
++      struct se_device *found_dev = NULL;
++
++      /* cmd with NULL sess indicates no associated $FABRIC_MOD */
++      if (!sess)
++              goto err_out;
++
++      pr_debug("XCOPY 0xe4: searching for: %*ph\n",
++               XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn);
++
++      nacl = sess->se_node_acl;
++      rcu_read_lock();
++      hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
++              struct se_device *this_dev;
++              int rc;
++
++              this_lun = rcu_dereference(deve->se_lun);
++              this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
++
++              rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
++              if (rc) {
++                      if (percpu_ref_tryget_live(&this_lun->lun_ref))
++                              found_dev = this_dev;
++                      break;
+               }
+-
+-              pr_debug("Called configfs_depend_item for se_dev: %p"
+-                      " se_dev->se_dev_group: %p\n", se_dev,
+-                      &se_dev->dev_group);
+-
+-              mutex_unlock(&g_device_mutex);
+-              return 0;
+       }
+-      mutex_unlock(&g_device_mutex);
+-
++      rcu_read_unlock();
++      if (found_dev == NULL)
++              goto err_out;
++
++      pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n",
++               found_dev, &found_dev->dev_group);
++      *_found_dev = found_dev;
++      *_found_lun_ref = &this_lun->lun_ref;
++      return 0;
++err_out:
+       pr_debug_ratelimited("Unable to locate 0xe4 descriptor for 
EXTENDED_COPY\n");
+       return -EINVAL;
+ }
+ 
+ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
+-                              unsigned char *p, bool src)
++                              unsigned char *p, unsigned short cscd_index)
+ {
+       unsigned char *desc = p;
+       unsigned short ript;
+@@ -154,7 +177,13 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
+               return -EINVAL;
+       }
+ 
+-      if (src) {
++      if (cscd_index != xop->stdi && cscd_index != xop->dtdi) {
++              pr_debug("XCOPY 0xe4: ignoring CSCD entry %d - neither src nor "
++                       "dest\n", cscd_index);
++              return 0;
++      }
++
++      if (cscd_index == xop->stdi) {
+              memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
+               /*
+                * Determine if the source designator matches the local device
+@@ -166,10 +195,15 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
+                       pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
+                                       " received xop\n", xop->src_dev);
+               }
+-      } else {
++      }
++
++      if (cscd_index == xop->dtdi) {
+              memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
+               /*
+-               * Determine if the destination designator matches the local device
++               * Determine if the destination designator matches the local
++               * device. If @cscd_index corresponds to both source (stdi) and
++               * destination (dtdi), or dtdi comes after stdi, then
++               * XCOL_DEST_RECV_OP wins.
+                */
+               if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
+                               XCOPY_NAA_IEEE_REGEX_LEN)) {
+@@ -189,9 +223,9 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
+ {
+       struct se_device *local_dev = se_cmd->se_dev;
+       unsigned char *desc = p;
+-      int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0;
++      int offset = tdll % XCOPY_TARGET_DESC_LEN, rc;
++      unsigned short cscd_index = 0;
+       unsigned short start = 0;
+-      bool src = true;
+ 
+       *sense_ret = TCM_INVALID_PARAMETER_LIST;
+ 
+@@ -214,25 +248,19 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
+ 
+       while (start < tdll) {
+               /*
+-               * Check target descriptor identification with 0xE4 type with
+-               * use VPD 0x83 WWPN matching ..
++               * Check target descriptor identification with 0xE4 type, and
++               * compare the current index with the CSCD descriptor IDs in
++               * the segment descriptor. Use VPD 0x83 WWPN matching ..
+                */
+               switch (desc[0]) {
+               case 0xe4:
+                       rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
+-                                                      &desc[0], src);
++                                                      &desc[0], cscd_index);
+                       if (rc != 0)
+                               goto out;
+-                      /*
+-                       * Assume target descriptors are in source -> destination order..
+-                       */
+-                      if (src)
+-                              src = false;
+-                      else
+-                              src = true;
+                       start += XCOPY_TARGET_DESC_LEN;
+                       desc += XCOPY_TARGET_DESC_LEN;
+-                      ret++;
++                      cscd_index++;
+                       break;
+               default:
+                       pr_err("XCOPY unsupported descriptor type code:"
+@@ -241,10 +269,25 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
+               }
+       }
+ 
+-      if (xop->op_origin == XCOL_SOURCE_RECV_OP)
+-              rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
+-      else
+-              rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
++      switch (xop->op_origin) {
++      case XCOL_SOURCE_RECV_OP:
++              rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
++                                              xop->dst_tid_wwn,
++                                              &xop->dst_dev,
++                                              &xop->remote_lun_ref);
++              break;
++      case XCOL_DEST_RECV_OP:
++              rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
++                                              xop->src_tid_wwn,
++                                              &xop->src_dev,
++                                              &xop->remote_lun_ref);
++              break;
++      default:
++              pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
++                      "stdi: %hu dtdi: %hu\n", xop->stdi, xop->dtdi);
++              rc = -EINVAL;
++              break;
++      }
+       /*
+        * If a matching IEEE NAA 0x83 descriptor for the requested device
+        * is not located on this node, return COPY_ABORTED with ASQ/ASQC
+@@ -261,7 +304,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
+       pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
+                xop->dst_dev, &xop->dst_tid_wwn[0]);
+ 
+-      return ret;
++      return cscd_index;
+ 
+ out:
+       return -EINVAL;
+@@ -305,17 +348,26 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
+ 
+ static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
+                               struct xcopy_op *xop, unsigned char *p,
+-                              unsigned int sdll)
++                              unsigned int sdll, sense_reason_t *sense_ret)
+ {
+       unsigned char *desc = p;
+       unsigned int start = 0;
+       int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;
+ 
++      *sense_ret = TCM_INVALID_PARAMETER_LIST;
++
+       if (offset != 0) {
+               pr_err("XCOPY segment descriptor list length is not"
+                       " multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
+               return -EINVAL;
+       }
++      if (sdll > RCR_OP_MAX_SG_DESC_COUNT * XCOPY_SEGMENT_DESC_LEN) {
++              pr_err("XCOPY supports %u segment descriptor(s), sdll: %u too"
++                      " large..\n", RCR_OP_MAX_SG_DESC_COUNT, sdll);
++              /* spc4r37 6.4.3.5 SEGMENT DESCRIPTOR LIST LENGTH field */
++              *sense_ret = TCM_TOO_MANY_SEGMENT_DESCS;
++              return -EINVAL;
++      }
+ 
+       while (start < sdll) {
+               /*
+@@ -372,18 +424,12 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
+ 
+ static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
+ {
+-      struct se_device *remote_dev;
+-
+       if (xop->op_origin == XCOL_SOURCE_RECV_OP)
+-              remote_dev = xop->dst_dev;
++              pr_debug("putting dst lun_ref for %p\n", xop->dst_dev);
+       else
+-              remote_dev = xop->src_dev;
++              pr_debug("putting src lun_ref for %p\n", xop->src_dev);
+ 
+-      pr_debug("Calling configfs_undepend_item for"
+-                " remote_dev: %p remote_dev->dev_group: %p\n",
+-                remote_dev, &remote_dev->dev_group.cg_item);
+-
+-      target_undepend_item(&remote_dev->dev_group.cg_item);
++      percpu_ref_put(xop->remote_lun_ref);
+ }
+ 
+ static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
+@@ -893,6 +939,20 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
+               " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
+               tdll, sdll, inline_dl);
+ 
++      /*
++       * skip over the target descriptors until segment descriptors
++       * have been passed - CSCD ids are needed to determine src and dest.
++       */
++      seg_desc = &p[16] + tdll;
++
++      rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc,
++                                                  sdll, &ret);
++      if (rc <= 0)
++              goto out;
++
++      pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
++                              rc * XCOPY_SEGMENT_DESC_LEN);
++
+      rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
+       if (rc <= 0)
+               goto out;
+@@ -910,18 +970,8 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
+ 
+       pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
+                               rc * XCOPY_TARGET_DESC_LEN);
+-      seg_desc = &p[16];
+-      seg_desc += (rc * XCOPY_TARGET_DESC_LEN);
+-
+-      rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
+-      if (rc <= 0) {
+-              xcopy_pt_undepend_remotedev(xop);
+-              goto out;
+-      }
+       transport_kunmap_data_sg(se_cmd);
+ 
+-      pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
+-                              rc * XCOPY_SEGMENT_DESC_LEN);
+       INIT_WORK(&xop->xop_work, target_xcopy_do_work);
+       queue_work(xcopy_wq, &xop->xop_work);
+       return TCM_NO_SENSE;
+diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
+index 700a981c7b415..7db8d0c9223f8 100644
+--- a/drivers/target/target_core_xcopy.h
++++ b/drivers/target/target_core_xcopy.h
+@@ -19,6 +19,7 @@ struct xcopy_op {
+       struct se_device *dst_dev;
+       unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+       unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
++      struct percpu_ref *remote_lun_ref;
+ 
+       sector_t src_lba;
+       sector_t dst_lba;
+diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
+index 9213a9e046ae0..99caaae01caba 100644
+--- a/fs/ubifs/io.c
++++ b/fs/ubifs/io.c
+@@ -331,7 +331,7 @@ void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
+ {
+       uint32_t crc;
+ 
+-      ubifs_assert(pad >= 0 && !(pad & 7));
++      ubifs_assert(pad >= 0);
+ 
+       if (pad >= UBIFS_PAD_NODE_SZ) {
+               struct ubifs_ch *ch = buf;
+@@ -721,6 +721,10 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
+                * write-buffer.
+                */
+               memcpy(wbuf->buf + wbuf->used, buf, len);
++              if (aligned_len > len) {
++                      ubifs_assert(aligned_len - len < 8);
++                      ubifs_pad(c, wbuf->buf + wbuf->used + len, aligned_len - len);
++              }
+ 
+               if (aligned_len == wbuf->avail) {
+                       dbg_io("flush jhead %s wbuf to LEB %d:%d",
+@@ -813,13 +817,18 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
+       }
+ 
+       spin_lock(&wbuf->lock);
+-      if (aligned_len)
++      if (aligned_len) {
+               /*
+                * And now we have what's left and what does not take whole
+                * max. write unit, so write it to the write-buffer and we are
+                * done.
+                */
+               memcpy(wbuf->buf, buf + written, len);
++              if (aligned_len > len) {
++                      ubifs_assert(aligned_len - len < 8);
++                      ubifs_pad(c, wbuf->buf + len, aligned_len - len);
++              }
++      }
+ 
+       if (c->leb_size - wbuf->offs >= c->max_write_size)
+               wbuf->size = c->max_write_size;
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 4fdb1d9848444..36198563fb8bc 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -460,7 +460,10 @@
+  */
+ #define TEXT_TEXT                                                     \
+               ALIGN_FUNCTION();                                       \
+-              *(.text.hot TEXT_MAIN .text.fixup .text.unlikely)       \
++              *(.text.hot .text.hot.*)                                \
++              *(TEXT_MAIN .text.fixup)                                \
++              *(.text.unlikely .text.unlikely.*)                      \
++              *(.text.unknown .text.unknown.*)                        \
+               *(.ref.text)                                            \
+       MEM_KEEP(init.text)                                             \
+       MEM_KEEP(exit.text)                                             \
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 30f99ce4c6cea..8a70d38f13329 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -178,6 +178,10 @@ enum tcm_sense_reason_table {
+       TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED  = R(0x16),
+       TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED  = R(0x17),
+       TCM_COPY_TARGET_DEVICE_NOT_REACHABLE    = R(0x18),
++      TCM_TOO_MANY_TARGET_DESCS               = R(0x19),
++      TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE   = R(0x1a),
++      TCM_TOO_MANY_SEGMENT_DESCS              = R(0x1b),
++      TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE  = R(0x1c),
+ #undef R
+ };
+ 
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index a4c4234976862..026f4525063c1 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1592,6 +1592,12 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
+               skb->csum = csum_block_sub(skb->csum,
+                                          skb_checksum(skb, len, delta, 0),
+                                          len);
++      } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
++              int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len;
++              int offset = skb_checksum_start_offset(skb) + skb->csum_offset;
++
++              if (offset + sizeof(__sum16) > hdlen)
++                      return -EINVAL;
+       }
+       return __pskb_trim(skb, len);
+ }
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index c37e9598262e5..3164bae4024a4 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -300,7 +300,7 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk
+       if (skb_is_gso(skb))
+               return ip_finish_output_gso(net, sk, skb, mtu);
+ 
+-      if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
++      if (skb->len > mtu || IPCB(skb)->frag_max_size)
+               return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
+ 
+       return ip_finish_output2(net, sk, skb);
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 5f2e3334cccec..26e1dbc958189 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -743,7 +743,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+               goto tx_error;
+       }
+ 
+-      if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
++      df = tnl_params->frag_off;
++      if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
++              df |= (inner_iph->frag_off & htons(IP_DF));
++
++      if (tnl_update_pmtu(dev, skb, rt, df, inner_iph)) {
+               ip_rt_put(rt);
+               goto tx_error;
+       }
+@@ -771,10 +775,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+                       ttl = ip4_dst_hoplimit(&rt->dst);
+       }
+ 
+-      df = tnl_params->frag_off;
+-      if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
+-              df |= (inner_iph->frag_off&htons(IP_DF));
+-
+       max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+                       + rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
+       if (max_headroom > dev->needed_headroom)
