commit:     fa2fadac05034f2158a89cd4dcce856015dcae8c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Aug  9 10:54:46 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Aug  9 10:54:46 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=fa2fadac

Linux patch 4.14.62

 0000_README              |   4 +
 1061_linux-4.14.62.patch | 797 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 801 insertions(+)

diff --git a/0000_README b/0000_README
index 64029e1..b530931 100644
--- a/0000_README
+++ b/0000_README
@@ -287,6 +287,10 @@ Patch:  1060_linux-4.14.61.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.14.61
 
+Patch:  1061_linux-4.14.62.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.14.62
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1061_linux-4.14.62.patch b/1061_linux-4.14.62.patch
new file mode 100644
index 0000000..a1d7ceb
--- /dev/null
+++ b/1061_linux-4.14.62.patch
@@ -0,0 +1,797 @@
+diff --git a/Makefile b/Makefile
+index 4bd65eabd298..d407ecfdee0b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 61
++SUBLEVEL = 62
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
+index f96830ffd9f1..75c6b98585ba 100644
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -376,6 +376,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
+               goto err_desc;
+       }
+ 
++      reinit_completion(&dma->cmd_complete);
+       txdesc->callback = i2c_imx_dma_callback;
+       txdesc->callback_param = i2c_imx;
+       if (dma_submit_error(dmaengine_submit(txdesc))) {
+@@ -619,7 +620,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
+        * The first byte must be transmitted by the CPU.
+        */
+       imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR);
+-      reinit_completion(&i2c_imx->dma->cmd_complete);
+       time_left = wait_for_completion_timeout(
+                               &i2c_imx->dma->cmd_complete,
+                               msecs_to_jiffies(DMA_TIMEOUT));
+@@ -678,7 +678,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
+       if (result)
+               return result;
+ 
+-      reinit_completion(&i2c_imx->dma->cmd_complete);
+       time_left = wait_for_completion_timeout(
+                               &i2c_imx->dma->cmd_complete,
+                               msecs_to_jiffies(DMA_TIMEOUT));
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index f0b06b14e782..16249b0953ff 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -1061,7 +1061,7 @@ static const struct idle_cpu idle_cpu_dnv = {
+ };
+ 
+ #define ICPU(model, cpu) \
+-      { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
++      { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&cpu }
+ 
+ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
+       ICPU(INTEL_FAM6_NEHALEM_EP,             idle_cpu_nehalem),
+@@ -1125,6 +1125,11 @@ static int __init intel_idle_probe(void)
+               return -ENODEV;
+       }
+ 
++      if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
++              pr_debug("Please enable MWAIT in BIOS SETUP\n");
++              return -ENODEV;
++      }
++
+       if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+               return -ENODEV;
+ 
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index f5643d107cc6..a67d03716510 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -77,7 +77,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
+  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
+  */
+ struct nvme_dev {
+-      struct nvme_queue **queues;
++      struct nvme_queue *queues;
+       struct blk_mq_tag_set tagset;
+       struct blk_mq_tag_set admin_tagset;
+       u32 __iomem *dbs;
+@@ -348,7 +348,7 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+                               unsigned int hctx_idx)
+ {
+       struct nvme_dev *dev = data;
+-      struct nvme_queue *nvmeq = dev->queues[0];
++      struct nvme_queue *nvmeq = &dev->queues[0];
+ 
+       WARN_ON(hctx_idx != 0);
+       WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
+@@ -370,7 +370,7 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+                         unsigned int hctx_idx)
+ {
+       struct nvme_dev *dev = data;
+-      struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
++      struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
+ 
+       if (!nvmeq->tags)
+               nvmeq->tags = &dev->tagset.tags[hctx_idx];
+@@ -386,7 +386,7 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
+       struct nvme_dev *dev = set->driver_data;
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
+-      struct nvme_queue *nvmeq = dev->queues[queue_idx];
++      struct nvme_queue *nvmeq = &dev->queues[queue_idx];
+ 
+       BUG_ON(!nvmeq);
+       iod->nvmeq = nvmeq;
+@@ -900,7 +900,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
+ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
+ {
+       struct nvme_dev *dev = to_nvme_dev(ctrl);
+-      struct nvme_queue *nvmeq = dev->queues[0];
++      struct nvme_queue *nvmeq = &dev->queues[0];
+       struct nvme_command c;
+ 
+       memset(&c, 0, sizeof(c));
+@@ -1146,7 +1146,6 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
+       if (nvmeq->sq_cmds)
+               dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+                                       nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+-      kfree(nvmeq);
+ }
+ 
+ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
+@@ -1154,10 +1153,8 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
+       int i;
+ 
+       for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
+-              struct nvme_queue *nvmeq = dev->queues[i];
+               dev->ctrl.queue_count--;
+-              dev->queues[i] = NULL;
+-              nvme_free_queue(nvmeq);
++              nvme_free_queue(&dev->queues[i]);
+       }
+ }
+ 
+@@ -1189,10 +1186,8 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
+ 
+ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
+ {
+-      struct nvme_queue *nvmeq = dev->queues[0];
++      struct nvme_queue *nvmeq = &dev->queues[0];
+ 
+-      if (!nvmeq)
+-              return;
+       if (nvme_suspend_queue(nvmeq))
+               return;
+ 
+@@ -1246,13 +1241,13 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+       return 0;
+ }
+ 
+-static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
+-                                                      int depth, int node)
++static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
++              int depth, int node)
+ {
+-      struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL,
+-                                                      node);
+-      if (!nvmeq)
+-              return NULL;
++      struct nvme_queue *nvmeq = &dev->queues[qid];
++
++      if (dev->ctrl.queue_count > qid)
++              return 0;
+ 
+       nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
+                                         &nvmeq->cq_dma_addr, GFP_KERNEL);
+@@ -1271,17 +1266,15 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
+       nvmeq->q_depth = depth;
+       nvmeq->qid = qid;
+       nvmeq->cq_vector = -1;
+-      dev->queues[qid] = nvmeq;
+       dev->ctrl.queue_count++;
+ 
+-      return nvmeq;
++      return 0;
+ 
+  free_cqdma:
+       dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
+                                                       nvmeq->cq_dma_addr);
+  free_nvmeq:
+-      kfree(nvmeq);
+-      return NULL;
++      return -ENOMEM;
+ }
+ 
+ static int queue_request_irq(struct nvme_queue *nvmeq)
+@@ -1468,14 +1461,12 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
+       if (result < 0)
+               return result;
+ 
+-      nvmeq = dev->queues[0];
+-      if (!nvmeq) {
+-              nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
+-                                      dev_to_node(dev->dev));
+-              if (!nvmeq)
+-                      return -ENOMEM;
+-      }
++      result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
++                      dev_to_node(dev->dev));
++      if (result)
++              return result;
+ 
++      nvmeq = &dev->queues[0];
+       aqa = nvmeq->q_depth - 1;
+       aqa |= aqa << 16;
+ 
+@@ -1505,7 +1496,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
+ 
+       for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
+               /* vector == qid - 1, match nvme_create_queue */
+-              if (!nvme_alloc_queue(dev, i, dev->q_depth,
++              if (nvme_alloc_queue(dev, i, dev->q_depth,
+                    pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
+                       ret = -ENOMEM;
+                       break;
+@@ -1514,7 +1505,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
+ 
+       max = min(dev->max_qid, dev->ctrl.queue_count - 1);
+       for (i = dev->online_queues; i <= max; i++) {
+-              ret = nvme_create_queue(dev->queues[i], i);
++              ret = nvme_create_queue(&dev->queues[i], i);
+               if (ret)
+                       break;
+       }
+@@ -1770,7 +1761,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
+ 
+ static int nvme_setup_io_queues(struct nvme_dev *dev)
+ {
+-      struct nvme_queue *adminq = dev->queues[0];
++      struct nvme_queue *adminq = &dev->queues[0];
+       struct pci_dev *pdev = to_pci_dev(dev->dev);
+       int result, nr_io_queues;
+       unsigned long size;
+@@ -1896,7 +1887,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
+  retry:
+               timeout = ADMIN_TIMEOUT;
+               for (; i > 0; i--, sent++)
+-                      if (nvme_delete_queue(dev->queues[i], opcode))
++                      if (nvme_delete_queue(&dev->queues[i], opcode))
+                               break;
+ 
+               while (sent--) {
+@@ -2081,7 +2072,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
+ 
+       queues = dev->online_queues - 1;
+       for (i = dev->ctrl.queue_count - 1; i > 0; i--)
+-              nvme_suspend_queue(dev->queues[i]);
++              nvme_suspend_queue(&dev->queues[i]);
+ 
+       if (dead) {
+               /* A device might become IO incapable very soon during
+@@ -2089,7 +2080,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
+                * queue_count can be 0 here.
+                */
+               if (dev->ctrl.queue_count)
+-                      nvme_suspend_queue(dev->queues[0]);
++                      nvme_suspend_queue(&dev->queues[0]);
+       } else {
+               nvme_disable_io_queues(dev, queues);
+               nvme_disable_admin_queue(dev, shutdown);
+@@ -2345,7 +2336,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+       dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
+       if (!dev)
+               return -ENOMEM;
+-      dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
++
+      dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(struct nvme_queue),
+                                                       GFP_KERNEL, node);
+       if (!dev->queues)
+               goto free;
+diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
+index 8e21211b904b..b7a5d1065378 100644
+--- a/drivers/nvme/target/fc.c
++++ b/drivers/nvme/target/fc.c
+@@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
+       struct work_struct              work;
+ } __aligned(sizeof(unsigned long long));
+ 
++/* desired maximum for a single sequence - if sg list allows it */
+ #define NVMET_FC_MAX_SEQ_LENGTH               (256 * 1024)
+-#define NVMET_FC_MAX_XFR_SGENTS               (NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
+ 
+ enum nvmet_fcp_datadir {
+       NVMET_FCP_NODATA,
+@@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
+       struct nvme_fc_cmd_iu           cmdiubuf;
+       struct nvme_fc_ersp_iu          rspiubuf;
+       dma_addr_t                      rspdma;
++      struct scatterlist              *next_sg;
+       struct scatterlist              *data_sg;
+       int                             data_sg_cnt;
+       u32                             total_length;
+@@ -1000,8 +1001,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
+       INIT_LIST_HEAD(&newrec->assoc_list);
+       kref_init(&newrec->ref);
+       ida_init(&newrec->assoc_cnt);
+-      newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
+-                                      template->max_sgl_segments);
++      newrec->max_sg_cnt = template->max_sgl_segments;
+ 
+       ret = nvmet_fc_alloc_ls_iodlist(newrec);
+       if (ret) {
+@@ -1717,6 +1717,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
+                               ((fod->io_dir == NVMET_FCP_WRITE) ?
+                                       DMA_FROM_DEVICE : DMA_TO_DEVICE));
+                               /* note: write from initiator perspective */
++      fod->next_sg = fod->data_sg;
+ 
+       return 0;
+ 
+@@ -1874,24 +1875,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
+                               struct nvmet_fc_fcp_iod *fod, u8 op)
+ {
+       struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
++      struct scatterlist *sg = fod->next_sg;
+       unsigned long flags;
+-      u32 tlen;
++      u32 remaininglen = fod->total_length - fod->offset;
++      u32 tlen = 0;
+       int ret;
+ 
+       fcpreq->op = op;
+       fcpreq->offset = fod->offset;
+       fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
+ 
+-      tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
+-                      (fod->total_length - fod->offset));
++      /*
++       * for next sequence:
++       *  break at a sg element boundary
++       *  attempt to keep sequence length capped at
++       *    NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
++       *    be longer if a single sg element is larger
++       *    than that amount. This is done to avoid creating
++       *    a new sg list to use for the tgtport api.
++       */
++      fcpreq->sg = sg;
++      fcpreq->sg_cnt = 0;
++      while (tlen < remaininglen &&
++             fcpreq->sg_cnt < tgtport->max_sg_cnt &&
++             tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
++              fcpreq->sg_cnt++;
++              tlen += sg_dma_len(sg);
++              sg = sg_next(sg);
++      }
++      if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
++              fcpreq->sg_cnt++;
++              tlen += min_t(u32, sg_dma_len(sg), remaininglen);
++              sg = sg_next(sg);
++      }
++      if (tlen < remaininglen)
++              fod->next_sg = sg;
++      else
++              fod->next_sg = NULL;
++
+       fcpreq->transfer_length = tlen;
+       fcpreq->transferred_length = 0;
+       fcpreq->fcp_error = 0;
+       fcpreq->rsplen = 0;
+ 
+-      fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
+-      fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
+-
+       /*
+        * If the last READDATA request: check if LLDD supports
+        * combined xfr with response.
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index a8da543b3814..4708eb9df71b 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -624,7 +624,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
+       union acpi_object *obj;
+       struct pci_host_bridge *bridge;
+ 
+-      if (acpi_pci_disabled || !bus->bridge)
++      if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
+               return;
+ 
+       acpi_pci_slot_enumerate(bus);
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index 9ce28c4f9812..b09d29931393 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -2142,6 +2142,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
+               msleep(1000);
+ 
+       qla24xx_disable_vp(vha);
++      qla2x00_wait_for_sess_deletion(vha);
+ 
+       vha->flags.delete_progress = 1;
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
+index f852ca60c49f..89706341514e 100644
+--- a/drivers/scsi/qla2xxx/qla_gbl.h
++++ b/drivers/scsi/qla2xxx/qla_gbl.h
+@@ -200,6 +200,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
+       uint16_t *);
+ int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
+ int qla24xx_async_abort_cmd(srb_t *);
++void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
+ 
+ /*
+  * Global Functions in qla_mid.c source file.
+diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
+index 59ecc4eda6cd..2a19ec0660cb 100644
+--- a/drivers/scsi/qla2xxx/qla_gs.c
++++ b/drivers/scsi/qla2xxx/qla_gs.c
+@@ -3368,6 +3368,10 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
+       return rval;
+ 
+ done_free_sp:
++      spin_lock_irqsave(&vha->hw->vport_slock, flags);
++      list_del(&sp->elem);
++      spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
++
+       if (sp->u.iocb_cmd.u.ctarg.req) {
+               dma_free_coherent(&vha->hw->pdev->dev,
+                       sizeof(struct ct_sns_pkt),
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index bcde6130f121..1d42d38f5a45 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -1326,11 +1326,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
+ 
+       wait_for_completion(&tm_iocb->u.tmf.comp);
+ 
+-      rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
+-          QLA_SUCCESS : QLA_FUNCTION_FAILED;
++      rval = tm_iocb->u.tmf.data;
+ 
+-      if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
+-              ql_dbg(ql_dbg_taskm, vha, 0x8030,
++      if (rval != QLA_SUCCESS) {
++              ql_log(ql_log_warn, vha, 0x8030,
+                   "TM IOCB failed (%x).\n", rval);
+       }
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
+index 9a2c86eacf44..3f5a0f0f8b62 100644
+--- a/drivers/scsi/qla2xxx/qla_inline.h
++++ b/drivers/scsi/qla2xxx/qla_inline.h
+@@ -221,6 +221,8 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
+       sp->fcport = fcport;
+       sp->iocbs = 1;
+       sp->vha = qpair->vha;
++      INIT_LIST_HEAD(&sp->elem);
++
+ done:
+       if (!sp)
+               QLA_QPAIR_MARK_NOT_BUSY(qpair);
+diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
+index d77dde89118e..375a88e18afe 100644
+--- a/drivers/scsi/qla2xxx/qla_mid.c
++++ b/drivers/scsi/qla2xxx/qla_mid.c
+@@ -152,10 +152,15 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
+ {
+       unsigned long flags;
+       int ret;
++      fc_port_t *fcport;
+ 
+       ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
+       atomic_set(&vha->loop_state, LOOP_DOWN);
+       atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
++      list_for_each_entry(fcport, &vha->vp_fcports, list)
++              fcport->logout_on_delete = 0;
++
++      qla2x00_mark_all_devices_lost(vha, 0);
+ 
+       /* Remove port id from vp target map */
+       spin_lock_irqsave(&vha->hw->vport_slock, flags);
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 1be76695e692..7d7fb5bbb600 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1136,7 +1136,7 @@ static inline int test_fcport_count(scsi_qla_host_t *vha)
+  * qla2x00_wait_for_sess_deletion can only be called from remove_one.
+  * it has dependency on UNLOADING flag to stop device discovery
+  */
+-static void
++void
+ qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
+ {
+       qla2x00_mark_all_devices_lost(vha, 0);
+@@ -5794,8 +5794,9 @@ qla2x00_do_dpc(void *data)
+                               set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+               }
+ 
+-              if (test_and_clear_bit(ISP_ABORT_NEEDED,
+-                                              &base_vha->dpc_flags)) {
++              if (test_and_clear_bit
++                  (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
++                  !test_bit(UNLOADING, &base_vha->dpc_flags)) {
+ 
+                       ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
+                           "ISP abort scheduled.\n");
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 7fa50e12f18e..5b62e06567a3 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -4280,6 +4280,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
+       struct extent_map *em;
+       u64 start = page_offset(page);
+       u64 end = start + PAGE_SIZE - 1;
++      struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
+ 
+       if (gfpflags_allow_blocking(mask) &&
+           page->mapping->host->i_size > SZ_16M) {
+@@ -4302,6 +4303,8 @@ int try_release_extent_mapping(struct extent_map_tree *map,
+                                           extent_map_end(em) - 1,
+                                           EXTENT_LOCKED | EXTENT_WRITEBACK,
+                                           0, NULL)) {
++                              set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
++                                      &btrfs_inode->runtime_flags);
+                               remove_extent_mapping(map, em);
+                               /* once for the rb tree */
+                               free_extent_map(em);
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 6b0c1ea95196..f30d2bf40471 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2301,7 +2301,7 @@ static int ext4_check_descriptors(struct super_block *sb,
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
+       ext4_fsblk_t last_block;
+-      ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1;
++      ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
+       ext4_fsblk_t block_bitmap;
+       ext4_fsblk_t inode_bitmap;
+       ext4_fsblk_t inode_table;
+@@ -4038,13 +4038,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+                       goto failed_mount2;
+               }
+       }
++      sbi->s_gdb_count = db_count;
+       if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
+               ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
+               ret = -EFSCORRUPTED;
+               goto failed_mount2;
+       }
+ 
+-      sbi->s_gdb_count = db_count;
+       get_random_bytes(&sbi->s_next_generation, sizeof(u32));
+       spin_lock_init(&sbi->s_next_gen_lock);
+ 
+diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
+index c60f3d32ee91..a6797986b625 100644
+--- a/fs/jfs/xattr.c
++++ b/fs/jfs/xattr.c
+@@ -491,15 +491,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
+       if (size > PSIZE) {
+               /*
+                * To keep the rest of the code simple.  Allocate a
+-               * contiguous buffer to work with
++               * contiguous buffer to work with. Make the buffer large
++               * enough to make use of the whole extent.
+                */
+-              ea_buf->xattr = kmalloc(size, GFP_KERNEL);
++              ea_buf->max_size = (size + sb->s_blocksize - 1) &
++                  ~(sb->s_blocksize - 1);
++
++              ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
+               if (ea_buf->xattr == NULL)
+                       return -ENOMEM;
+ 
+               ea_buf->flag = EA_MALLOC;
+-              ea_buf->max_size = (size + sb->s_blocksize - 1) &
+-                  ~(sb->s_blocksize - 1);
+ 
+               if (ea_size == 0)
+                       return 0;
+diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
+index 5c16db86b38f..40e53a4fc0a6 100644
+--- a/fs/xfs/libxfs/xfs_attr_leaf.c
++++ b/fs/xfs/libxfs/xfs_attr_leaf.c
+@@ -785,9 +785,8 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
+       ASSERT(blkno == 0);
+       error = xfs_attr3_leaf_create(args, blkno, &bp);
+       if (error) {
+-              error = xfs_da_shrink_inode(args, 0, bp);
+-              bp = NULL;
+-              if (error)
++              /* xfs_attr3_leaf_create may not have instantiated a block */
++              if (bp && (xfs_da_shrink_inode(args, 0, bp) != 0))
+                       goto out;
+               xfs_idata_realloc(dp, size, XFS_ATTR_FORK);     /* try to put */
+               memcpy(ifp->if_u1.if_data, tmpbuffer, size);    /* it back */
+diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
+index 43005fbe8b1e..544b5211221c 100644
+--- a/fs/xfs/xfs_icache.c
++++ b/fs/xfs/xfs_icache.c
+@@ -305,6 +305,46 @@ xfs_reinit_inode(
+       return error;
+ }
+ 
++/*
++ * If we are allocating a new inode, then check what was returned is
++ * actually a free, empty inode. If we are not allocating an inode,
++ * then check we didn't find a free inode.
++ *
++ * Returns:
++ *    0               if the inode free state matches the lookup context
++ *    -ENOENT         if the inode is free and we are not allocating
++ *    -EFSCORRUPTED   if there is any state mismatch at all
++ */
++static int
++xfs_iget_check_free_state(
++      struct xfs_inode        *ip,
++      int                     flags)
++{
++      if (flags & XFS_IGET_CREATE) {
++              /* should be a free inode */
++              if (VFS_I(ip)->i_mode != 0) {
++                      xfs_warn(ip->i_mount,
++"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
++                              ip->i_ino, VFS_I(ip)->i_mode);
++                      return -EFSCORRUPTED;
++              }
++
++              if (ip->i_d.di_nblocks != 0) {
++                      xfs_warn(ip->i_mount,
++"Corruption detected! Free inode 0x%llx has blocks allocated!",
++                              ip->i_ino);
++                      return -EFSCORRUPTED;
++              }
++              return 0;
++      }
++
++      /* should be an allocated inode */
++      if (VFS_I(ip)->i_mode == 0)
++              return -ENOENT;
++
++      return 0;
++}
++
+ /*
+  * Check the validity of the inode we just found it the cache
+  */
+@@ -354,12 +394,12 @@ xfs_iget_cache_hit(
+       }
+ 
+       /*
+-       * If lookup is racing with unlink return an error immediately.
++       * Check the inode free state is valid. This also detects lookup
++       * racing with unlinks.
+        */
+-      if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) {
+-              error = -ENOENT;
++      error = xfs_iget_check_free_state(ip, flags);
++      if (error)
+               goto out_error;
+-      }
+ 
+       /*
+        * If IRECLAIMABLE is set, we've torn down the VFS inode already.
+@@ -475,10 +515,14 @@ xfs_iget_cache_miss(
+ 
+       trace_xfs_iget_miss(ip);
+ 
+-      if ((VFS_I(ip)->i_mode == 0) && !(flags & XFS_IGET_CREATE)) {
+-              error = -ENOENT;
++
++      /*
++       * Check the inode free state is valid. This also detects lookup
++       * racing with unlinks.
++       */
++      error = xfs_iget_check_free_state(ip, flags);
++      if (error)
+               goto out_destroy;
+-      }
+ 
+       /*
+        * Preload the radix tree so we can insert safely under the
+diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
+index 289e4d54e3e0..5caa062a02b2 100644
+--- a/include/linux/ring_buffer.h
++++ b/include/linux/ring_buffer.h
+@@ -160,6 +160,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
+ void ring_buffer_record_off(struct ring_buffer *buffer);
+ void ring_buffer_record_on(struct ring_buffer *buffer);
+ int ring_buffer_record_is_on(struct ring_buffer *buffer);
++int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
+ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
+ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
+ 
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index b02caa442776..069311541577 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -1030,6 +1030,13 @@ static int irq_setup_forced_threading(struct irqaction *new)
+       if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
+               return 0;
+ 
++      /*
++       * No further action required for interrupts which are requested as
++       * threaded interrupts already
++       */
++      if (new->handler == irq_default_primary_handler)
++              return 0;
++
+       new->flags |= IRQF_ONESHOT;
+ 
+       /*
+@@ -1037,7 +1044,7 @@ static int irq_setup_forced_threading(struct irqaction *new)
+        * thread handler. We force thread them as well by creating a
+        * secondary action.
+        */
+-      if (new->handler != irq_default_primary_handler && new->thread_fn) {
++      if (new->handler && new->thread_fn) {
+               /* Allocate the secondary action */
+               new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+               if (!new->secondary)
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index e89c3b0cff6d..f40ac7191257 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -382,7 +382,7 @@ static inline void tick_irq_exit(void)
+ 
+       /* Make sure that timer wheel updates are propagated */
+       if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
+-              if (!in_interrupt())
++              if (!in_irq())
+                       tick_nohz_irq_exit();
+       }
+ #endif
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index bb2af74e6b62..ea3c062e7e1c 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -676,7 +676,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
+ 
+ static inline bool local_timer_softirq_pending(void)
+ {
+-      return local_softirq_pending() & TIMER_SOFTIRQ;
++      return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
+ }
+ 
+ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 36f018b15392..fd7809004297 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -3109,6 +3109,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer)
+       return !atomic_read(&buffer->record_disabled);
+ }
+ 
++/**
++ * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
++ * @buffer: The ring buffer to see if write is set enabled
++ *
++ * Returns true if the ring buffer is set writable by ring_buffer_record_on().
++ * Note that this does NOT mean it is in a writable state.
++ *
++ * It may return true when the ring buffer has been disabled by
++ * ring_buffer_record_disable(), as that is a temporary disabling of
++ * the ring buffer.
++ */
++int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
++{
++      return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
++}
++
+ /**
+  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
+  * @buffer: The ring buffer to stop writes to.
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index e268750bd4ad..20919489883f 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1366,6 +1366,12 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+ 
+       arch_spin_lock(&tr->max_lock);
+ 
++      /* Inherit the recordable setting from trace_buffer */
++      if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
++              ring_buffer_record_on(tr->max_buffer.buffer);
++      else
++              ring_buffer_record_off(tr->max_buffer.buffer);
++
+       buf = tr->trace_buffer.buffer;
+       tr->trace_buffer.buffer = tr->max_buffer.buffer;
+       tr->max_buffer.buffer = buf;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 68c9d1833b95..c67abda5d639 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -981,8 +981,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
+ 
+       if (nlk->ngroups == 0)
+               groups = 0;
+-      else
+-              groups &= (1ULL << nlk->ngroups) - 1;
++      else if (nlk->ngroups < 8*sizeof(groups))
++              groups &= (1UL << nlk->ngroups) - 1;
+ 
+       bound = nlk->bound;
+       if (bound) {
