[PATCH V4 17/25] mmc: mmci: add variant property to define irq pio mask

2018-10-02 Thread Ludovic Barre
From: Ludovic Barre 

This patch allows each variant to define its own PIO interrupt mask.
This is needed to support the STM32 sdmmc variant, in which some of
these bits have a different meaning (bits 21, 20, 13, 12 and 9).
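
For reference, a condensed sketch of the resulting single-IRQ path. The
helper name example_set_mask1() is illustrative only; the fields used
(irq_pio_mask, mmcimask1, mask1_reg) are the ones touched by the diff below.

static void example_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		/* clear this variant's PIO bits, then OR in the requested ones */
		mask0 &= ~host->variant->irq_pio_mask;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	if (host->variant->mmcimask1)
		writel(mask, base + MMCIMASK1);

	host->mask1_reg = mask;
}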

Signed-off-by: Ludovic Barre 
---
 drivers/mmc/host/mmci.c | 13 +++--
 drivers/mmc/host/mmci.h |  5 -
 2 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 97b0f5c..ac33d23 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -61,6 +61,7 @@ static struct variant_data variant_arm = {
.f_max  = 1,
.reversed_irq_handling  = true,
.mmcimask1  = true,
+   .irq_pio_mask   = MCI_IRQ_PIO_MASK,
.start_err  = MCI_STARTBITERR,
.opendrain  = MCI_ROD,
.init   = mmci_variant_init,
@@ -79,6 +80,7 @@ static struct variant_data variant_arm_extended_fifo = {
.pwrreg_powerup = MCI_PWR_UP,
.f_max  = 1,
.mmcimask1  = true,
+   .irq_pio_mask   = MCI_IRQ_PIO_MASK,
.start_err  = MCI_STARTBITERR,
.opendrain  = MCI_ROD,
.init   = mmci_variant_init,
@@ -98,6 +100,7 @@ static struct variant_data variant_arm_extended_fifo_hwfc = {
.pwrreg_powerup = MCI_PWR_UP,
.f_max  = 1,
.mmcimask1  = true,
+   .irq_pio_mask   = MCI_IRQ_PIO_MASK,
.start_err  = MCI_STARTBITERR,
.opendrain  = MCI_ROD,
.init   = mmci_variant_init,
@@ -123,6 +126,7 @@ static struct variant_data variant_u300 = {
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
.mmcimask1  = true,
+   .irq_pio_mask   = MCI_IRQ_PIO_MASK,
.start_err  = MCI_STARTBITERR,
.opendrain  = MCI_OD,
.init   = mmci_variant_init,
@@ -149,6 +153,7 @@ static struct variant_data variant_nomadik = {
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
.mmcimask1  = true,
+   .irq_pio_mask   = MCI_IRQ_PIO_MASK,
.start_err  = MCI_STARTBITERR,
.opendrain  = MCI_OD,
.init   = mmci_variant_init,
@@ -181,6 +186,7 @@ static struct variant_data variant_ux500 = {
.busy_detect_mask   = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
.mmcimask1  = true,
+   .irq_pio_mask   = MCI_IRQ_PIO_MASK,
.start_err  = MCI_STARTBITERR,
.opendrain  = MCI_OD,
.init   = mmci_variant_init,
@@ -215,6 +221,7 @@ static struct variant_data variant_ux500v2 = {
.busy_detect_mask   = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
.mmcimask1  = true,
+   .irq_pio_mask   = MCI_IRQ_PIO_MASK,
.start_err  = MCI_STARTBITERR,
.opendrain  = MCI_OD,
.init   = mmci_variant_init,
@@ -231,6 +238,7 @@ static struct variant_data variant_stm32 = {
.cmdreg_lrsp_crc= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
.cmdreg_srsp_crc= MCI_CPSM_RESPONSE,
.cmdreg_srsp= MCI_CPSM_RESPONSE,
+   .irq_pio_mask   = MCI_IRQ_PIO_MASK,
.datalength_bits= 24,
.datactrl_blocksz   = 11,
.datactrl_dpsm_enable   = MCI_DPSM_ENABLE,
@@ -267,6 +275,7 @@ static struct variant_data variant_qcom = {
.qcom_fifo  = true,
.qcom_dml   = true,
.mmcimask1  = true,
+   .irq_pio_mask   = MCI_IRQ_PIO_MASK,
.start_err  = MCI_STARTBITERR,
.opendrain  = MCI_ROD,
.init   = qcom_variant_init,
@@ -548,7 +557,7 @@ static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
if (host->singleirq) {
unsigned int mask0 = readl(base + MMCIMASK0);
 
-   mask0 &= ~MCI_IRQ1MASK;
+   mask0 &= ~variant->irq_pio_mask;
mask0 |= mask;
 
writel(mask0, base + MMCIMASK0);
@@ -1453,7 +1462,7 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
if (status & host->mask1_reg)
mmci_pio_irq(irq, dev_id);
 
-   status &= ~MCI_IRQ1MASK;
+   status &= ~host->variant->irq_pio_mask;
}
 
/*
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index bd89745..4b10c82 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -186,7 +186,7 @@
MCI_CMDRESPENDMASK | MCI_CMDSENTMASK)
 
 

[PATCH V4 25/25] mmc: mmci: add stm32 sdmmc variant

2018-10-02 Thread Ludovic Barre
From: Ludovic Barre 

This patch adds a stm32 sdmmc variant, rev 1.1.
It introduces a new manufacturer id (0x53, ASCII 'S') to define the
new stm32 sdmmc family with a clean range of amba
revision/configuration bits (matching the sdmmc_ver
register and its major/minor fields).

Two variant properties are added:
-dma_lli, to enable linked list support.
-stm32_idmabsize_mask, which defines the valid range of the
 SDMMC_IDMABSIZER register, i.e. the number of bytes per buffer.

DT properties for sdmmc:
-Indicate signal directions (a single property
 covers d0dir, d123dir and cmd_dir).
-Select the command and data phase relation.
-Select "clock in" from an external driver.
An illustrative sketch of how the new variant properties might be
used follows below.

Signed-off-by: Ludovic Barre 
---
 drivers/mmc/host/Kconfig|  10 ++
 drivers/mmc/host/Makefile   |   1 +
 drivers/mmc/host/mmci.c |  30 
 drivers/mmc/host/mmci.h |   6 +
 drivers/mmc/host/mmci_stm32_sdmmc.c | 282 
 5 files changed, 329 insertions(+)
 create mode 100644 drivers/mmc/host/mmci_stm32_sdmmc.c

diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 674ba43..9464e95 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -34,6 +34,16 @@ config MMC_QCOM_DML
 
  if unsure, say N.
 
+config MMC_STM32_SDMMC
+   bool "STMicroelectronics STM32 SDMMC Controller"
+   depends on MMC_ARMMMCI
+   default y
+   help
+ This selects the STMicroelectronics STM32 SDMMC host controller.
+ If you have a STM32 sdmmc host with internal dma say Y or M here.
+
+ If unsure, say N.
+
 config MMC_PXA
tristate "Intel PXA25x/26x/27x Multimedia Card Interface support"
depends on ARCH_PXA
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 5363d06..720d377 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -6,6 +6,7 @@
 obj-$(CONFIG_MMC_ARMMMCI) += armmmci.o
 armmmci-y := mmci.o
 armmmci-$(CONFIG_MMC_QCOM_DML) += mmci_qcom_dml.o
+armmmci-$(CONFIG_MMC_STM32_SDMMC) += mmci_stm32_sdmmc.o
 obj-$(CONFIG_MMC_PXA)  += pxamci.o
 obj-$(CONFIG_MMC_MXC)  += mxcmmc.o
 obj-$(CONFIG_MMC_MXS)  += mxs-mmc.o
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 0421e18..6f9671b 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -253,6 +253,25 @@ static struct variant_data variant_stm32 = {
.init   = mmci_variant_init,
 };
 
+static struct variant_data variant_stm32_sdmmc = {
+   .fifosize   = 16 * 4,
+   .fifohalfsize   = 8 * 4,
+   .f_max  = 20800,
+   .stm32_clkdiv   = true,
+   .cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
+   .cmdreg_lrsp_crc= MCI_CPSM_STM32_LRSP_CRC,
+   .cmdreg_srsp_crc= MCI_CPSM_STM32_SRSP_CRC,
+   .cmdreg_srsp= MCI_CPSM_STM32_SRSP,
+   .data_cmd_enable= MCI_CPSM_STM32_CMDTRANS,
+   .irq_pio_mask   = MCI_IRQ_PIO_STM32_MASK,
+   .datactrl_first = true,
+   .datacnt_useless= true,
+   .datalength_bits= 25,
+   .datactrl_blocksz   = 14,
+   .stm32_idmabsize_mask   = GENMASK(12, 5),
+   .init   = sdmmc_variant_init,
+};
+
 static struct variant_data variant_qcom = {
.fifosize   = 16 * 4,
.fifohalfsize   = 8 * 4,
@@ -1731,6 +1750,12 @@ static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
host->pwr_reg_add |= MCI_ST_CMDDIREN;
if (of_get_property(np, "st,sig-pin-fbclk", NULL))
host->pwr_reg_add |= MCI_ST_FBCLKEN;
+   if (of_get_property(np, "st,sig-dir", NULL))
+   host->pwr_reg_add |= MCI_STM32_DIRPOL;
+   if (of_get_property(np, "st,neg-edge", NULL))
+   host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE;
+   if (of_get_property(np, "st,use-ckin", NULL))
+   host->clk_reg_add |= MCI_STM32_CLK_SELCKIN;
 
if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
@@ -2172,6 +2197,11 @@ static const struct amba_id mmci_ids[] = {
.mask   = 0x00ff,
.data   = &variant_stm32,
},
+   {
+   .id = 0x10153180,
+   .mask   = 0xf0ff,
.data   = &variant_stm32_sdmmc,
+   },
/* Qualcomm variants */
{
.id = 0x00051180,
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 583021e..48c5ba9 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -304,6 +304,8 @@ struct mmci_host;
  * @start_err: bitmask identifying the STARTBITERR bit inside MMCISTATUS
  *register.
  * @opendrain: bitmask identifying the OPENDRAIN bit inside MMCIPOWER register
+ * @dma_lli: true if variant has dma link list feature.
+ * @stm32_idmabsize_mask: stm32 sdmmc idma buffer size.
  */
 struct 

[PATCH V4 21/25] mmc: mmci: add optional reset property

2018-10-02 Thread Ludovic Barre
From: Ludovic Barre 

This patch adds optional reset management.
The STM32 sdmmc variant needs to reset its hardware block
during the power cycle procedure (for re-initialization).
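
For illustration only: how the optional reset could be exercised during
the power cycle, assuming host->rst was requested as in the hunk below.
The helper name and the 2 us delay are made up; reset_control_assert()
and reset_control_deassert() are the standard reset API and are no-ops
when no reset line is wired.

static void example_hw_reset(struct mmci_host *host)
{
	if (!IS_ERR_OR_NULL(host->rst)) {
		reset_control_assert(host->rst);
		udelay(2);
		reset_control_deassert(host->rst);
	}
}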

Signed-off-by: Ludovic Barre 
---
 drivers/mmc/host/mmci.c | 7 +++
 drivers/mmc/host/mmci.h | 2 ++
 2 files changed, 9 insertions(+)

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 969b665..0898cc9 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -36,6 +36,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -1877,6 +1878,12 @@ static int mmci_probe(struct amba_device *dev,
 
dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
 
+   host->rst = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
+   if (IS_ERR(host->rst)) {
+   ret = PTR_ERR(host->rst);
+   goto clk_disable;
+   }
+
/* Get regulators and the supported OCR mask */
ret = mmc_regulator_get_supply(mmc);
if (ret)
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 33c243f..f793426 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -318,6 +318,8 @@ struct mmci_host {
struct clk  *clk;
u8  singleirq:1;
 
+   struct reset_control*rst;
+
spinlock_t  lock;
 
unsigned intmclk;
-- 
2.7.4



[PATCH V4 18/25] mmc: mmci: add variant property to write datactrl before command

2018-10-02 Thread Ludovic Barre
From: Ludovic Barre 

This patch adds a boolean property that allows datactrl to be written
before the command is sent, whatever the transfer direction (read or
write). This is needed to support the STM32 sdmmc variant.
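
Condensed sketch of the request path with the new flag (it mirrors the
second hunk below; the wrapper name example_start_request() is
illustrative):

static void example_start_request(struct mmci_host *host, struct mmc_request *mrq)
{
	/* with datactrl_first, data is programmed before the command,
	 * regardless of the transfer direction */
	if (mrq->data &&
	    (host->variant->datactrl_first ||
	     mrq->data->flags & MMC_DATA_READ))
		mmci_start_data(host, mrq->data);

	if (mrq->sbc)
		mmci_start_command(host, mrq->sbc, 0);
	else
		mmci_start_command(host, mrq->cmd, 0);
}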

Signed-off-by: Ludovic Barre 
---
 drivers/mmc/host/mmci.c | 6 --
 drivers/mmc/host/mmci.h | 2 ++
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index ac33d23..e639841 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1262,7 +1268,8 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
mmci_request_end(host, host->mrq);
} else if (sbc) {
mmci_start_command(host, host->mrq->cmd, 0);
-   } else if (!(cmd->data->flags & MMC_DATA_READ)) {
+   } else if (!host->variant->datactrl_first &&
+  !(cmd->data->flags & MMC_DATA_READ)) {
mmci_start_data(host, cmd->data);
}
 }
@@ -1526,7 +1527,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
if (mrq->data)
mmci_get_next_data(host, mrq->data);
 
-   if (mrq->data && mrq->data->flags & MMC_DATA_READ)
+   if (mrq->data &&
+   (host->variant->datactrl_first || mrq->data->flags & MMC_DATA_READ))
mmci_start_data(host, mrq->data);
 
if (mrq->sbc)
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 4b10c82..4e5c6c6 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -223,6 +223,7 @@ struct mmci_host;
  * @datactrl_mask_sdio: SDIO enable mask in datactrl register
  * @datactrl_blksz: block size in power of two
  * @datactrl_dpsm_enable: enable value for DPSM
+ * @datactrl_first: true if data must be setup before send command
  * @pwrreg_powerup: power up value for MMCIPOWER register
  * @f_max: maximum clk frequency supported by the controller.
  * @signal_direction: input/out direction of bus signals can be indicated
@@ -262,6 +263,7 @@ struct variant_data {
unsigned intdatactrl_mask_sdio;
unsigned intdatactrl_blocksz;
unsigned intdatactrl_dpsm_enable;
+   u8  datactrl_first:1;
u8  st_sdio:1;
u8  st_clkdiv:1;
u8  blksz_datactrl16:1;
-- 
2.7.4



[PATCH V4 14/25] mmc: mmci: expand startbiterr to irqmask and error check

2018-10-02 Thread Ludovic Barre
From: Ludovic Barre 

Not all variants have a start bit error flag.
-When checking for data errors, if the status register reports an error
 (like MCI_DATACRCFAIL) we must not also test MCI_STARTBITERR
 unless the variant actually defines it.
-Append the variant's start_err to MCI_IRQENABLE instead of enabling
 this bit by default (see the short sketch below).
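
A short sketch of both points (the helper names are illustrative; the
masks and the start_err variant field are the ones used in the hunks
below):

static void example_enable_irqs(struct mmci_host *host)
{
	/* STARTBITERR is no longer part of MCI_IRQENABLE; variants that
	 * have it add it back through start_err */
	writel(MCI_IRQENABLE | host->variant->start_err,
	       host->base + MMCIMASK0);
}

static bool example_data_error(struct mmci_host *host, u32 status)
{
	/* the start bit error is only tested if the variant defines it */
	u32 status_err = status & (host->variant->start_err |
				   MCI_DATACRCFAIL | MCI_DATATIMEOUT |
				   MCI_TXUNDERRUN | MCI_RXOVERRUN);

	return status_err != 0;
}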

Signed-off-by: Ludovic Barre 
---
 drivers/mmc/host/mmci.c | 27 ---
 drivers/mmc/host/mmci.h |  6 +++---
 2 files changed, 19 insertions(+), 14 deletions(-)

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 59a2188..168bb6d 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1037,14 +1037,18 @@ static void
 mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
  unsigned int status)
 {
+   unsigned int status_err;
+
/* Make sure we have data to handle */
if (!data)
return;
 
/* First check for errors */
-   if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
- host->variant->start_err |
- MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
+   status_err = status & (host->variant->start_err |
+  MCI_DATACRCFAIL | MCI_DATATIMEOUT |
+  MCI_TXUNDERRUN | MCI_RXOVERRUN);
+
+   if (status_err) {
u32 remain, success;
 
/* Terminate the DMA transfer */
@@ -1061,18 +1065,18 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
success = data->blksz * data->blocks - remain;
 
dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
-   status, success);
-   if (status & MCI_DATACRCFAIL) {
+   status_err, success);
+   if (status_err & MCI_DATACRCFAIL) {
/* Last block was not successful */
success -= 1;
data->error = -EILSEQ;
-   } else if (status & MCI_DATATIMEOUT) {
+   } else if (status_err & MCI_DATATIMEOUT) {
data->error = -ETIMEDOUT;
-   } else if (status & MCI_STARTBITERR) {
+   } else if (status_err & MCI_STARTBITERR) {
data->error = -ECOMM;
-   } else if (status & MCI_TXUNDERRUN) {
+   } else if (status_err & MCI_TXUNDERRUN) {
data->error = -EIO;
-   } else if (status & MCI_RXOVERRUN) {
+   } else if (status_err & MCI_RXOVERRUN) {
if (success > host->variant->fifosize)
success -= host->variant->fifosize;
else
@@ -1913,7 +1917,7 @@ static int mmci_probe(struct amba_device *dev,
goto clk_disable;
}
 
-   writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+   writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);
 
amba_set_drvdata(dev, mmc);
 
@@ -2000,7 +2004,8 @@ static void mmci_restore(struct mmci_host *host)
writel(host->datactrl_reg, host->base + MMCIDATACTRL);
writel(host->pwr_reg, host->base + MMCIPOWER);
}
-   writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+   writel(MCI_IRQENABLE | host->variant->start_err,
+  host->base + MMCIMASK0);
mmci_reg_delay(host);
 
spin_unlock_irqrestore(&host->lock, flags);
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 042cbef..8aafafd 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -181,9 +181,9 @@
 #define MMCIFIFO   0x080 /* to 0x0bc */
 
 #define MCI_IRQENABLE  \
-   (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \
-   MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK|   \
-   MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_STARTBITERRMASK)
+   (MCI_CMDCRCFAILMASK | MCI_DATACRCFAILMASK | MCI_CMDTIMEOUTMASK | \
+   MCI_DATATIMEOUTMASK | MCI_TXUNDERRUNMASK | MCI_RXOVERRUNMASK |  \
+   MCI_CMDRESPENDMASK | MCI_CMDSENTMASK)
 
 /* These interrupts are directed to IRQ1 when two IRQ lines are available */
 #define MCI_IRQ1MASK \
-- 
2.7.4



[PATCH V4 05/25] mmc: mmci: add prepare/unprepare_data callbacks

2018-10-02 Thread Ludovic Barre
From: Ludovic Barre 

This patch adds prepare/unprepare callbacks to mmci_host_ops.
This keeps mmci_pre/post_request generic, while mmci_prep_data
and mmci_unprep_data provide the common next_cookie management.
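
For readability, the shape of the indirection added here (only the
members relevant to this patch are shown; the full mmci_host_ops may
have more):

struct mmci_host_ops {
	int (*prep_data)(struct mmci_host *host, struct mmc_data *data, bool next);
	void (*unprep_data)(struct mmci_host *host, struct mmc_data *data, int err);
	int (*dma_setup)(struct mmci_host *host);
};

int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
{
	int err;

	if (!host->ops || !host->ops->prep_data)
		return 0;

	err = host->ops->prep_data(host, data, next);

	/* the cookie bookkeeping is now common to every variant */
	if (next && !err)
		data->host_cookie = ++host->next_cookie < 0 ?
			1 : host->next_cookie;

	return err;
}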

Signed-off-by: Ludovic Barre 
---
 drivers/mmc/host/mmci.c  | 101 ++-
 drivers/mmc/host/mmci.h  |   8 
 drivers/mmc/host/mmci_qcom_dml.c |   2 +
 3 files changed, 78 insertions(+), 33 deletions(-)

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 392fb59..7f66724 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -378,6 +378,31 @@ void mmci_dma_setup(struct mmci_host *host)
host->use_dma = true;
 }
 
+int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
+{
+   int err;
+
+   if (!host->ops || !host->ops->prep_data)
+   return 0;
+
+   err = host->ops->prep_data(host, data, next);
+
+   if (next && !err)
+   data->host_cookie = ++host->next_cookie < 0 ?
+   1 : host->next_cookie;
+
+   return err;
+}
+
+void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
+ int err)
+{
+   if (host->ops && host->ops->unprep_data)
+   host->ops->unprep_data(host, data, err);
+
+   data->host_cookie = 0;
+}
+
 static void
 mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
 {
@@ -608,7 +633,7 @@ static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
 }
 
 /* prepares DMA channel and DMA descriptor, returns non-zero on failure */
-static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
struct dma_chan **dma_chan,
struct dma_async_tx_descriptor **dma_desc)
 {
@@ -671,21 +696,24 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
return -ENOMEM;
 }
 
-static inline int mmci_dma_prep_data(struct mmci_host *host,
-struct mmc_data *data,
-bool next)
+int mmci_dmae_prep_data(struct mmci_host *host,
+   struct mmc_data *data,
+   bool next)
 {
struct mmci_dmae_priv *dmae = host->dma_priv;
struct mmci_dmae_next *nd = &dmae->next_data;
 
+   if (!host->use_dma)
+   return -EINVAL;
+
if (next)
-   return __mmci_dma_prep_data(host, data, &nd->chan, &nd->desc);
+   return _mmci_dmae_prep_data(host, data, >chan, >desc);
/* Check if next job is already prepared. */
if (dmae->cur && dmae->desc_current)
return 0;
 
/* No job were prepared thus do it now. */
-   return __mmci_dma_prep_data(host, data, &dmae->cur,
+   return _mmci_dmae_prep_data(host, data, &dmae->cur,
&dmae->desc_current);
 }
 
@@ -698,7 +726,7 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
if (!host->use_dma)
return -EINVAL;
 
-   ret = mmci_dma_prep_data(host, host->data, false);
+   ret = mmci_dmae_prep_data(host, host->data, false);
if (ret)
return ret;
 
@@ -745,32 +773,13 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
next->chan = NULL;
 }
 
-static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
-{
-   struct mmci_host *host = mmc_priv(mmc);
-   struct mmc_data *data = mrq->data;
-
-   if (!host->use_dma || !data)
-   return;
-
-   BUG_ON(data->host_cookie);
-
-   if (mmci_validate_data(host, data))
-   return;
-
-   if (!mmci_dma_prep_data(host, data, true))
-   data->host_cookie = ++host->next_cookie < 0 ?
-   1 : host->next_cookie;
-}
+void mmci_dmae_unprep_data(struct mmci_host *host,
+  struct mmc_data *data, int err)
 
-static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
- int err)
 {
-   struct mmci_host *host = mmc_priv(mmc);
struct mmci_dmae_priv *dmae = host->dma_priv;
-   struct mmc_data *data = mrq->data;
 
-   if (!host->use_dma || !data || !data->host_cookie)
+   if (!host->use_dma)
return;
 
mmci_dma_unmap(host, data);
@@ -794,11 +803,12 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
 
next->desc = NULL;
next->chan = NULL;
-   data->host_cookie = 0;
}
 }
 
 static struct mmci_host_ops mmci_variant_ops = {
+   .prep_data = mmci_dmae_prep_data,
+   .unprep_data = mmci_dmae_unprep_data,
.dma_setup = mmci_dmae_setup,
 };
 #else
@@ -825,9 +835,6 @@ static inline int 

[PATCH V4 02/25] mmc: mmci: create generic mmci_dma_setup

2018-10-02 Thread Ludovic Barre
From: Ludovic Barre 

This patch creates a generic mmci_dma_setup which calls the
dma_setup callback of mmci_host_ops and manages common state
such as use_dma. If there is a fallback to PIO mode, the DMA
functions must check use_dma.

If one of the DMA channels is not defined (in the dmaengine
configuration), both channels are released.
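
Resulting split, condensed from the hunks below (example_dma_start() is
an illustrative caller, not part of the patch):

void mmci_dma_setup(struct mmci_host *host)
{
	if (!host->ops || !host->ops->dma_setup)
		return;

	if (host->ops->dma_setup(host))
		return;		/* e.g. a missing channel: stay on PIO */

	host->use_dma = true;
}

static int example_dma_start(struct mmci_host *host, unsigned int datactrl)
{
	if (!host->use_dma)
		return -EINVAL;	/* caller falls back to PIO */

	/* ... prepare and submit the DMA descriptor ... */
	return 0;
}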

Signed-off-by: Ludovic Barre 
---
change v4:
Add a mmci_dma_release prototype inside the CONFIG_DMA_ENGINE section
so it can be called by mmci_dmae_setup for error management.
This prototype will be removed when the dma release callback is added
(declared in the .h and dropped from the "else CONFIG_DMA_ENGINE" branch).

 drivers/mmc/host/mmci.c  | 58 +---
 drivers/mmc/host/mmci.h  |  7 -
 drivers/mmc/host/mmci_qcom_dml.c |  9 +--
 3 files changed, 61 insertions(+), 13 deletions(-)

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index db8c085..2dc37f5 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -57,6 +57,7 @@ static struct variant_data variant_arm = {
.mmcimask1  = true,
.start_err  = MCI_STARTBITERR,
.opendrain  = MCI_ROD,
+   .init   = mmci_variant_init,
 };
 
 static struct variant_data variant_arm_extended_fifo = {
@@ -68,6 +69,7 @@ static struct variant_data variant_arm_extended_fifo = {
.mmcimask1  = true,
.start_err  = MCI_STARTBITERR,
.opendrain  = MCI_ROD,
+   .init   = mmci_variant_init,
 };
 
 static struct variant_data variant_arm_extended_fifo_hwfc = {
@@ -80,6 +82,7 @@ static struct variant_data variant_arm_extended_fifo_hwfc = {
.mmcimask1  = true,
.start_err  = MCI_STARTBITERR,
.opendrain  = MCI_ROD,
+   .init   = mmci_variant_init,
 };
 
 static struct variant_data variant_u300 = {
@@ -98,6 +101,7 @@ static struct variant_data variant_u300 = {
.mmcimask1  = true,
.start_err  = MCI_STARTBITERR,
.opendrain  = MCI_OD,
+   .init   = mmci_variant_init,
 };
 
 static struct variant_data variant_nomadik = {
@@ -117,6 +121,7 @@ static struct variant_data variant_nomadik = {
.mmcimask1  = true,
.start_err  = MCI_STARTBITERR,
.opendrain  = MCI_OD,
+   .init   = mmci_variant_init,
 };
 
 static struct variant_data variant_ux500 = {
@@ -142,6 +147,7 @@ static struct variant_data variant_ux500 = {
.mmcimask1  = true,
.start_err  = MCI_STARTBITERR,
.opendrain  = MCI_OD,
+   .init   = mmci_variant_init,
 };
 
 static struct variant_data variant_ux500v2 = {
@@ -169,6 +175,7 @@ static struct variant_data variant_ux500v2 = {
.mmcimask1  = true,
.start_err  = MCI_STARTBITERR,
.opendrain  = MCI_OD,
+   .init   = mmci_variant_init,
 };
 
 static struct variant_data variant_stm32 = {
@@ -186,6 +193,7 @@ static struct variant_data variant_stm32 = {
.f_max  = 4800,
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
+   .init   = mmci_variant_init,
 };
 
 static struct variant_data variant_qcom = {
@@ -356,6 +364,17 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
mmci_write_clkreg(host, clk);
 }
 
+void mmci_dma_setup(struct mmci_host *host)
+{
+   if (!host->ops || !host->ops->dma_setup)
+   return;
+
+   if (host->ops->dma_setup(host))
+   return;
+
+   host->use_dma = true;
+}
+
 static void
 mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
 {
@@ -414,7 +433,9 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
  * no custom DMA interfaces are supported.
  */
 #ifdef CONFIG_DMA_ENGINE
-static void mmci_dma_setup(struct mmci_host *host)
+static inline void mmci_dma_release(struct mmci_host *host);
+
+int mmci_dmae_setup(struct mmci_host *host)
 {
const char *rxname, *txname;
 
@@ -464,8 +485,12 @@ static void mmci_dma_setup(struct mmci_host *host)
host->mmc->max_seg_size = max_seg_size;
}
 
-   if (host->ops && host->ops->dma_setup)
-   host->ops->dma_setup(host);
+   if (!host->dma_tx_channel || !host->dma_rx_channel) {
+   mmci_dma_release(host);
+   return -EINVAL;
+   }
+
+   return 0;
 }
 
 /*
@@ -496,7 +521,7 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 
 static void mmci_dma_data_error(struct mmci_host *host)
 {
-   if (!dma_inprogress(host))
+   if (!host->use_dma || !dma_inprogress(host))
return;
 
dev_err(mmc_dev(host->mmc), "error 

[PATCH V4 03/25] mmc: mmci: introduce dma_priv pointer to mmci_host

2018-10-02 Thread Ludovic Barre
From: Ludovic Barre 

-Introduce a dma_priv pointer to hold the specific
needs of each dma engine. This is needed to prepare for the
sdmmc variant with internal DMA, which does not use the dmaengine API.
-Move the next cookie into the mmci host structure so that the same
cookie management is shared by all variants.
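
Condensed from the hunks below: the dmaengine backend now keeps its
channels behind the opaque host->dma_priv, so a variant with its own
DMA (e.g. the sdmmc IDMA) can later hang a completely different
structure off the same pointer.

struct mmci_dmae_priv {
	struct dma_chan *cur;
	struct dma_chan *rx_channel;
	struct dma_chan *tx_channel;
	struct dma_async_tx_descriptor *desc_current;
	struct mmci_dmae_next next_data;
};

int mmci_dmae_setup(struct mmci_host *host)
{
	struct mmci_dmae_priv *dmae;

	dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
	if (!dmae)
		return -ENOMEM;

	host->dma_priv = dmae;	/* backend-specific, opaque to the core */

	/* ... request and validate the rx/tx channels ... */
	return 0;
}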

Signed-off-by: Ludovic Barre 
---
 drivers/mmc/host/mmci.c | 144 ++--
 drivers/mmc/host/mmci.h |  19 ++-
 2 files changed, 94 insertions(+), 69 deletions(-)

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 2dc37f5..cf0f482 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -372,6 +372,9 @@ void mmci_dma_setup(struct mmci_host *host)
if (host->ops->dma_setup(host))
return;
 
+   /* initialize pre request cookie */
+   host->next_cookie = 1;
+
host->use_dma = true;
 }
 
@@ -433,33 +436,52 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
  * no custom DMA interfaces are supported.
  */
 #ifdef CONFIG_DMA_ENGINE
+struct mmci_dmae_next {
+   struct dma_async_tx_descriptor *desc;
+   struct dma_chan *chan;
+};
+
+struct mmci_dmae_priv {
+   struct dma_chan *cur;
+   struct dma_chan *rx_channel;
+   struct dma_chan *tx_channel;
+   struct dma_async_tx_descriptor  *desc_current;
+   struct mmci_dmae_next next_data;
+};
+
 static inline void mmci_dma_release(struct mmci_host *host);
 
 int mmci_dmae_setup(struct mmci_host *host)
 {
const char *rxname, *txname;
+   struct mmci_dmae_priv *dmae;
 
-   host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
-   host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
+   dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
+   if (!dmae)
+   return -ENOMEM;
 
-   /* initialize pre request cookie */
-   host->next_data.cookie = 1;
+   host->dma_priv = dmae;
+
+   dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
+"rx");
+   dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
+"tx");
 
/*
 * If only an RX channel is specified, the driver will
 * attempt to use it bidirectionally, however if it is
 * is specified but cannot be located, DMA will be disabled.
 */
-   if (host->dma_rx_channel && !host->dma_tx_channel)
-   host->dma_tx_channel = host->dma_rx_channel;
+   if (dmae->rx_channel && !dmae->tx_channel)
+   dmae->tx_channel = dmae->rx_channel;
 
-   if (host->dma_rx_channel)
-   rxname = dma_chan_name(host->dma_rx_channel);
+   if (dmae->rx_channel)
+   rxname = dma_chan_name(dmae->rx_channel);
else
rxname = "none";
 
-   if (host->dma_tx_channel)
-   txname = dma_chan_name(host->dma_tx_channel);
+   if (dmae->tx_channel)
+   txname = dma_chan_name(dmae->tx_channel);
else
txname = "none";
 
@@ -470,22 +492,22 @@ int mmci_dmae_setup(struct mmci_host *host)
 * Limit the maximum segment size in any SG entry according to
 * the parameters of the DMA engine device.
 */
-   if (host->dma_tx_channel) {
-   struct device *dev = host->dma_tx_channel->device->dev;
+   if (dmae->tx_channel) {
+   struct device *dev = dmae->tx_channel->device->dev;
unsigned int max_seg_size = dma_get_max_seg_size(dev);
 
if (max_seg_size < host->mmc->max_seg_size)
host->mmc->max_seg_size = max_seg_size;
}
-   if (host->dma_rx_channel) {
-   struct device *dev = host->dma_rx_channel->device->dev;
+   if (dmae->rx_channel) {
+   struct device *dev = dmae->rx_channel->device->dev;
unsigned int max_seg_size = dma_get_max_seg_size(dev);
 
if (max_seg_size < host->mmc->max_seg_size)
host->mmc->max_seg_size = max_seg_size;
}
 
-   if (!host->dma_tx_channel || !host->dma_rx_channel) {
+   if (!dmae->tx_channel || !dmae->rx_channel) {
mmci_dma_release(host);
return -EINVAL;
}
@@ -499,21 +521,24 @@ int mmci_dmae_setup(struct mmci_host *host)
  */
 static inline void mmci_dma_release(struct mmci_host *host)
 {
-   if (host->dma_rx_channel)
-   dma_release_channel(host->dma_rx_channel);
-   if (host->dma_tx_channel)
-   dma_release_channel(host->dma_tx_channel);
-   host->dma_rx_channel = host->dma_tx_channel = NULL;
+   struct mmci_dmae_priv *dmae = host->dma_priv;
+
+   if (dmae->rx_channel)
+   dma_release_channel(dmae->rx_channel);
+   if (dmae->tx_channel)
+   

[PATCH V4 22/25] mmc: mmci: add clock divider for stm32 sdmmc

2018-10-02 Thread Ludovic Barre
From: Ludovic Barre 

The STM32 sdmmc variant has a different clock divider.
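
Hedged sketch of the divider math implied by the new f_min below: the
STM32 SDMMC bus clock is assumed to be sdmmc_ker_ck / (2 * CLKDIV) with
a 10-bit CLKDIV field, hence DIV_ROUND_UP(host->mclk, 2046) as the
minimum frequency. The helper below is illustrative, not the actual
mmci_set_clkreg() change.

static u32 example_stm32_clkdiv(u32 mclk, u32 desired)
{
	u32 clkdiv = 0;

	if (desired && desired < mclk)
		clkdiv = DIV_ROUND_UP(mclk, 2 * desired);

	return min_t(u32, clkdiv, 1023);	/* assumed 10-bit divider field */
}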

Signed-off-by: Ludovic Barre 
---
 drivers/mmc/host/mmci.c | 2 ++
 drivers/mmc/host/mmci.h | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 0898cc9..0421e18 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1857,6 +1857,8 @@ static int mmci_probe(struct amba_device *dev,
 */
if (variant->st_clkdiv)
mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
+   else if (variant->stm32_clkdiv)
+   mmc->f_min = DIV_ROUND_UP(host->mclk, 2046);
else if (variant->explicit_mclk_control)
mmc->f_min = clk_round_rate(host->clk, 10);
else
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index f793426..1520289 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -216,6 +216,7 @@ struct mmci_host;
  * @data_cmd_enable: enable value for data commands.
  * @st_sdio: enable ST specific SDIO logic
  * @st_clkdiv: true if using a ST-specific clock divider algorithm
+ * @stm32_clkdiv: true if using a STM32-specific clock divider algorithm
  * @datactrl_mask_ddrmode: ddr mode mask in datactrl register.
  * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl 
register
  * @blksz_datactrl4: true if Block size is at b4..b16 position in datactrl
@@ -269,6 +270,7 @@ struct variant_data {
u8  datacnt_useless:1;
u8  st_sdio:1;
u8  st_clkdiv:1;
+   u8  stm32_clkdiv:1;
u8  blksz_datactrl16:1;
u8  blksz_datactrl4:1;
u32 pwrreg_powerup;
-- 
2.7.4



[PATCH V4 13/25] mmc: mmci: add datactrl block size variant property

2018-10-02 Thread Ludovic Barre
From: Ludovic Barre 

This patch allows the datactrl block size to be defined
per variant, as required by the STM32 sdmmc variant.
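
The new field only feeds the mmc core limits (last hunk below). For
example, blocksz = 11 keeps today's 2048-byte maximum block size, while
14 (used by the stm32 sdmmc variant later in this series) allows
16384-byte blocks:

	mmc->max_blk_size = 1 << variant->datactrl_blocksz;
	mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;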

Signed-off-by: Ludovic Barre 
---
 drivers/mmc/host/mmci.c | 13 +++--
 drivers/mmc/host/mmci.h |  2 ++
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index c221955..59a2188 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -51,6 +51,7 @@ static struct variant_data variant_arm = {
.fifosize   = 16 * 4,
.fifohalfsize   = 8 * 4,
.datalength_bits= 16,
+   .datactrl_blocksz   = 11,
.pwrreg_powerup = MCI_PWR_UP,
.f_max  = 1,
.reversed_irq_handling  = true,
@@ -64,6 +65,7 @@ static struct variant_data variant_arm_extended_fifo = {
.fifosize   = 128 * 4,
.fifohalfsize   = 64 * 4,
.datalength_bits= 16,
+   .datactrl_blocksz   = 11,
.pwrreg_powerup = MCI_PWR_UP,
.f_max  = 1,
.mmcimask1  = true,
@@ -77,6 +79,7 @@ static struct variant_data variant_arm_extended_fifo_hwfc = {
.fifohalfsize   = 64 * 4,
.clkreg_enable  = MCI_ARM_HWFCEN,
.datalength_bits= 16,
+   .datactrl_blocksz   = 11,
.pwrreg_powerup = MCI_PWR_UP,
.f_max  = 1,
.mmcimask1  = true,
@@ -91,6 +94,7 @@ static struct variant_data variant_u300 = {
.clkreg_enable  = MCI_ST_U300_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.datalength_bits= 16,
+   .datactrl_blocksz   = 11,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio= true,
.pwrreg_powerup = MCI_PWR_ON,
@@ -110,6 +114,7 @@ static struct variant_data variant_nomadik = {
.clkreg = MCI_CLK_ENABLE,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.datalength_bits= 24,
+   .datactrl_blocksz   = 11,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio= true,
.st_clkdiv  = true,
@@ -132,6 +137,7 @@ static struct variant_data variant_ux500 = {
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
.datalength_bits= 24,
+   .datactrl_blocksz   = 11,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio= true,
.st_clkdiv  = true,
@@ -159,6 +165,7 @@ static struct variant_data variant_ux500v2 = {
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
.datactrl_mask_ddrmode  = MCI_DPSM_ST_DDRMODE,
.datalength_bits= 24,
+   .datactrl_blocksz   = 11,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio= true,
.st_clkdiv  = true,
@@ -186,6 +193,7 @@ static struct variant_data variant_stm32 = {
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
.datalength_bits= 24,
+   .datactrl_blocksz   = 11,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio= true,
.st_clkdiv  = true,
@@ -207,6 +215,7 @@ static struct variant_data variant_qcom = {
.data_cmd_enable= MCI_CPSM_QCOM_DATCMD,
.blksz_datactrl4= true,
.datalength_bits= 24,
+   .datactrl_blocksz   = 11,
.pwrreg_powerup = MCI_PWR_UP,
.f_max  = 20800,
.explicit_mclk_control  = true,
@@ -1856,13 +1865,13 @@ static int mmci_probe(struct amba_device *dev,
/*
 * Block size can be up to 2048 bytes, but must be a power of two.
 */
-   mmc->max_blk_size = 1 << 11;
+   mmc->max_blk_size = 1 << variant->datactrl_blocksz;
 
/*
 * Limit the number of blocks transferred so that we don't overflow
 * the maximum request size.
 */
-   mmc->max_blk_count = mmc->max_req_size >> 11;
+   mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;
 
spin_lock_init(>lock);
 
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 78fa281..042cbef 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -217,6 +217,7 @@ struct mmci_host;
  * @blksz_datactrl4: true if Block size is at b4..b16 position in datactrl
  *  register
  * @datactrl_mask_sdio: SDIO enable mask in datactrl register
+ * @datactrl_blksz: block size in power of two
  * @pwrreg_powerup: power up value for MMCIPOWER register
  * @f_max: maximum clk frequency supported by the controller.
  * @signal_direction: input/out direction of bus signals can be indicated

[PATCH V4 16/25] mmc: mmci: add variant property to define dpsm bit

2018-10-02 Thread Ludovic Barre
From: Ludovic Barre 

This patch adds a datactrl variant property to define the
DPSM enable bit. This is needed to support the STM32 variant
(which has no DPSM enable bit).
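
Condensed view of the datactrl change (from the hunk below): the DPSM
enable bit is no longer hard-coded, so a variant without one simply
leaves datactrl_dpsm_enable at 0.

	if (variant->blksz_datactrl16)
		datactrl = variant->datactrl_dpsm_enable | (data->blksz << 16);
	else if (variant->blksz_datactrl4)
		datactrl = variant->datactrl_dpsm_enable | (data->blksz << 4);
	else
		datactrl = variant->datactrl_dpsm_enable | blksz_bits << 4;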

Signed-off-by: Ludovic Barre 
---
 drivers/mmc/host/mmci.c | 15 ---
 drivers/mmc/host/mmci.h |  2 ++
 2 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 00a9244..97b0f5c 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -56,6 +56,7 @@ static struct variant_data variant_arm = {
.cmdreg_srsp= MCI_CPSM_RESPONSE,
.datalength_bits= 16,
.datactrl_blocksz   = 11,
+   .datactrl_dpsm_enable   = MCI_DPSM_ENABLE,
.pwrreg_powerup = MCI_PWR_UP,
.f_max  = 1,
.reversed_irq_handling  = true,
@@ -74,6 +75,7 @@ static struct variant_data variant_arm_extended_fifo = {
.cmdreg_srsp= MCI_CPSM_RESPONSE,
.datalength_bits= 16,
.datactrl_blocksz   = 11,
+   .datactrl_dpsm_enable   = MCI_DPSM_ENABLE,
.pwrreg_powerup = MCI_PWR_UP,
.f_max  = 1,
.mmcimask1  = true,
@@ -92,6 +94,7 @@ static struct variant_data variant_arm_extended_fifo_hwfc = {
.cmdreg_srsp= MCI_CPSM_RESPONSE,
.datalength_bits= 16,
.datactrl_blocksz   = 11,
+   .datactrl_dpsm_enable   = MCI_DPSM_ENABLE,
.pwrreg_powerup = MCI_PWR_UP,
.f_max  = 1,
.mmcimask1  = true,
@@ -111,6 +114,7 @@ static struct variant_data variant_u300 = {
.cmdreg_srsp= MCI_CPSM_RESPONSE,
.datalength_bits= 16,
.datactrl_blocksz   = 11,
+   .datactrl_dpsm_enable   = MCI_DPSM_ENABLE,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio= true,
.pwrreg_powerup = MCI_PWR_ON,
@@ -135,6 +139,7 @@ static struct variant_data variant_nomadik = {
.cmdreg_srsp= MCI_CPSM_RESPONSE,
.datalength_bits= 24,
.datactrl_blocksz   = 11,
+   .datactrl_dpsm_enable   = MCI_DPSM_ENABLE,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio= true,
.st_clkdiv  = true,
@@ -162,6 +167,7 @@ static struct variant_data variant_ux500 = {
.cmdreg_srsp= MCI_CPSM_RESPONSE,
.datalength_bits= 24,
.datactrl_blocksz   = 11,
+   .datactrl_dpsm_enable   = MCI_DPSM_ENABLE,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio= true,
.st_clkdiv  = true,
@@ -194,6 +200,7 @@ static struct variant_data variant_ux500v2 = {
.datactrl_mask_ddrmode  = MCI_DPSM_ST_DDRMODE,
.datalength_bits= 24,
.datactrl_blocksz   = 11,
+   .datactrl_dpsm_enable   = MCI_DPSM_ENABLE,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio= true,
.st_clkdiv  = true,
@@ -226,6 +233,7 @@ static struct variant_data variant_stm32 = {
.cmdreg_srsp= MCI_CPSM_RESPONSE,
.datalength_bits= 24,
.datactrl_blocksz   = 11,
+   .datactrl_dpsm_enable   = MCI_DPSM_ENABLE,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio= true,
.st_clkdiv  = true,
@@ -252,6 +260,7 @@ static struct variant_data variant_qcom = {
.blksz_datactrl4= true,
.datalength_bits= 24,
.datactrl_blocksz   = 11,
+   .datactrl_dpsm_enable   = MCI_DPSM_ENABLE,
.pwrreg_powerup = MCI_PWR_UP,
.f_max  = 20800,
.explicit_mclk_control  = true,
@@ -971,11 +980,11 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
BUG_ON(1 << blksz_bits != data->blksz);
 
if (variant->blksz_datactrl16)
-   datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
+   datactrl = variant->datactrl_dpsm_enable | (data->blksz << 16);
else if (variant->blksz_datactrl4)
-   datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
+   datactrl = variant->datactrl_dpsm_enable | (data->blksz << 4);
else
-   datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
+   datactrl = variant->datactrl_dpsm_enable | blksz_bits << 4;
 
if (data->flags & MMC_DATA_READ)
datactrl |= MCI_DPSM_DIRECTION;
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index e49aba3..bd89745 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -222,6 +222,7 @@ struct mmci_host;
  *  register
  * @datactrl_mask_sdio: SDIO enable mask in datactrl register
  * @datactrl_blksz: block size in power of two
+ 

[PATCH V4 24/25] dt-bindings: mmci: add stm32 sdmmc variant

2018-10-02 Thread Ludovic Barre
From: Ludovic Barre 

This patch adds the DT properties for the stm32 sdmmc variant.

Signed-off-by: Ludovic Barre 
Acked-by: Rob Herring 
---
 Documentation/devicetree/bindings/mmc/mmci.txt | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/Documentation/devicetree/bindings/mmc/mmci.txt b/Documentation/devicetree/bindings/mmc/mmci.txt
index 4e69b25..6d3c626 100644
--- a/Documentation/devicetree/bindings/mmc/mmci.txt
+++ b/Documentation/devicetree/bindings/mmc/mmci.txt
@@ -16,8 +16,10 @@ Optional properties:
 - arm,primecell-periphid : contains the PrimeCell Peripheral ID, it overrides
the ID provided by the HW
 - resets : phandle to internal reset line.
+  Should be defined for sdmmc variant.
 - vqmmc-supply   : phandle to the regulator device tree node, mentioned
as the VCCQ/VDD_IO supply in the eMMC/SD specs.
+specific for ux500 variant:
 - st,sig-dir-dat0: bus signal direction pin used for DAT[0].
 - st,sig-dir-dat2: bus signal direction pin used for DAT[2].
 - st,sig-dir-dat31   : bus signal direction pin used for DAT[3] and DAT[1].
@@ -25,6 +27,14 @@ Optional properties:
 - st,sig-dir-cmd : cmd signal direction pin used for CMD.
 - st,sig-pin-fbclk   : feedback clock signal pin used.
 
+specific for sdmmc variant:
+- st,sig-dir : signal direction polarity used for cmd, dat0 dat123.
+- st,neg-edge: data & command phase relation, generated on
+   sd clock falling edge.
+- st,use-ckin: use ckin pin from an external driver to sample
+   the receive data (example: with voltage
+  switch transceiver).
+
 Deprecated properties:
 - mmc-cap-mmc-highspeed  : indicates whether MMC is high speed capable.
 - mmc-cap-sd-highspeed   : indicates whether SD is high speed capable.
-- 
2.7.4



[PATCH V4 15/25] mmc: mmci: add variant properties to define cpsm & cmdresp bits

2018-10-02 Thread Ludovic Barre
From: Ludovic Barre 

This patch adds command variant properties to define the
CPSM enable bit and the response flags.
This is needed to support the STM32 variant (shifted CPSM enable bit,
specific definition of the command responses).
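
Sketch of how the new fields are meant to be combined when the command
register is built (the corresponding mmci_start_command() hunk is not
shown in this excerpt; the helper name below is illustrative):

static u32 example_build_cmd(struct mmci_host *host, struct mmc_command *cmd)
{
	u32 c = cmd->opcode | host->variant->cmdreg_cpsm_enable;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= host->variant->cmdreg_lrsp_crc;
		else if (cmd->flags & MMC_RSP_CRC)
			c |= host->variant->cmdreg_srsp_crc;
		else
			c |= host->variant->cmdreg_srsp;
	}

	return c;
}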

Signed-off-by: Ludovic Barre 
---
 drivers/mmc/host/mmci.c | 47 +++
 drivers/mmc/host/mmci.h |  8 
 2 files changed, 51 insertions(+), 4 deletions(-)

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 168bb6d..00a9244 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -50,6 +50,10 @@ static unsigned int fmax = 515633;
 static struct variant_data variant_arm = {
.fifosize   = 16 * 4,
.fifohalfsize   = 8 * 4,
+   .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+   .cmdreg_lrsp_crc= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+   .cmdreg_srsp_crc= MCI_CPSM_RESPONSE,
+   .cmdreg_srsp= MCI_CPSM_RESPONSE,
.datalength_bits= 16,
.datactrl_blocksz   = 11,
.pwrreg_powerup = MCI_PWR_UP,
@@ -64,6 +68,10 @@ static struct variant_data variant_arm = {
 static struct variant_data variant_arm_extended_fifo = {
.fifosize   = 128 * 4,
.fifohalfsize   = 64 * 4,
+   .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+   .cmdreg_lrsp_crc= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+   .cmdreg_srsp_crc= MCI_CPSM_RESPONSE,
+   .cmdreg_srsp= MCI_CPSM_RESPONSE,
.datalength_bits= 16,
.datactrl_blocksz   = 11,
.pwrreg_powerup = MCI_PWR_UP,
@@ -78,6 +86,10 @@ static struct variant_data variant_arm_extended_fifo_hwfc = {
.fifosize   = 128 * 4,
.fifohalfsize   = 64 * 4,
.clkreg_enable  = MCI_ARM_HWFCEN,
+   .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+   .cmdreg_lrsp_crc= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+   .cmdreg_srsp_crc= MCI_CPSM_RESPONSE,
+   .cmdreg_srsp= MCI_CPSM_RESPONSE,
.datalength_bits= 16,
.datactrl_blocksz   = 11,
.pwrreg_powerup = MCI_PWR_UP,
@@ -93,6 +105,10 @@ static struct variant_data variant_u300 = {
.fifohalfsize   = 8 * 4,
.clkreg_enable  = MCI_ST_U300_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+   .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+   .cmdreg_lrsp_crc= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+   .cmdreg_srsp_crc= MCI_CPSM_RESPONSE,
+   .cmdreg_srsp= MCI_CPSM_RESPONSE,
.datalength_bits= 16,
.datactrl_blocksz   = 11,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
@@ -113,6 +129,10 @@ static struct variant_data variant_nomadik = {
.fifohalfsize   = 8 * 4,
.clkreg = MCI_CLK_ENABLE,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+   .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+   .cmdreg_lrsp_crc= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+   .cmdreg_srsp_crc= MCI_CPSM_RESPONSE,
+   .cmdreg_srsp= MCI_CPSM_RESPONSE,
.datalength_bits= 24,
.datactrl_blocksz   = 11,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
@@ -136,6 +156,10 @@ static struct variant_data variant_ux500 = {
.clkreg_enable  = MCI_ST_UX500_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+   .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+   .cmdreg_lrsp_crc= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+   .cmdreg_srsp_crc= MCI_CPSM_RESPONSE,
+   .cmdreg_srsp= MCI_CPSM_RESPONSE,
.datalength_bits= 24,
.datactrl_blocksz   = 11,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
@@ -163,6 +187,10 @@ static struct variant_data variant_ux500v2 = {
.clkreg_enable  = MCI_ST_UX500_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+   .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+   .cmdreg_lrsp_crc= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+   .cmdreg_srsp_crc= MCI_CPSM_RESPONSE,
+   .cmdreg_srsp= MCI_CPSM_RESPONSE,
.datactrl_mask_ddrmode  = MCI_DPSM_ST_DDRMODE,
.datalength_bits= 24,
.datactrl_blocksz   = 11,
@@ -192,6 +220,10 @@ static struct variant_data variant_stm32 = {
.clkreg_enable  = MCI_ST_UX500_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+   .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+   .cmdreg_lrsp_crc= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+   .cmdreg_srsp_crc= MCI_CPSM_RESPONSE,
+   .cmdreg_srsp= MCI_CPSM_RESPONSE,
   

[PATCH V4 20/25] dt-bindings: mmci: add optional reset property

2018-10-02 Thread Ludovic Barre
From: Ludovic Barre 

This patch adds an optional reset property.

Signed-off-by: Ludovic Barre 
Acked-by: Rob Herring 
---
 Documentation/devicetree/bindings/mmc/mmci.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Documentation/devicetree/bindings/mmc/mmci.txt b/Documentation/devicetree/bindings/mmc/mmci.txt
index 03796cf..4e69b25 100644
--- a/Documentation/devicetree/bindings/mmc/mmci.txt
+++ b/Documentation/devicetree/bindings/mmc/mmci.txt
@@ -15,6 +15,7 @@ Required properties:
 Optional properties:
 - arm,primecell-periphid : contains the PrimeCell Peripheral ID, it overrides
the ID provided by the HW
+- resets : phandle to internal reset line.
 - vqmmc-supply   : phandle to the regulator device tree node, mentioned
as the VCCQ/VDD_IO supply in the eMMC/SD specs.
 - st,sig-dir-dat0: bus signal direction pin used for DAT[0].
-- 
2.7.4



[PATCH V4 19/25] mmc: mmci: add variant property to not read datacnt

2018-10-02 Thread Ludovic Barre
From: Ludovic Barre 

This patch adds a boolean property to avoid reading the datacnt register.
This is needed to support the STM32 sdmmc variant, where the MMCIDATACNT
register should be read only after the data transfer has completed.
When it is read after an error event, the reported data count may differ
from the real number of bytes transferred.

Signed-off-by: Ludovic Barre 
---
 drivers/mmc/host/mmci.c | 8 ++--
 drivers/mmc/host/mmci.h | 3 +++
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index e639841..969b665 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1118,8 +1118,12 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 * can be as much as a FIFO-worth of data ahead.  This
 * matters for FIFO overruns only.
 */
-   remain = readl(host->base + MMCIDATACNT);
-   success = data->blksz * data->blocks - remain;
+   if (!host->variant->datacnt_useless) {
+   remain = readl(host->base + MMCIDATACNT);
+   success = data->blksz * data->blocks - remain;
+   } else {
+   success = 0;
+   }
 
dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
status_err, success);
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 4e5c6c6..33c243f 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -224,6 +224,8 @@ struct mmci_host;
  * @datactrl_blksz: block size in power of two
  * @datactrl_dpsm_enable: enable value for DPSM
  * @datactrl_first: true if data must be setup before send command
+ * @datacnt_useless: true if you could not use datacnt register to read
+ *  remaining data
  * @pwrreg_powerup: power up value for MMCIPOWER register
  * @f_max: maximum clk frequency supported by the controller.
  * @signal_direction: input/out direction of bus signals can be indicated
@@ -264,6 +266,7 @@ struct variant_data {
unsigned intdatactrl_blocksz;
unsigned intdatactrl_dpsm_enable;
u8  datactrl_first:1;
+   u8  datacnt_useless:1;
u8  st_sdio:1;
u8  st_clkdiv:1;
u8  blksz_datactrl16:1;
-- 
2.7.4



Re: [PATCH v2] mtd: rawnand: denali: set SPARE_AREA_SKIP_BYTES register to 8 if unset

2018-10-02 Thread Boris Brezillon
On Fri, 28 Sep 2018 13:16:01 +0900
Masahiro Yamada  wrote:

> NAND devices need additional data area (OOB) for error correction,
> but it is also used for Bad Block Marker (BBM).  In many cases, the
> first byte in OOB is used for BBM, but the location actually depends
> on chip vendors.  The NAND controller should preserve the precious
> BBM to keep track of bad blocks.
> 
> In Denali IP, the SPARE_AREA_SKIP_BYTES register is used to specify
> the number of bytes to skip from the start of OOB.  The ECC engine
> will automatically skip the specified number of bytes when it gets
> access to OOB area.
> 
> The same value for SPARE_AREA_SKIP_BYTES should be used between
> firmware and the operating system if you intend to use the NAND
> device across the control hand-off.
> 
> In fact, the current denali.c code expects firmware to have already
> set the SPARE_AREA_SKIP_BYTES register, then reads the value out.
> 
> If no firmware (or bootloader) has initialized the controller, the
> register value is zero, which is the default after power-on-reset.
> In other words, the Linux driver cannot initialize the controller
> by itself.
> 
> Some possible solutions are:
> 
>  [1] Add a DT property to specify the skipped bytes in OOB
>  [2] Associate the preferred value with compatible
>  [3] Hard-code the default value in the driver
> 
> My first attempt was [1], but in the review process, [3] was suggested
> as a counter-implementation.
> (https://lore.kernel.org/patchwork/patch/983055/)
> 
> The default value 8 was chosen to match to the boot ROM of the UniPhier
> platform.  The preferred value may vary by platform.  If so, please
> trade up to a different solution.
> 
> Signed-off-by: Masahiro Yamada 

Reviewed-by: Boris Brezillon 

> ---
> 
> Changes in v2:
>   - Change approach from a DT-property to a hard-coded dafault
> 
>  drivers/mtd/nand/raw/denali.c | 14 ++
>  1 file changed, 10 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
> index aaab121..18bbfc8 100644
> --- a/drivers/mtd/nand/raw/denali.c
> +++ b/drivers/mtd/nand/raw/denali.c
> @@ -21,6 +21,7 @@
>  #include "denali.h"
>  
>  #define DENALI_NAND_NAME  "denali-nand"
> +#define DENALI_DEFAULT_OOB_SKIP_BYTES  8
>  
>  /* for Indexed Addressing */
>  #define DENALI_INDEXED_CTRL  0x00
> @@ -1056,12 +1057,17 @@ static void denali_hw_init(struct denali_nand_info *denali)
>   denali->revision = swab16(ioread32(denali->reg + REVISION));
>  
>   /*
> -  * tell driver how many bit controller will skip before
> -  * writing ECC code in OOB, this register may be already
> -  * set by firmware. So we read this value out.
> -  * if this value is 0, just let it be.
> +  * Set how many bytes should be skipped before writing data in OOB.
> +  * If a non-zero value has already been set (by firmware or something),
> +  * just use it.  Otherwise, set the driver default.
>*/
>   denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
> + if (!denali->oob_skip_bytes) {
> + denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES;
> + iowrite32(denali->oob_skip_bytes,
> +   denali->reg + SPARE_AREA_SKIP_BYTES);
> + }
> +
>   denali_detect_max_banks(denali);
>   iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
>   iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);



Re: [PATCH 1/4] mm/hugetlb: Enable PUD level huge page migration

2018-10-02 Thread Suzuki K Poulose

Hi Anshuman

On 02/10/18 13:15, Anshuman Khandual wrote:

Architectures like arm64 have PUD level HugeTLB pages for certain configs
(1GB huge page is PUD based on ARM64_4K_PAGES base page size) that can be
enabled for migration. It can be achieved through checking for PUD_SHIFT
order based HugeTLB pages during migration.

Signed-off-by: Anshuman Khandual 
---
  include/linux/hugetlb.h | 3 ++-
  1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 6b68e34..9c1b77f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -483,7 +483,8 @@ static inline bool hugepage_migration_supported(struct hstate *h)
  {
  #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
if ((huge_page_shift(h) == PMD_SHIFT) ||
-   (huge_page_shift(h) == PGDIR_SHIFT))
+   (huge_page_shift(h) == PUD_SHIFT) ||




+   (huge_page_shift(h) == PGDIR_SHIFT))


nit: Extra Tab ^^.
Also, if only arm64 supports PUD_SHIFT, should this be added only in the 
arm64 specific backend, which we introduce later ?


Suzuki


Re: [PATCH 1/4] mm/hugetlb: Enable PUD level huge page migration

2018-10-02 Thread Anshuman Khandual



On 10/02/2018 06:08 PM, Suzuki K Poulose wrote:
> Hi Anshuman
> 
> On 02/10/18 13:15, Anshuman Khandual wrote:
>> Architectures like arm64 have PUD level HugeTLB pages for certain configs
>> (1GB huge page is PUD based on ARM64_4K_PAGES base page size) that can be
>> enabled for migration. It can be achieved through checking for PUD_SHIFT
>> order based HugeTLB pages during migration.
>>
>> Signed-off-by: Anshuman Khandual 
>> ---
>>   include/linux/hugetlb.h | 3 ++-
>>   1 file changed, 2 insertions(+), 1 deletion(-)
>>
>> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
>> index 6b68e34..9c1b77f 100644
>> --- a/include/linux/hugetlb.h
>> +++ b/include/linux/hugetlb.h
>> @@ -483,7 +483,8 @@ static inline bool hugepage_migration_supported(struct hstate *h)
>>   {
>>   #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
>>   if ((huge_page_shift(h) == PMD_SHIFT) ||
>> -    (huge_page_shift(h) == PGDIR_SHIFT))
>> +    (huge_page_shift(h) == PUD_SHIFT) ||
> 
> 
>> +    (huge_page_shift(h) == PGDIR_SHIFT))
> 
> nit: Extra Tab ^^.

The tab is there when you apply this patch; all three checks are
tab-aligned, each on its own line.

> Also, if only arm64 supports PUD_SHIFT, should this be added only in the 
> arm64 specific backend, which we introduce later ?

Even if the platform can add this in its back end, I would think that
having it in the default fallback function makes it complete.


Re: [PATCH] spidev: Enable the Liebherr's BK4 board to work with spidev driver

2018-10-02 Thread Mark Brown
On Tue, Oct 02, 2018 at 10:11:20AM +0200, Lukasz Majewski wrote:

> > As documented in SubmittingPatches please send patches to the 
> > maintainers for the code you would like to change. 

> I'm using the ./scripts/get_maintainer.py script to obtain list of
> relevant people.

Your patch went to the right place (and I applied it yesterday), that
was directed at Jan - the reason his patch got missed is that he didn't
send it to me.

> Please also document in the ./Documentation/devicetree the introduced
> compatible -> there are some examples (ge,ahca) in the misc directory.

Good point.




Re: [PATCH] ARM: makefile: pass -march=armv4 to assembler even on CPU32v3

2018-10-02 Thread Ard Biesheuvel
On 2 October 2018 at 14:30, Jason A. Donenfeld  wrote:
> Hi Arnd,
>
> On Tue, Oct 2, 2018 at 9:58 AM Arnd Bergmann  wrote:
>> > I think we're going to wind up playing whack-a-mole in silly ways. The
>> > fact of the matter is that the ARM assembly I'm adding to the tree is
>> > for ARMv4 and up, and not for ARMv3.
>>
>> I don't see what issues remain. The 'reteq lr' that Ard mentioned
>> is definitely the correct way to return from assembly (you also need
>> that for plain armv4, as 'bx' was added in armv4t), and Russell
>> confirmed that using -march=armv3m is something we want
>> anyway for mach-rpc.
>
> I'll do that. I can confirm that after changing it to `reteq lr`,
> everything works well with armv3m.
>
>> > I think there are three options to work around this issue:
>> >
>> > 1) Not build my assembly when CONFIG_CPU_32v3 via a Kconfig "depends".
>> > 2) Set asflags-$(CONFIG_CPU_32v3) inside my module locally to select
>> > -march=armv4.
>> > 3) This patch.
>> >
>> > My initial plan was (1). ArdB recommended I do (2) instead. I thought
>> > that was a bit too nuanced and submitted (3).
>>
>> I suspect all three of the above fail to work for armv4.
>
> Armv4 does actually work in this configuration, in fact.

It builds but it doesn't run, at least not when built into the kernel
proper (which will be the case after random.c moves to this library)

Your toolchain is implicitly passing --fix-v4bx to the assembler,
which causes it to permit bx instructions in ARMv4 object code, but
tag them with special R_ARM_V4BX ELF relocations. The ARM module
loader does take these into account, so built as a module, it works.
However, when built into the core kernel, we have to rely on the
linker to patch this instruction into the ARMv4 equivalent, and a
quick check reveals that that is currently not the case.

Bottom line: let's not go there.

> But anyway,
> I'll go with your primary suggestion and we therefore can move ahead
> with changing the global cflag to march=armv3m. Would you like me to
> submit the patch for this, or would you like to handle it?
>

Yes please go ahead.


[PATCH 4.18 128/228] ARM: dts: dra7: fix DCAN node addresses

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Kevin Hilman 

[ Upstream commit 949bdcc8a97c6078f21c8d4966436b117f2e4cd3 ]

Fix the DT node addresses to match the reg property addresses,
which were verified to match the TRM:
http://www.ti.com/lit/pdf/sprui30

Cc: Roger Quadros 
Signed-off-by: Kevin Hilman 
Acked-by: Roger Quadros 
Signed-off-by: Tony Lindgren 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 arch/arm/boot/dts/dra7.dtsi |4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1893,7 +1893,7 @@
};
};
 
-   dcan1: can@481cc000 {
+   dcan1: can@4ae3c000 {
compatible = "ti,dra7-d_can";
ti,hwmods = "dcan1";
reg = <0x4ae3c000 0x2000>;
@@ -1903,7 +1903,7 @@
status = "disabled";
};
 
-   dcan2: can@481d {
+   dcan2: can@4848 {
compatible = "ti,dra7-d_can";
ti,hwmods = "dcan2";
reg = <0x4848 0x2000>;




[PATCH 4.14 033/137] scsi: klist: Make it safe to use klists in atomic context

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Bart Van Assche 

[ Upstream commit 624fa7790f80575a4ec28fbdb2034097dc18d051 ]

In the scsi_transport_srp implementation it cannot be avoided to
iterate over a klist from atomic context when using the legacy block
layer instead of blk-mq. Hence this patch that makes it safe to use
klists in atomic context. This patch avoids that lockdep reports the
following:

WARNING: SOFTIRQ-safe -> SOFTIRQ-unsafe lock order detected
 Possible interrupt unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&(&k->k_lock)->rlock);
                               local_irq_disable();
                               lock(&(&q->__queue_lock)->rlock);
                               lock(&(&k->k_lock)->rlock);
  <Interrupt>
    lock(&(&q->__queue_lock)->rlock);

stack backtrace:
Workqueue: kblockd blk_timeout_work
Call Trace:
 dump_stack+0xa4/0xf5
 check_usage+0x6e6/0x700
 __lock_acquire+0x185d/0x1b50
 lock_acquire+0xd2/0x260
 _raw_spin_lock+0x32/0x50
 klist_next+0x47/0x190
 device_for_each_child+0x8e/0x100
 srp_timed_out+0xaf/0x1d0 [scsi_transport_srp]
 scsi_times_out+0xd4/0x410 [scsi_mod]
 blk_rq_timed_out+0x36/0x70
 blk_timeout_work+0x1b5/0x220
 process_one_work+0x4fe/0xad0
 worker_thread+0x63/0x5a0
 kthread+0x1c1/0x1e0
 ret_from_fork+0x24/0x30

See also commit c9ddf73476ff ("scsi: scsi_transport_srp: Fix shost to
rport translation").

Signed-off-by: Bart Van Assche 
Cc: Martin K. Petersen 
Cc: James Bottomley 
Acked-by: Greg Kroah-Hartman 
Signed-off-by: Martin K. Petersen 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 lib/klist.c |   10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)

--- a/lib/klist.c
+++ b/lib/klist.c
@@ -336,8 +336,9 @@ struct klist_node *klist_prev(struct kli
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *prev;
+   unsigned long flags;
 
-   spin_lock(&i->i_klist->k_lock);
+   spin_lock_irqsave(&i->i_klist->k_lock, flags);
 
if (last) {
prev = to_klist_node(last->n_node.prev);
@@ -356,7 +357,7 @@ struct klist_node *klist_prev(struct kli
prev = to_klist_node(prev->n_node.prev);
}
 
-   spin_unlock(&i->i_klist->k_lock);
+   spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
 
if (put && last)
put(last);
@@ -377,8 +378,9 @@ struct klist_node *klist_next(struct kli
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *next;
+   unsigned long flags;
 
-   spin_lock(&i->i_klist->k_lock);
+   spin_lock_irqsave(&i->i_klist->k_lock, flags);
 
if (last) {
next = to_klist_node(last->n_node.next);
@@ -397,7 +399,7 @@ struct klist_node *klist_next(struct kli
next = to_klist_node(next->n_node.next);
}
 
-   spin_unlock(&i->i_klist->k_lock);
+   spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
 
if (put && last)
put(last);




[PATCH 4.14 069/137] perf/x86/intel/lbr: Fix incomplete LBR call stack

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Kan Liang 

[ Upstream commit 0592e57b24e7e05ec1f4c50b9666c013abff7017 ]

LBR has a limited stack size. If a task has a deeper call stack than
LBR's stack size, only the overflowed part is reported. A complete call
stack may not be reconstructed by perf tool.

The current code doesn't access all LBR registers. It only reads the ones
below the TOS. The LBR registers above the TOS will be discarded
unconditionally.

When a CALL is captured, the TOS is incremented by 1, modulo the max LBR
stack size. The LBR HW only records the call stack information in the
register which the TOS points to. It will not touch other LBR
registers. So the registers above the TOS probably still store valid
call stack information for an overflowed call stack, which needs to be
reported.

To retrieve complete call stack information, we need to start from TOS,
read all LBR registers until an invalid entry is detected.
0s can be used to detect the invalid entry, because:

 - When a RET is captured, the HW zeros the LBR register which TOS points
   to, then decreases the TOS.
 - The LBR registers are reset to 0 when adding a new LBR event or
   scheduling an existing LBR event.
 - A taken branch at IP 0 is not expected
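
As a rough illustration of that walk (an editorial sketch, not the patch
itself), the save side can start at the TOS and stop at the first all-zero
FROM value, remembering how many entries were valid. Helper names follow
arch/x86/events/intel/lbr.c; valid_lbrs is the field this patch introduces:

 static void sketch_lbr_save(struct x86_perf_task_context *task_ctx)
 {
 	unsigned lbr_idx, mask = x86_pmu.lbr_nr - 1;
 	u64 tos = intel_pmu_lbr_tos();
 	u64 from;
 	int i;

 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
 		lbr_idx = (tos - i) & mask;
 		from = rdlbr_from(lbr_idx);
 		if (!from)			/* first invalid entry: stop */
 			break;
 		task_ctx->lbr_from[i] = from;
 		task_ctx->lbr_to[i]   = rdlbr_to(lbr_idx);
 	}
 	task_ctx->valid_lbrs = i;	/* restore side only replays these */
 	task_ctx->tos = tos;
 }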

The context switch code is also modified to save/restore all valid LBR
registers. Furthermore, the LBR registers, which don't have valid call
stack information, need to be reset in restore, because they may be
polluted while swapped out.

Here is a small test program, tchain_deep.
Its call stack is deeper than 32.

 noinline void f33(void)
 {
int i;

for (i = 0; i < 1000;) {
if (i%2)
i++;
else
i++;
}
 }

 noinline void f32(void)
 {
f33();
 }

 noinline void f31(void)
 {
f32();
 }

 ... ...

 noinline void f1(void)
 {
f2();
 }

 int main()
 {
f1();
 }

Here is the test result on SKX. The max stack size of SKX is 32.

Without the patch:

 $ perf record -e cycles --call-graph lbr -- ./tchain_deep
 $ perf report --stdio
 #
 # Children  Self  Command  Shared Object Symbol
 #     ...    .
 #
   100.00%  99.99%  tchain_deep  tchain_deep   [.] f33
|
 --99.99%--f30
   f31
   f32
   f33

With the patch:

 $ perf record -e cycles --call-graph lbr -- ./tchain_deep
 $ perf report --stdio
 # Children  Self  Command  Shared Object Symbol
 #     ...    ..
 #
99.99%  0.00%  tchain_deep  tchain_deep   [.] f1
|
---f1
   f2
   f3
   f4
   f5
   f6
   f7
   f8
   f9
   f10
   f11
   f12
   f13
   f14
   f15
   f16
   f17
   f18
   f19
   f20
   f21
   f22
   f23
   f24
   f25
   f26
   f27
   f28
   f29
   f30
   f31
   f32
   f33

Signed-off-by: Kan Liang 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Peter Zijlstra 
Cc: Arnaldo Carvalho de Melo 
Cc: Jiri Olsa 
Cc: Stephane Eranian 
Cc: Vince Weaver 
Cc: Alexander Shishkin 
Cc: Thomas Gleixner 
Cc: a...@kernel.org
Cc: eran...@google.com
Link: 
https://lore.kernel.org/lkml/1528213126-4312-1-git-send-email-kan.li...@linux.intel.com
Signed-off-by: Ingo Molnar 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 arch/x86/events/intel/lbr.c  |   32 ++--
 arch/x86/events/perf_event.h |1 +
 2 files changed, 27 insertions(+), 6 deletions(-)

--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -346,7 +346,7 @@ static void __intel_pmu_lbr_restore(stru
 
mask = x86_pmu.lbr_nr - 1;
tos = task_ctx->tos;
-   for (i = 0; i < tos; i++) {
+   for (i = 0; i < task_ctx->valid_lbrs; i++) {
lbr_idx = (tos - i) & mask;
wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
wrlbr_to  (lbr_idx, task_ctx->lbr_to[i]);
@@ -354,6 +354,15 @@ static void __intel_pmu_lbr_restore(stru
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
+
+   for (; i < x86_pmu.lbr_nr; i++) {
+   lbr_idx = (tos - i) & mask;
+   wrlbr_from(lbr_idx, 0);
+   wrlbr_to(lbr_idx, 0);
+   if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
+   wrmsrl(MSR_LBR_INFO_0 + lbr_idx, 0);

[PATCH 4.14 075/137] module: exclude SHN_UNDEF symbols from kallsyms api

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Jessica Yu 

[ Upstream commit 9f2d1e68cf4d641def734adaccfc3823d3575e6c ]

Livepatch modules are special in that we preserve their entire symbol
tables in order to be able to apply relocations after module load. The
unwanted side effect of this is that undefined (SHN_UNDEF) symbols of
livepatch modules are accessible via the kallsyms api and this can
confuse symbol resolution in livepatch (klp_find_object_symbol()) and
cause subtle bugs in livepatch.

Have the module kallsyms api skip over SHN_UNDEF symbols. These symbols
are usually not available for normal modules anyway as we cut down their
symbol tables to just the core (non-undefined) symbols, so this should
really just affect livepatch modules. Note that this patch doesn't
affect the display of undefined symbols in /proc/kallsyms.

Reported-by: Josh Poimboeuf 
Tested-by: Josh Poimboeuf 
Reviewed-by: Josh Poimboeuf 
Signed-off-by: Jessica Yu 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 kernel/module.c |6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

--- a/kernel/module.c
+++ b/kernel/module.c
@@ -4058,7 +4058,7 @@ static unsigned long mod_find_symname(st
 
for (i = 0; i < kallsyms->num_symtab; i++)
if (strcmp(name, symname(kallsyms, i)) == 0 &&
-   kallsyms->symtab[i].st_info != 'U')
+   kallsyms->symtab[i].st_shndx != SHN_UNDEF)
return kallsyms->symtab[i].st_value;
return 0;
 }
@@ -4104,6 +4104,10 @@ int module_kallsyms_on_each_symbol(int (
if (mod->state == MODULE_STATE_UNFORMED)
continue;
for (i = 0; i < kallsyms->num_symtab; i++) {
+
+   if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
+   continue;
+
ret = fn(data, symname(kallsyms, i),
 mod, kallsyms->symtab[i].st_value);
if (ret != 0)




[PATCH 4.14 077/137] nfsd: fix corrupted reply to badly ordered compound

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: "J. Bruce Fields" 

[ Upstream commit 5b7b15aee641904ae269be9846610a3950cbd64c ]

We're encoding a single op in the reply but leaving the number of ops
zero, so the reply makes no sense.

Somewhat academic as this isn't a case any real client will hit, though
in theory perhaps that could change in a future protocol extension.

Reviewed-by: Jeff Layton 
Signed-off-by: J. Bruce Fields 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 fs/nfsd/nfs4proc.c |1 +
 1 file changed, 1 insertion(+)

--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1725,6 +1725,7 @@ nfsd4_proc_compound(struct svc_rqst *rqs
if (status) {
op = &resp->ops[0];
op->status = status;
+   resp->opcnt = 1;
goto encode_op;
}
 




[PATCH 4.18 220/228] arm/arm64: smccc-1.1: Make return values unsigned long

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Marc Zyngier 

[ Upstream commit 1d8f574708a3fb6f18c85486d0c5217df893c0cf ]

An unfortunate consequence of having a strong typing for the input
values to the SMC call is that it also affects the type of the
return values, limiting r0 to 32 bits and r{1,2,3} to whatever
was passed as an input.

Let's turn everything into "unsigned long", which satisfies the
requirements of both architectures, and allows for the full
range of return values.
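
A small user-space illustration (not kernel code) of the truncation this
avoids on arm64, where unsigned long is 64 bits:

 #include <stdio.h>
 #include <stdint.h>

 int main(void)
 {
 	uint64_t fw_ret = 0x1234567890abcdefULL; /* what the SMC left in x0 */
 	uint32_t r0_32 = fw_ret;                 /* old-style u32 register var */
 	unsigned long r0_ul = fw_ret;            /* after the patch (64-bit on arm64) */

 	printf("truncated: 0x%x\n", r0_32);
 	printf("full:      0x%lx\n", r0_ul);
 	return 0;
 }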

Reported-by: Julien Grall 
Signed-off-by: Marc Zyngier 
Signed-off-by: Will Deacon 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 include/linux/arm-smccc.h |   20 ++--
 1 file changed, 10 insertions(+), 10 deletions(-)

--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -199,31 +199,31 @@ asmlinkage void __arm_smccc_hvc(unsigned
 
 #define __declare_arg_0(a0, res)   \
struct arm_smccc_res   *___res = res;   \
-   register u32   r0 asm("r0") = a0;   \
+   register unsigned long r0 asm("r0") = (u32)a0;  \
register unsigned long r1 asm("r1");\
register unsigned long r2 asm("r2");\
register unsigned long r3 asm("r3")
 
 #define __declare_arg_1(a0, a1, res)   \
struct arm_smccc_res   *___res = res;   \
-   register u32   r0 asm("r0") = a0;   \
-   register typeof(a1)r1 asm("r1") = a1;   \
+   register unsigned long r0 asm("r0") = (u32)a0;  \
+   register unsigned long r1 asm("r1") = a1;   \
register unsigned long r2 asm("r2");\
register unsigned long r3 asm("r3")
 
 #define __declare_arg_2(a0, a1, a2, res)   \
struct arm_smccc_res   *___res = res;   \
-   register u32   r0 asm("r0") = a0;   \
-   register typeof(a1)r1 asm("r1") = a1;   \
-   register typeof(a2)r2 asm("r2") = a2;   \
+   register unsigned long r0 asm("r0") = (u32)a0;  \
+   register unsigned long r1 asm("r1") = a1;   \
+   register unsigned long r2 asm("r2") = a2;   \
register unsigned long r3 asm("r3")
 
 #define __declare_arg_3(a0, a1, a2, a3, res)   \
struct arm_smccc_res   *___res = res;   \
-   register u32   r0 asm("r0") = a0;   \
-   register typeof(a1)r1 asm("r1") = a1;   \
-   register typeof(a2)r2 asm("r2") = a2;   \
-   register typeof(a3)r3 asm("r3") = a3
+   register unsigned long r0 asm("r0") = (u32)a0;  \
+   register unsigned long r1 asm("r1") = a1;   \
+   register unsigned long r2 asm("r2") = a2;   \
+   register unsigned long r3 asm("r3") = a3
 
 #define __declare_arg_4(a0, a1, a2, a3, a4, res)   \
__declare_arg_3(a0, a1, a2, a3, res);   \




[PATCH 4.14 017/137] s390/mm: correct allocate_pgste proc_handler callback

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Vasily Gorbik 

[ Upstream commit 5bedf8aa03c28cb8dc98bdd32a41b66d8f7d3eaa ]

Since proc_dointvec does not perform value range control,
proc_dointvec_minmax should be used to limit value range, which is
clearly intended here, as the internal representation of the value:

unsigned int alloc_pgste:1;

In fact it currently works, since we have

  mm->context.alloc_pgste = page_table_allocate_pgste || ...

... since commit 23fefe119ceb5 ("s390/kvm: avoid global config of 
vm.alloc_pgste=1")

Before that it was

   mm->context.alloc_pgste = page_table_allocate_pgste;

which was broken. That was introduced with commit 0b46e0a3ec0d7 ("s390/kvm:
remove delayed reallocation of page tables for KVM").
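
For reference, a minimal sketch of the pattern the fix relies on (names are
illustrative): proc_dointvec_minmax rejects writes outside [*extra1, *extra2]
with -EINVAL, which proc_dointvec does not do:

 #include <linux/sysctl.h>

 static int example_min = 0;
 static int example_max = 1;
 static int example_value;

 static struct ctl_table example_sysctl[] = {
 	{
 		.procname	= "example_flag",
 		.data		= &example_value,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &example_min,
 		.extra2		= &example_max,
 	},
 	{ }
 };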

Fixes: 0b46e0a3ec0d7 ("s390/kvm: remove delayed reallocation of page tables for 
KVM")
Acked-by: Christian Borntraeger 
Reviewed-by: Heiko Carstens 
Signed-off-by: Vasily Gorbik 
Signed-off-by: Martin Schwidefsky 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 arch/s390/mm/pgalloc.c |2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -27,7 +27,7 @@ static struct ctl_table page_table_sysct
.data   = &page_table_allocate_pgste,
.maxlen = sizeof(int),
.mode   = S_IRUGO | S_IWUSR,
-   .proc_handler   = proc_dointvec,
+   .proc_handler   = proc_dointvec_minmax,
.extra1 = &page_table_allocate_pgste_min,
.extra2 = &page_table_allocate_pgste_max,
},




[PATCH 4.14 020/137] RDMA/bnxt_re: Fix a couple off by one bugs

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Dan Carpenter 

[ Upstream commit 474e5a86067e5f12c97d1db8b170c7f45b53097a ]

The sgid_tbl->tbl[] array is allocated in bnxt_qplib_alloc_sgid_tbl().
It has sgid_tbl->max elements.  So the > should be >= to prevent
accessing one element beyond the end of the array.

Fixes: 1ac5a4047975 ("RDMA/bnxt_re: Add bnxt_re RoCE driver")
Signed-off-by: Dan Carpenter 
Acked-by: Selvin Xavier 
Signed-off-by: Jason Gunthorpe 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/infiniband/hw/bnxt_re/qplib_sp.c |4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -156,7 +156,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qpli
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
struct bnxt_qplib_gid *gid)
 {
-   if (index > sgid_tbl->max) {
+   if (index >= sgid_tbl->max) {
dev_err(&res->pdev->dev,
"QPLIB: Index %d exceeded SGID table max (%d)",
index, sgid_tbl->max);
@@ -361,7 +361,7 @@ int bnxt_qplib_get_pkey(struct bnxt_qpli
*pkey = 0x;
return 0;
}
-   if (index > pkey_tbl->max) {
+   if (index >= pkey_tbl->max) {
dev_err(&res->pdev->dev,
"QPLIB: Index %d exceeded PKEY table max (%d)",
index, pkey_tbl->max);




[PATCH 4.14 042/137] s390/dasd: correct numa_node in dasd_alloc_queue

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Vasily Gorbik 

[ Upstream commit b17e3abb0af404cb62ad4ef1a5962f58b06e2b78 ]

The numa_node field of the tag_set struct has to be explicitly
initialized, otherwise it stays 0, which is a valid numa node id and
can cause memory allocation failures if node 0 is offline.
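
A minimal sketch of the initialisation pattern (field values are illustrative
and example_mq_ops is a placeholder): tag sets with no NUMA preference must
say so explicitly, because the zero-initialised default means node 0, not
"any node":

 #include <linux/blk-mq.h>

 static int example_init_tag_set(struct blk_mq_tag_set *set)
 {
 	memset(set, 0, sizeof(*set));
 	set->ops = &example_mq_ops;		/* placeholder ops table */
 	set->nr_hw_queues = 1;
 	set->queue_depth = 64;
 	set->numa_node = NUMA_NO_NODE;		/* the fix: don't pin to node 0 */
 	set->flags = BLK_MQ_F_SHOULD_MERGE;

 	return blk_mq_alloc_tag_set(set);
 }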

Acked-by: Stefan Haberland 
Signed-off-by: Vasily Gorbik 
Signed-off-by: Martin Schwidefsky 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/s390/block/dasd.c |1 +
 1 file changed, 1 insertion(+)

--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3190,6 +3190,7 @@ static int dasd_alloc_queue(struct dasd_
block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+   block->tag_set.numa_node = NUMA_NO_NODE;
 
rc = blk_mq_alloc_tag_set(&block->tag_set);
if (rc)




[PATCH 4.14 034/137] scsi: ibmvscsi: Improve strings handling

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Breno Leitao 

[ Upstream commit 1262dc09dc9ae7bf4ad00b6a2c5ed6a6936bcd10 ]

Currently an open firmware property is copied into the partition_name
variable without keeping room for a trailing \0.

Later on, this variable (partition_name), which is 97 bytes long, is
strncpyed into ibmvscsi_host_data->madapter_info->partition_name, which is
96 bytes long, possibly truncating it 'again' and removing the \0.

This patch simply decreases the partition name buffer to 96 bytes and
copies it using strlcpy(), which guarantees that the string is \0
terminated. I think there is no issue if there is a truncation in this
very first copy, i.e., when the open firmware property is read and copied
into the driver for the very first time.

This issue also causes the following warning on GCC 8:

drivers/scsi/ibmvscsi/ibmvscsi.c:281:2: warning: 'strncpy' output may
be truncated copying 96 bytes from a string of length 96 [-Wstringop-truncation]
...
inlined from 'ibmvscsi_probe' at
drivers/scsi/ibmvscsi/ibmvscsi.c:2221:7:
drivers/scsi/ibmvscsi/ibmvscsi.c:265:3: warning: 'strncpy' specified
bound 97 equals destination size [-Wstringop-truncation]
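
A user-space illustration of the difference the patch relies on (my_strlcpy
is a stand-in for the kernel's strlcpy, which always NUL-terminates within
the given size, unlike strncpy):

 #include <stdio.h>
 #include <string.h>

 static size_t my_strlcpy(char *dst, const char *src, size_t size)
 {
 	size_t len = strlen(src);

 	if (size) {
 		size_t copy = len >= size ? size - 1 : len;

 		memcpy(dst, src, copy);
 		dst[copy] = '\0';
 	}
 	return len;
 }

 int main(void)
 {
 	char a[8], b[8];
 	const char *name = "PARTITION";	/* longer than 8 bytes */

 	strncpy(a, name, sizeof(a));	/* a is NOT NUL-terminated */
 	my_strlcpy(b, name, sizeof(b));	/* b is "PARTITI" plus '\0' */

 	printf("strlcpy result: %s\n", b);
 	return 0;
 }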

CC: Bart Van Assche 
CC: Tyrel Datwyler 
Signed-off-by: Breno Leitao 
Acked-by: Tyrel Datwyler 
Signed-off-by: Martin K. Petersen 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/scsi/ibmvscsi/ibmvscsi.c |4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -93,7 +93,7 @@ static int max_requests = IBMVSCSI_MAX_R
 static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
 static int fast_fail = 1;
 static int client_reserve = 1;
-static char partition_name[97] = "UNKNOWN";
+static char partition_name[96] = "UNKNOWN";
 static unsigned int partition_number = -1;
 static LIST_HEAD(ibmvscsi_head);
 
@@ -262,7 +262,7 @@ static void gather_partition_info(void)
 
ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL);
if (ppartition_name)
-   strncpy(partition_name, ppartition_name,
+   strlcpy(partition_name, ppartition_name,
sizeof(partition_name));
p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL);
if (p_number_ptr)




[PATCH 4.14 031/137] ARM: dts: ls1021a: Add missing cooling device properties for CPUs

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Viresh Kumar 

[ Upstream commit 47768f372eae030db6fab5225f9504a820d2c07f ]

The cooling device properties, like "#cooling-cells" and
"dynamic-power-coefficient", should either be present for all the CPUs
of a cluster or none. If these are present only for a subset of CPUs of
a cluster then things will start falling apart as soon as the CPUs are
brought online in a different order. For example, this will happen
because the operating system looks for such properties in the CPU node
it is trying to bring up, so that it can register a cooling device.

Add such missing properties.

Signed-off-by: Viresh Kumar 
Signed-off-by: Shawn Guo 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 arch/arm/boot/dts/ls1021a.dtsi |1 +
 1 file changed, 1 insertion(+)

--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -84,6 +84,7 @@
device_type = "cpu";
reg = <0xf01>;
clocks = <&clockgen 1 0>;
+   #cooling-cells = <2>;
};
};
 




[PATCH 4.14 066/137] arm: dts: mediatek: Add missing cooling device properties for CPUs

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Viresh Kumar 

[ Upstream commit 0c7f7a5150023f3c6f0b27c4d4940ce3dfaf62cc ]

The cooling device properties, like "#cooling-cells" and
"dynamic-power-coefficient", should either be present for all the CPUs
of a cluster or none. If these are present only for a subset of CPUs of
a cluster then things will start falling apart as soon as the CPUs are
brought online in a different order. For example, this will happen
because the operating system looks for such properties in the CPU node
it is trying to bring up, so that it can register a cooling device.

Add such missing properties.

Signed-off-by: Viresh Kumar 
Signed-off-by: Matthias Brugger 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 arch/arm/boot/dts/mt7623.dtsi |3 +++
 1 file changed, 3 insertions(+)

--- a/arch/arm/boot/dts/mt7623.dtsi
+++ b/arch/arm/boot/dts/mt7623.dtsi
@@ -98,6 +98,7 @@
compatible = "arm,cortex-a7";
reg = <0x1>;
operating-points-v2 = <&cpu_opp_table>;
+   #cooling-cells = <2>;
clock-frequency = <1300000000>;
};
 
@@ -106,6 +107,7 @@
compatible = "arm,cortex-a7";
reg = <0x2>;
operating-points-v2 = <&cpu_opp_table>;
+   #cooling-cells = <2>;
clock-frequency = <1300000000>;
};
 
@@ -114,6 +116,7 @@
compatible = "arm,cortex-a7";
reg = <0x3>;
operating-points-v2 = <&cpu_opp_table>;
+   #cooling-cells = <2>;
clock-frequency = <1300000000>;
};
};




[PATCH 4.14 008/137] misc: sram: enable clock before registering regions

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Johan Hovold 

[ Upstream commit d5b9653dd2bb7a2b1c8cc783c5d3b607bbb6b271 ]

Make sure to enable the clock before registering regions and exporting
partitions to user space, at which point we must be prepared for I/O.

Fixes: ee895ccdf776 ("misc: sram: fix enabled clock leak on error path")
Signed-off-by: Johan Hovold 
Reviewed-by: Vladimir Zapolskiy 
Signed-off-by: Greg Kroah-Hartman 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/misc/sram.c |   13 +++--
 1 file changed, 7 insertions(+), 6 deletions(-)

--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -391,23 +391,23 @@ static int sram_probe(struct platform_de
if (IS_ERR(sram->pool))
return PTR_ERR(sram->pool);
 
-   ret = sram_reserve_regions(sram, res);
-   if (ret)
-   return ret;
-
sram->clk = devm_clk_get(sram->dev, NULL);
if (IS_ERR(sram->clk))
sram->clk = NULL;
else
clk_prepare_enable(sram->clk);
 
+   ret = sram_reserve_regions(sram, res);
+   if (ret)
+   goto err_disable_clk;
+
platform_set_drvdata(pdev, sram);
 
init_func = of_device_get_match_data(&pdev->dev);
if (init_func) {
ret = init_func();
if (ret)
-   goto err_disable_clk;
+   goto err_free_partitions;
}
 
dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
@@ -415,10 +415,11 @@ static int sram_probe(struct platform_de
 
return 0;
 
+err_free_partitions:
+   sram_free_partitions(sram);
 err_disable_clk:
if (sram->clk)
clk_disable_unprepare(sram->clk);
-   sram_free_partitions(sram);
 
return ret;
 }




[PATCH 4.14 045/137] mtd: rawnand: atmel: add module param to avoid using dma

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Peter Rosin 

[ Upstream commit efc6362c6f8c1e74b340e2611f1b35e7d557ce7b ]

On a sama5d31 with a Full-HD dual LVDS panel (132MHz pixel clock) NAND
flash accesses have a tendency to cause display disturbances. Add a
module param to disable DMA from the NAND controller, since that fixes
the display problem for me.

Signed-off-by: Peter Rosin 
Reviewed-by: Boris Brezillon 
Signed-off-by: Miquel Raynal 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/mtd/nand/atmel/nand-controller.c |7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)

--- a/drivers/mtd/nand/atmel/nand-controller.c
+++ b/drivers/mtd/nand/atmel/nand-controller.c
@@ -129,6 +129,11 @@
 #define DEFAULT_TIMEOUT_MS 1000
#define MIN_DMA_LEN		128
 
+static bool atmel_nand_avoid_dma __read_mostly;
+
+MODULE_PARM_DESC(avoiddma, "Avoid using DMA");
+module_param_named(avoiddma, atmel_nand_avoid_dma, bool, 0400);
+
 enum atmel_nand_rb_type {
ATMEL_NAND_NO_RB,
ATMEL_NAND_NATIVE_RB,
@@ -1975,7 +1980,7 @@ static int atmel_nand_controller_init(st
return ret;
}
 
-   if (nc->caps->has_dma) {
+   if (nc->caps->has_dma && !atmel_nand_avoid_dma) {
dma_cap_mask_t mask;
 
dma_cap_zero(mask);




[PATCH 4.14 092/137] spi: rspi: Fix interrupted DMA transfers

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Geert Uytterhoeven 

commit 8dbbaa47b96f6ea5f09f922b4e3c505cd8cf upstream.

When interrupted, wait_event_interruptible_timeout() returns
-ERESTARTSYS, and the SPI transfer in progress will fail, as expected:

m25p80 spi0.0: SPI transfer failed: -512
spi_master spi0: failed to transfer one message from queue

However, as the underlying DMA transfers may not have completed, all
subsequent SPI transfers may start to fail:

spi_master spi0: receive timeout
qspi_transfer_out_in() returned -110
m25p80 spi0.0: SPI transfer failed: -110
spi_master spi0: failed to transfer one message from queue

Fix this by calling dmaengine_terminate_all() not only for timeouts, but
also for errors.

This can be reproduced on r8a7791/koelsch, using "hd /dev/mtd0" followed
by CTRL-C.

Signed-off-by: Geert Uytterhoeven 
Signed-off-by: Mark Brown 
Cc: sta...@vger.kernel.org
Signed-off-by: Greg Kroah-Hartman 

---
 drivers/spi/spi-rspi.c |   10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)

--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -598,11 +598,13 @@ static int rspi_dma_transfer(struct rspi
 
ret = wait_event_interruptible_timeout(rspi->wait,
   rspi->dma_callbacked, HZ);
-   if (ret > 0 && rspi->dma_callbacked)
+   if (ret > 0 && rspi->dma_callbacked) {
ret = 0;
-   else if (!ret) {
dev_err(&rspi->master->dev, "DMA timeout\n");
-   ret = -ETIMEDOUT;
+   } else {
+   if (!ret) {
+   dev_err(&rspi->master->dev, "DMA timeout\n");
+   ret = -ETIMEDOUT;
+   }
if (tx)
dmaengine_terminate_all(rspi->master->dma_tx);
if (rx)




[PATCH 4.14 061/137] ath10k: transmit queued frames after processing rx packets

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Niklas Cassel 

[ Upstream commit 3f04950f32d5d592ab4fcaecac2178558a6f7437 ]

When running iperf on ath10k SDIO, TX can stop working:

iperf -c 192.168.1.1 -i 1 -t 20 -w 10K
[  3]  0.0- 1.0 sec  2.00 MBytes  16.8 Mbits/sec
[  3]  1.0- 2.0 sec  3.12 MBytes  26.2 Mbits/sec
[  3]  2.0- 3.0 sec  3.25 MBytes  27.3 Mbits/sec
[  3]  3.0- 4.0 sec   655 KBytes  5.36 Mbits/sec
[  3]  4.0- 5.0 sec  0.00 Bytes  0.00 bits/sec
[  3]  5.0- 6.0 sec  0.00 Bytes  0.00 bits/sec
[  3]  6.0- 7.0 sec  0.00 Bytes  0.00 bits/sec
[  3]  7.0- 8.0 sec  0.00 Bytes  0.00 bits/sec
[  3]  8.0- 9.0 sec  0.00 Bytes  0.00 bits/sec
[  3]  9.0-10.0 sec  0.00 Bytes  0.00 bits/sec
[  3]  0.0-10.3 sec  9.01 MBytes  7.32 Mbits/sec

There are frames in the ieee80211_txq and there are frames that have
been removed from this queue, but haven't yet been sent on the wire
(num_pending_tx).

When num_pending_tx reaches max_num_pending_tx, we will stop the queues
by calling ieee80211_stop_queues().

As frames that have previously been sent for transmission
(num_pending_tx) are completed, we will decrease num_pending_tx and wake
the queues by calling ieee80211_wake_queue(). ieee80211_wake_queue()
does not call wake_tx_queue, so we might still have frames in the
queue at this point.

While the queues were stopped, the socket buffer might have filled up,
and in order for user space to write more, we need to free the frames
in the queue, since they are accounted to the socket. In order to free
them, we first need to transmit them.

This problem cannot be reproduced on low-latency devices, e.g. pci,
since they call ath10k_mac_tx_push_pending() from
ath10k_htt_txrx_compl_task(). ath10k_htt_txrx_compl_task() is not called
on high-latency devices.
Fix the problem by calling ath10k_mac_tx_push_pending(), after
processing rx packets, just like for low-latency devices, also in the
SDIO case. Since we are calling ath10k_mac_tx_push_pending() directly,
we also need to export it.

Signed-off-by: Niklas Cassel 
Signed-off-by: Kalle Valo 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/net/wireless/ath/ath10k/mac.c  |1 +
 drivers/net/wireless/ath/ath10k/sdio.c |3 +++
 2 files changed, 4 insertions(+)

--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -4015,6 +4015,7 @@ void ath10k_mac_tx_push_pending(struct a
rcu_read_unlock();
spin_unlock_bh(&ar->txqs_lock);
 }
+EXPORT_SYMBOL(ath10k_mac_tx_push_pending);
 
 //
 /* Scanning */
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -30,6 +30,7 @@
 #include "debug.h"
 #include "hif.h"
 #include "htc.h"
+#include "mac.h"
 #include "targaddrs.h"
 #include "trace.h"
 #include "sdio.h"
@@ -1346,6 +1347,8 @@ static void ath10k_sdio_irq_handler(stru
break;
} while (time_before(jiffies, timeout) && !done);
 
+   ath10k_mac_tx_push_pending(ar);
+
sdio_claim_host(ar_sdio->func);
 
if (ret && ret != -ECANCELED)




[PATCH 4.14 091/137] spi: rspi: Fix invalid SPI use during system suspend

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Geert Uytterhoeven 

commit c1ca59c22c56930b377a665fdd1b43351887830b upstream.

If the SPI queue is running during system suspend, the system may lock
up.

Fix this by stopping/restarting the queue during system suspend/resume,
by calling spi_master_suspend()/spi_master_resume() from the PM
callbacks.  In-kernel users will receive an -ESHUTDOWN error while
system suspend/resume is in progress.

Based on a patch for sh-msiof by Gaku Inami.

Signed-off-by: Geert Uytterhoeven 
Signed-off-by: Mark Brown 
Cc: sta...@vger.kernel.org
Signed-off-by: Greg Kroah-Hartman 

---
 drivers/spi/spi-rspi.c |   24 
 1 file changed, 24 insertions(+)

--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -1352,12 +1352,36 @@ static const struct platform_device_id s
 
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
 
+#ifdef CONFIG_PM_SLEEP
+static int rspi_suspend(struct device *dev)
+{
+   struct platform_device *pdev = to_platform_device(dev);
+   struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+   return spi_master_suspend(rspi->master);
+}
+
+static int rspi_resume(struct device *dev)
+{
+   struct platform_device *pdev = to_platform_device(dev);
+   struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+   return spi_master_resume(rspi->master);
+}
+
+static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
+#define DEV_PM_OPS &rspi_pm_ops
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver rspi_driver = {
.probe =rspi_probe,
.remove =   rspi_remove,
.id_table = spi_driver_ids,
.driver = {
.name = "renesas_spi",
+   .pm = DEV_PM_OPS,
.of_match_table = of_match_ptr(rspi_of_match),
},
 };




[PATCH 4.14 083/137] floppy: Do not copy a kernel pointer to user memory in FDGETPRM ioctl

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Andy Whitcroft 

commit 65eea8edc315589d6c993cf12dbb5d0e9ef1fe4e upstream.

The final field of a floppy_struct is the field "name", which is a pointer
to a string in kernel memory.  The kernel pointer should not be copied to
user memory.  The FDGETPRM ioctl copies a floppy_struct to user memory,
including this "name" field.  This pointer cannot be used by the user
and it will leak a kernel address to user-space, which will reveal the
location of kernel code and data and undermine KASLR protection.

Model this code after the compat ioctl which copies the returned data
to a previously cleared temporary structure on the stack (excluding the
name pointer) and copy out to userspace from there.  As we already have
an inparam union with an appropriate member and that memory is already
cleared even for read only calls make use of that as a temporary store.
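
A user-space sketch of the offsetof() idea described above (the struct and
field names here are illustrative, not the real floppy_struct):

 #include <stdio.h>
 #include <stddef.h>
 #include <string.h>

 struct geom {
 	unsigned int size;
 	unsigned int sect;
 	unsigned int head;
 	const char *name;	/* kernel pointer: must not be copied out */
 };

 int main(void)
 {
 	struct geom real = { 2880, 18, 2, "name-string-in-kernel" };
 	struct geom tmp;

 	memset(&tmp, 0, sizeof(tmp));
 	memcpy(&tmp, &real, offsetof(struct geom, name)); /* stops before .name */

 	printf("size=%u sect=%u head=%u name=%p\n",
 	       tmp.size, tmp.sect, tmp.head, (void *)tmp.name); /* name stays NULL */
 	return 0;
 }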

Based on an initial patch by Brian Belleville.

CVE-2018-7755
Signed-off-by: Andy Whitcroft 
Broke up long line.
Signed-off-by: Jens Axboe 
Signed-off-by: Greg Kroah-Hartman 

---
 drivers/block/floppy.c |3 +++
 1 file changed, 3 insertions(+)

--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3462,6 +3462,9 @@ static int fd_locked_ioctl(struct block_
  (struct floppy_struct **)&outparam));
if (ret)
return ret;
+   memcpy(&inparam.g, outparam,
+   offsetof(struct floppy_struct, name));
+   outparam = &inparam.g;
break;
case FDMSGON:
UDP->flags |= FTD_MSG;




[PATCH 4.14 059/137] net: phy: xgmiitorgmii: Check phy_driver ready before accessing

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Brandon Maier 

[ Upstream commit ab4e6ee578e88a659938db8fbf33720bc048d29c ]

A phy_device is added to the global mdio_bus list during
phy_device_register(), but its phy_driver doesn't get attached until
phy_probe(). It's therefore possible for of_phy_find_device() in
xgmiitorgmii to return a valid phy with a NULL phy_driver, leading to
a NULL pointer access during the memcpy().

Fixes this Oops:

Unable to handle kernel NULL pointer dereference at virtual address 
pgd = c0004000
[] *pgd=
Internal error: Oops: 5 [#1] PREEMPT SMP ARM
Modules linked in:
CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.14.40 #1
Hardware name: Xilinx Zynq Platform
task: ce4c8d00 task.stack: ce4ca000
PC is at memcpy+0x48/0x330
LR is at xgmiitorgmii_probe+0x90/0xe8
pc : []lr : []psr: 2013
sp : ce4cbb54  ip :   fp : ce4cbb8c
r10:   r9 :   r8 : c0c49178
r7 :   r6 : cdc14718  r5 : ce762800  r4 : cdc14710
r3 :   r2 : 0054  r1 :   r0 : cdc14718
Flags: nzCv  IRQs on  FIQs on  Mode SVC_32  ISA ARM  Segment none
Control: 18c5387d  Table: 404a  DAC: 0051
Process swapper/0 (pid: 1, stack limit = 0xce4ca210)
...
[] (memcpy) from [] (xgmiitorgmii_probe+0x90/0xe8)
[] (xgmiitorgmii_probe) from [] (mdio_probe+0x28/0x34)
[] (mdio_probe) from [] (driver_probe_device+0x254/0x414)
[] (driver_probe_device) from [] 
(__device_attach_driver+0xac/0x10c)
[] (__device_attach_driver) from [] 
(bus_for_each_drv+0x84/0xc8)
[] (bus_for_each_drv) from [] (__device_attach+0xd0/0x134)
[] (__device_attach) from [] 
(device_initial_probe+0x1c/0x20)
[] (device_initial_probe) from [] 
(bus_probe_device+0x98/0xa0)
[] (bus_probe_device) from [] (device_add+0x43c/0x5d0)
[] (device_add) from [] (mdio_device_register+0x34/0x80)
[] (mdio_device_register) from [] 
(of_mdiobus_register+0x170/0x30c)
[] (of_mdiobus_register) from [] (macb_probe+0x710/0xc00)
[] (macb_probe) from [] (platform_drv_probe+0x44/0x80)
[] (platform_drv_probe) from [] 
(driver_probe_device+0x254/0x414)
[] (driver_probe_device) from [] 
(__driver_attach+0x10c/0x118)
[] (__driver_attach) from [] (bus_for_each_dev+0x8c/0xd0)
[] (bus_for_each_dev) from [] (driver_attach+0x2c/0x30)
[] (driver_attach) from [] (bus_add_driver+0x50/0x260)
[] (bus_add_driver) from [] (driver_register+0x88/0x108)
[] (driver_register) from [] 
(__platform_driver_register+0x50/0x58)
[] (__platform_driver_register) from [] 
(macb_driver_init+0x24/0x28)
[] (macb_driver_init) from [] (do_one_initcall+0x60/0x1a4)
[] (do_one_initcall) from [] 
(kernel_init_freeable+0x15c/0x1f8)
[] (kernel_init_freeable) from [] (kernel_init+0x18/0x124)
[] (kernel_init) from [] (ret_from_fork+0x14/0x20)
Code: ba02 f5d1f03c f5d1f05c f5d1f07c (e8b151f8)
---[ end trace 3e4ec21905820a1f ]---

Signed-off-by: Brandon Maier 
Reviewed-by: Andrew Lunn 
Reviewed-by: Florian Fainelli 

Signed-off-by: David S. Miller 

Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/net/phy/xilinx_gmii2rgmii.c |5 +
 1 file changed, 5 insertions(+)

--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -84,6 +84,11 @@ static int xgmiitorgmii_probe(struct mdi
return -EPROBE_DEFER;
}
 
+   if (!priv->phy_dev->drv) {
+   dev_info(dev, "Attached phy not ready\n");
+   return -EPROBE_DEFER;
+   }
+
priv->addr = mdiodev->addr;
priv->phy_drv = priv->phy_dev->drv;
memcpy(&priv->conv_phy_drv, priv->phy_dev->drv,




[PATCH 4.14 028/137] x86/tsc: Add missing header to tsc_msr.c

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Andy Shevchenko 

[ Upstream commit dbd0fbc76c77daac08ddd245afdcbade0d506e19 ]

Add a missing header otherwise compiler warns about missed prototype:

CC  arch/x86/kernel/tsc_msr.o
arch/x86/kernel/tsc_msr.c:73:15: warning: no previous prototype for 
‘cpu_khz_from_msr’ [-Wmissing-prototypes]
   unsigned long cpu_khz_from_msr(void)
 ^~~~

Signed-off-by: Andy Shevchenko 
Signed-off-by: Thomas Gleixner 
Cc: "H. Peter Anvin" 
Cc: Pavel Tatashin 
Link: 
https://lkml.kernel.org/r/20180629193113.84425-4-andriy.shevche...@linux.intel.com
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 arch/x86/kernel/tsc_msr.c |1 +
 1 file changed, 1 insertion(+)

--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -12,6 +12,7 @@
 #include 
 #include 
 #include 
+#include <asm/tsc.h>
 
 #define MAX_NUM_FREQS  9
 




[PATCH 4.14 094/137] USB: fix error handling in usb_driver_claim_interface()

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Alan Stern 

commit bd729f9d67aa9a303d8925bb8c4f06af25f407d1 upstream.

The syzbot fuzzing project found a use-after-free bug in the USB
core.  The bug was caused by usbfs not unbinding from an interface
when the USB device file was closed, which led another process to
attempt the unbind later on, after the private data structure had been
deallocated.

The reason usbfs did not unbind the interface at the appropriate time
was because it thought the interface had never been claimed in the
first place.  This was caused by the fact that
usb_driver_claim_interface() does not clean up properly when
device_bind_driver() returns an error.  Although the error code gets
passed back to the caller, the iface->dev.driver pointer remains set
and iface->condition remains equal to USB_INTERFACE_BOUND.

This patch adds proper error handling to usb_driver_claim_interface().

Signed-off-by: Alan Stern 
Reported-by: syzbot+f84aa7209ccec8295...@syzkaller.appspotmail.com
CC: 
Signed-off-by: Greg Kroah-Hartman 

---
 drivers/usb/core/driver.c |   15 +++
 1 file changed, 15 insertions(+)

--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -565,6 +565,21 @@ int usb_driver_claim_interface(struct us
if (!lpm_disable_error)
usb_unlocked_enable_lpm(udev);
 
+   if (retval) {
+   dev->driver = NULL;
+   usb_set_intfdata(iface, NULL);
+   iface->needs_remote_wakeup = 0;
+   iface->condition = USB_INTERFACE_UNBOUND;
+
+   /*
+* Unbound interfaces are always runtime-PM-disabled
+* and runtime-PM-suspended
+*/
+   if (driver->supports_autosuspend)
+   pm_runtime_disable(dev);
+   pm_runtime_set_suspended(dev);
+   }
+
return retval;
 }
 EXPORT_SYMBOL_GPL(usb_driver_claim_interface);




[PATCH 4.14 081/137] iio: 104-quad-8: Fix off-by-one error in register selection

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: William Breathitt Gray 

[ Upstream commit 2873c3f0e2bd12a7612e905c920c058855f4072a ]

The reset flags operation is selected by bit 2 in the "Reset and Load
Signals Decoders" register, not bit 1.
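
For clarity, bit numbering versus register value (a trivial user-space check):

 #include <stdio.h>

 int main(void)
 {
 	printf("bit 1 -> 0x%02x\n", 1u << 1);	/* 0x02 */
 	printf("bit 2 -> 0x%02x\n", 1u << 2);	/* 0x04 */
 	return 0;
 }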

Fixes: 28e5d3bb0325 ("iio: 104-quad-8: Add IIO support for the ACCES 
104-QUAD-8")
Signed-off-by: William Breathitt Gray 
Signed-off-by: Jonathan Cameron 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/iio/counter/104-quad-8.c |2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

--- a/drivers/iio/counter/104-quad-8.c
+++ b/drivers/iio/counter/104-quad-8.c
@@ -138,7 +138,7 @@ static int quad8_write_raw(struct iio_de
outb(val >> (8 * i), base_offset);
 
/* Reset Borrow, Carry, Compare, and Sign flags */
-   outb(0x02, base_offset + 1);
+   outb(0x04, base_offset + 1);
/* Reset Error flag */
outb(0x06, base_offset + 1);
 




[PATCH 4.14 062/137] rndis_wlan: potential buffer overflow in rndis_wlan_auth_indication()

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Dan Carpenter 

[ Upstream commit ae636fb1554833ee5133ca47bf4b2791b6739c52 ]

This is a static checker fix, not something I have tested.  The issue
is that on the second iteration through the loop, we jump forward by
le32_to_cpu(auth_req->length) bytes.  The problem is that if the length
is more than "buflen" then we end up with a negative "buflen".  A
negative buflen is type promoted to a high positive value and the loop
continues but it's accessing beyond the end of the buffer.

I believe the "auth_req->length" comes from the firmware and if the
firmware is malicious or buggy, you're already toasted so the impact of
this bug is probably not very severe.
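
A user-space illustration of the promotion hazard described above: a negative
signed length compared against sizeof() is converted to a huge unsigned value,
so the loop guard keeps passing:

 #include <stdio.h>

 struct req { unsigned int length; unsigned int flags; };

 int main(void)
 {
 	int buflen = -4;	/* after jumping past the end of the buffer */

 	printf("signed   compare: %d\n", buflen >= (int)sizeof(struct req)); /* 0 */
 	printf("unsigned compare: %d\n", buflen >= sizeof(struct req));      /* 1 */
 	return 0;
 }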

Fixes: 030645aceb3d ("rndis_wlan: handle 802.11 indications from device")
Signed-off-by: Dan Carpenter 
Signed-off-by: Kalle Valo 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/net/wireless/rndis_wlan.c |2 ++
 1 file changed, 2 insertions(+)

--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2928,6 +2928,8 @@ static void rndis_wlan_auth_indication(s
 
while (buflen >= sizeof(*auth_req)) {
auth_req = (void *)buf;
+   if (buflen < le32_to_cpu(auth_req->length))
+   return;
type = "unknown";
flags = le32_to_cpu(auth_req->flags);
pairwise_error = false;




[PATCH 4.14 056/137] ALSA: hda: Add AZX_DCAPS_PM_RUNTIME for AMD Raven Ridge

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Kai-Heng Feng 

[ Upstream commit 1adca4b0cd65c14cb8b8c9c257720385869c3d5f ]

This patch can make audio controller in AMD Raven Ridge gets runtime
suspended to D3, to save ~1W power when it's not in use.

Cc: Vijendar Mukunda 
Signed-off-by: Kai-Heng Feng 
Signed-off-by: Takashi Iwai 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 sound/pci/hda/hda_intel.c |3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2510,7 +2510,8 @@ static const struct pci_device_id azx_id
  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
/* AMD Raven */
{ PCI_DEVICE(0x1022, 0x15e3),
- .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
+AZX_DCAPS_PM_RUNTIME },
/* ATI HDMI */
{ PCI_DEVICE(0x1002, 0x0002),
  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },




[PATCH 4.14 126/137] hwmon: (ina2xx) fix sysfs shunt resistor read access

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Lothar Felten 

[ Upstream commit 3ad867001c91657c46dcf6656d52eb6080286fd5 ]

Fix the sysfs shunt resistor read access: return the shunt resistor
value, not the calibration register contents.

Also update the author's email address.

Signed-off-by: Lothar Felten 
Signed-off-by: Guenter Roeck 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 Documentation/hwmon/ina2xx   |2 +-
 drivers/hwmon/ina2xx.c   |   13 +++--
 include/linux/platform_data/ina2xx.h |2 +-
 3 files changed, 13 insertions(+), 4 deletions(-)

--- a/Documentation/hwmon/ina2xx
+++ b/Documentation/hwmon/ina2xx
@@ -32,7 +32,7 @@ Supported chips:
 Datasheet: Publicly available at the Texas Instruments website
http://www.ti.com/
 
-Author: Lothar Felten 
+Author: Lothar Felten 
 
 Description
 ---
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -17,7 +17,7 @@
  * Bi-directional Current/Power Monitor with I2C Interface
  * Datasheet: http://www.ti.com/product/ina230
  *
- * Copyright (C) 2012 Lothar Felten 
+ * Copyright (C) 2012 Lothar Felten 
  * Thanks to Jan Volkering
  *
  * This program is free software; you can redistribute it and/or modify
@@ -329,6 +329,15 @@ static int ina2xx_set_shunt(struct ina2x
return 0;
 }
 
+static ssize_t ina2xx_show_shunt(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+   struct ina2xx_data *data = dev_get_drvdata(dev);
+
+   return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt);
+}
+
 static ssize_t ina2xx_store_shunt(struct device *dev,
  struct device_attribute *da,
  const char *buf, size_t count)
@@ -403,7 +412,7 @@ static SENSOR_DEVICE_ATTR(power1_input,
 
 /* shunt resistance */
 static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
- ina2xx_show_value, ina2xx_store_shunt,
+ ina2xx_show_shunt, ina2xx_store_shunt,
  INA2XX_CALIBRATION);
 
 /* update interval (ina226 only) */
--- a/include/linux/platform_data/ina2xx.h
+++ b/include/linux/platform_data/ina2xx.h
@@ -1,7 +1,7 @@
 /*
  * Driver for Texas Instruments INA219, INA226 power monitor chips
  *
- * Copyright (C) 2012 Lothar Felten 
+ * Copyright (C) 2012 Lothar Felten 
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as




[PATCH 4.14 105/137] IB/hfi1: Fix SL array bounds check

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Ira Weiny 

commit 0dbfaa9f2813787679e296eb5476e40938ab48c8 upstream.

The SL specified by a user needs to be a valid SL.

Add a range check to the user specified SL value which protects from
running off the end of the SL to SC table.

CC: sta...@vger.kernel.org
Fixes: 7724105686e7 ("IB/hfi1: add driver files")
Signed-off-by: Ira Weiny 
Signed-off-by: Dennis Dalessandro 
Signed-off-by: Jason Gunthorpe 
Signed-off-by: Greg Kroah-Hartman 

---
 drivers/infiniband/hw/hfi1/verbs.c |8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1573,6 +1573,7 @@ static int hfi1_check_ah(struct ib_devic
struct hfi1_pportdata *ppd;
struct hfi1_devdata *dd;
u8 sc5;
+   u8 sl;
 
if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) &&
!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
@@ -1581,8 +1582,13 @@ static int hfi1_check_ah(struct ib_devic
/* test the mapping for validity */
ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
ppd = ppd_from_ibp(ibp);
-   sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
dd = dd_from_ppd(ppd);
+
+   sl = rdma_ah_get_sl(ah_attr);
+   if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
+   return -EINVAL;
+
+   sc5 = ibp->sl_to_sc[sl];
if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
return -EINVAL;
return 0;




[PATCH 4.14 114/137] qed: Wait for MCP halt and resume commands to take place

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Tomer Tayar 

[ Upstream commit 76271809f49056f079e202bf6513d17b0d6dd34d ]

Successive iterations of halting and resuming the management chip (MCP)
might fail, since currently the driver doesn't wait for these operations to
actually take place.
This patch prevents the driver from moving forward before the operations
are reflected in the state register.
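
The polling pattern the patch introduces, reduced to a sketch (names and
constants follow the diff below; error reporting trimmed):

 static int sketch_wait_for_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
 	u32 cpu_state, cnt = 0;

 	/* issue the halt command first, then poll the state register with a
 	 * bounded number of sleeps instead of assuming it took effect */
 	do {
 		msleep(QED_MCP_HALT_SLEEP_MS);
 		cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
 		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
 			return 0;
 	} while (++cnt < QED_MCP_HALT_MAX_RETRIES);

 	return -EBUSY;
 }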

Signed-off-by: Tomer Tayar 
Signed-off-by: Ariel Elior 
Signed-off-by: David S. Miller 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/net/ethernet/qlogic/qed/qed_mcp.c  |   46 -
 drivers/net/ethernet/qlogic/qed/qed_reg_addr.h |1 
 2 files changed, 39 insertions(+), 8 deletions(-)

--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1998,31 +1998,61 @@ qed_mcp_send_drv_version(struct qed_hwfn
return rc;
 }
 
+/* A maximal 100 msec waiting time for the MCP to halt */
+#define QED_MCP_HALT_SLEEP_MS  10
+#define QED_MCP_HALT_MAX_RETRIES   10
+
 int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-   u32 resp = 0, param = 0;
+   u32 resp = 0, param = 0, cpu_state, cnt = 0;
int rc;
 
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
 &param);
-   if (rc)
+   if (rc) {
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+   return rc;
+   }
 
-   return rc;
+   do {
+   msleep(QED_MCP_HALT_SLEEP_MS);
+   cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+   if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
+   break;
+   } while (++cnt < QED_MCP_HALT_MAX_RETRIES);
+
+   if (cnt == QED_MCP_HALT_MAX_RETRIES) {
+   DP_NOTICE(p_hwfn,
+ "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE 
= 0x%08x]\n",
+ qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
+   return -EBUSY;
+   }
+
+   return 0;
 }
 
+#define QED_MCP_RESUME_SLEEP_MS10
+
 int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-   u32 value, cpu_mode;
+   u32 cpu_mode, cpu_state;
 
qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
 
-   value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
-   value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
-   qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+   cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+   qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
+   msleep(QED_MCP_RESUME_SLEEP_MS);
+   cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+
+   if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
+   DP_NOTICE(p_hwfn,
+ "Failed to resume the MCP [CPU_MODE = 0x%08x, 
CPU_STATE = 0x%08x]\n",
+ cpu_mode, cpu_state);
+   return -EBUSY;
+   }
 
-   return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
+   return 0;
 }
 
 int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -554,6 +554,7 @@
0
 #define MCP_REG_CPU_STATE \
0xe05004UL
+#define MCP_REG_CPU_STATE_SOFT_HALTED  (0x1UL << 10)
 #define MCP_REG_CPU_EVENT_MASK \
0xe05008UL
 #define PGLUE_B_REG_PF_BAR0_SIZE \




[PATCH 4.14 124/137] e1000: ensure to free old tx/rx rings in set_ringparam()

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Bo Chen 

[ Upstream commit ee400a3f1bfe7004a3e14b81c38ccc5583c26295 ]

In 'e1000_set_ringparam()', the tx_ring and rx_ring are updated with new values
and the old tx/rx rings are freed only when the device is up. There are resource
leaks on old tx/rx rings when the device is not up. This bug is reported by COD,
a tool for testing kernel module binaries I am building.

This patch fixes the bug by always calling 'kfree()' on old tx/rx rings in
'e1000_set_ringparam()'.

Signed-off-by: Bo Chen 
Reviewed-by: Alexander Duyck 
Tested-by: Aaron Brown 
Signed-off-by: Jeff Kirsher 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/net/ethernet/intel/e1000/e1000_ethtool.c |4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -644,14 +644,14 @@ static int e1000_set_ringparam(struct ne
adapter->tx_ring = tx_old;
e1000_free_all_rx_resources(adapter);
e1000_free_all_tx_resources(adapter);
-   kfree(tx_old);
-   kfree(rx_old);
adapter->rx_ring = rxdr;
adapter->tx_ring = txdr;
err = e1000_up(adapter);
if (err)
goto err_setup;
}
+   kfree(tx_old);
+   kfree(rx_old);
 
clear_bit(__E1000_RESETTING, &adapter->flags);
return 0;




[PATCH 4.14 019/137] md-cluster: clear another nodes suspend_area after the copy is finished

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Guoqing Jiang 

[ Upstream commit 010228e4a932ca1e8365e3b58c8e1e44c16ff793 ]

When one node leaves the cluster or stops the resyncing
(resync or recovery) of the array, the other nodes need to
call recover_bitmaps to continue the unfinished task.

But we need to clear the suspend_area only after the other
nodes have copied the resync information to their bitmap
(by calling bitmap_copy_from_slot). Otherwise, all nodes
could write to the suspend_area even though the suspend_area
is not handled by any node, because area_resyncing
returns 0 at the beginning of raid1_write_request.
That means one node could write to the suspend_area while
another node is resyncing the same area, and the data
could become inconsistent.

So let's clear the suspend_area later to avoid the above issue,
under the protection of the bm lock. It is also straightforward
to clear the suspend_area after the nodes have copied the resync
info to their bitmap.

Signed-off-by: Guoqing Jiang 
Reviewed-by: NeilBrown 
Signed-off-by: Shaohua Li 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/md/md-cluster.c |   19 ++-
 1 file changed, 10 insertions(+), 9 deletions(-)

--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -304,15 +304,6 @@ static void recover_bitmaps(struct md_th
while (cinfo->recovery_map) {
slot = fls64((u64)cinfo->recovery_map) - 1;
 
-   /* Clear suspend_area associated with the bitmap */
-   spin_lock_irq(&cinfo->suspend_lock);
-   list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
-   if (slot == s->slot) {
-   list_del(&s->list);
-   kfree(s);
-   }
-   spin_unlock_irq(&cinfo->suspend_lock);
-
snprintf(str, 64, "bitmap%04d", slot);
bm_lockres = lockres_init(mddev, str, NULL, 1);
if (!bm_lockres) {
@@ -331,6 +322,16 @@ static void recover_bitmaps(struct md_th
pr_err("md-cluster: Could not copy data from bitmap 
%d\n", slot);
goto clear_bit;
}
+
+   /* Clear suspend_area associated with the bitmap */
+   spin_lock_irq(>suspend_lock);
+   list_for_each_entry_safe(s, tmp, >suspend_list, list)
+   if (slot == s->slot) {
+   list_del(>list);
+   kfree(s);
+   }
+   spin_unlock_irq(>suspend_lock);
+
if (hi > 0) {
if (lo < mddev->recovery_cp)
mddev->recovery_cp = lo;




[PATCH 4.14 064/137] wlcore: Add missing PM call for wlcore_cmd_wait_for_event_or_timeout()

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Tony Lindgren 

[ Upstream commit 4ec7cece87b3ed21ffcd407c62fb2f151a366bc1 ]

Otherwise we can get:

WARNING: CPU: 0 PID: 55 at drivers/net/wireless/ti/wlcore/io.h:84

I've only seen this a few times with the runtime PM patches enabled,
so this one is probably not needed before that. Things currently seem
to work based on the current PM implementation's timer. Let's
apply this separately though, in case others are hitting this issue.

Signed-off-by: Tony Lindgren 
Signed-off-by: Kalle Valo 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/net/wireless/ti/wlcore/cmd.c |6 ++
 1 file changed, 6 insertions(+)

--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -35,6 +35,7 @@
 #include "wl12xx_80211.h"
 #include "cmd.h"
 #include "event.h"
+#include "ps.h"
 #include "tx.h"
 #include "hw_ops.h"
 
@@ -191,6 +192,10 @@ int wlcore_cmd_wait_for_event_or_timeout
 
timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
 
+   ret = wl1271_ps_elp_wakeup(wl);
+   if (ret < 0)
+   return ret;
+
do {
if (time_after(jiffies, timeout_time)) {
wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
@@ -222,6 +227,7 @@ int wlcore_cmd_wait_for_event_or_timeout
} while (!event);
 
 out:
+   wl1271_ps_elp_sleep(wl);
kfree(events_vector);
return ret;
 }




[PATCH 4.14 021/137] RDMA/i40w: Hold read semaphore while looking after VMA

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Leon Romanovsky 

[ Upstream commit 5d9a2b0e28759e319a623da33940dbb3ce952b7d ]

VMA lookup is supposed to be performed while mmap_sem is held.

Fixes: f26c7c83395b ("i40iw: Add 2MB page support")
Signed-off-by: Leon Romanovsky 
Signed-off-by: Jason Gunthorpe 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/infiniband/hw/i40iw/i40iw_verbs.c |2 ++
 1 file changed, 2 insertions(+)

--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1408,6 +1408,7 @@ static void i40iw_set_hugetlb_values(u64
struct vm_area_struct *vma;
struct hstate *h;
 
+   down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, addr);
if (vma && is_vm_hugetlb_page(vma)) {
h = hstate_vma(vma);
@@ -1416,6 +1417,7 @@ static void i40iw_set_hugetlb_values(u64
iwmr->page_msk = huge_page_mask(h);
}
}
+   up_read(&current->mm->mmap_sem);
 }
 
 /**




[PATCH 4.14 023/137] media: exynos4-is: Prevent NULL pointer dereference in __isp_video_try_fmt()

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Sylwester Nawrocki 

[ Upstream commit 7c1b9a5aeed91bef98988ac0fcf38c8c1f4f9a3a ]

This patch fixes potential NULL pointer dereference as indicated
by the following static checker warning:

drivers/media/platform/exynos4-is/fimc-isp-video.c:408 isp_video_try_fmt_mplane()
error: NULL dereference inside function '__isp_video_try_fmt(isp, &f->fmt.pix_mp, (0))()'.

Fixes: 34947b8aebe3: ("[media] exynos4-is: Add the FIMC-IS ISP capture DMA driver")

Reported-by: Dan Carpenter 
Signed-off-by: Sylwester Nawrocki 
Signed-off-by: Mauro Carvalho Chehab 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/media/platform/exynos4-is/fimc-isp-video.c |   11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)

--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -384,12 +384,17 @@ static void __isp_video_try_fmt(struct f
struct v4l2_pix_format_mplane *pixm,
const struct fimc_fmt **fmt)
 {
-   *fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+   const struct fimc_fmt *__fmt;
+
+   __fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+
+   if (fmt)
+   *fmt = __fmt;
 
pixm->colorspace = V4L2_COLORSPACE_SRGB;
pixm->field = V4L2_FIELD_NONE;
-   pixm->num_planes = (*fmt)->memplanes;
-   pixm->pixelformat = (*fmt)->fourcc;
+   pixm->num_planes = __fmt->memplanes;
+   pixm->pixelformat = __fmt->fourcc;
/*
 * TODO: double check with the docmentation these width/height
 * constraints are correct.




[PATCH 4.14 026/137] media: fsl-viu: fix error handling in viu_of_probe()

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Alexey Khoroshilov 

[ Upstream commit 662a99e145661c2b35155cf375044deae9b79896 ]

viu_of_probe() ignores failures in i2c_get_adapter() and
tries to unlock an uninitialized mutex on the error path.

This patch streamlines the error handling in viu_of_probe().

Found by Linux Driver Verification project (linuxtesting.org).
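
As background, a minimal, self-contained sketch of the unwind idiom the
reordered labels follow; acquire_a/b/c and release_a/b are hypothetical
stand-ins for the request_mem_region()/ioremap()/v4l2 registration steps
of the real probe path, not code from this driver.

#include <stdio.h>

/* Hypothetical resource helpers; the last step is made to fail so the
 * unwind path below actually runs.
 */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return -1; }
static void release_b(void) { puts("undo b"); }
static void release_a(void) { puts("undo a"); }

static int example_probe(void)
{
        int ret;

        ret = acquire_a();
        if (ret)
                return ret;             /* nothing to undo yet */

        ret = acquire_b();
        if (ret)
                goto err_a;

        ret = acquire_c();
        if (ret)
                goto err_b;

        return 0;

        /* each label undoes only what already succeeded, in reverse order */
err_b:
        release_b();
err_a:
        release_a();
        return ret;
}

int main(void)
{
        return example_probe() ? 1 : 0;
}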

Signed-off-by: Alexey Khoroshilov 
Signed-off-by: Hans Verkuil 
Signed-off-by: Mauro Carvalho Chehab 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/media/platform/fsl-viu.c |   38 +++---
 1 file changed, 23 insertions(+), 15 deletions(-)

--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1417,7 +1417,7 @@ static int viu_of_probe(struct platform_
 sizeof(struct viu_reg), DRV_NAME)) {
dev_err(>dev, "Error while requesting mem region\n");
ret = -EBUSY;
-   goto err;
+   goto err_irq;
}
 
/* remap registers */
@@ -1425,7 +1425,7 @@ static int viu_of_probe(struct platform_
if (!viu_regs) {
dev_err(>dev, "Can't map register set\n");
ret = -ENOMEM;
-   goto err;
+   goto err_irq;
}
 
/* Prepare our private structure */
@@ -1433,7 +1433,7 @@ static int viu_of_probe(struct platform_
if (!viu_dev) {
dev_err(>dev, "Can't allocate private structure\n");
ret = -ENOMEM;
-   goto err;
+   goto err_irq;
}
 
viu_dev->vr = viu_regs;
@@ -1449,16 +1449,21 @@ static int viu_of_probe(struct platform_
ret = v4l2_device_register(viu_dev->dev, _dev->v4l2_dev);
if (ret < 0) {
dev_err(>dev, "v4l2_device_register() failed: %d\n", ret);
-   goto err;
+   goto err_irq;
}
 
ad = i2c_get_adapter(0);
+   if (!ad) {
+   ret = -EFAULT;
+   dev_err(>dev, "couldn't get i2c adapter\n");
+   goto err_v4l2;
+   }
 
v4l2_ctrl_handler_init(_dev->hdl, 5);
if (viu_dev->hdl.error) {
ret = viu_dev->hdl.error;
dev_err(>dev, "couldn't register control\n");
-   goto err_vdev;
+   goto err_i2c;
}
/* This control handler will inherit the control(s) from the
   sub-device(s). */
@@ -1475,7 +1480,7 @@ static int viu_of_probe(struct platform_
vdev = video_device_alloc();
if (vdev == NULL) {
ret = -ENOMEM;
-   goto err_vdev;
+   goto err_hdl;
}
 
*vdev = viu_template;
@@ -1496,7 +1501,7 @@ static int viu_of_probe(struct platform_
ret = video_register_device(viu_dev->vdev, VFL_TYPE_GRABBER, -1);
if (ret < 0) {
video_device_release(viu_dev->vdev);
-   goto err_vdev;
+   goto err_unlock;
}
 
/* enable VIU clock */
@@ -1504,12 +1509,12 @@ static int viu_of_probe(struct platform_
if (IS_ERR(clk)) {
dev_err(>dev, "failed to lookup the clock!\n");
ret = PTR_ERR(clk);
-   goto err_clk;
+   goto err_vdev;
}
ret = clk_prepare_enable(clk);
if (ret) {
dev_err(>dev, "failed to enable the clock!\n");
-   goto err_clk;
+   goto err_vdev;
}
viu_dev->clk = clk;
 
@@ -1520,7 +1525,7 @@ static int viu_of_probe(struct platform_
if (request_irq(viu_dev->irq, viu_intr, 0, "viu", (void *)viu_dev)) {
dev_err(>dev, "Request VIU IRQ failed.\n");
ret = -ENODEV;
-   goto err_irq;
+   goto err_clk;
}
 
mutex_unlock(_dev->lock);
@@ -1528,16 +1533,19 @@ static int viu_of_probe(struct platform_
dev_info(>dev, "Freescale VIU Video Capture Board\n");
return ret;
 
-err_irq:
-   clk_disable_unprepare(viu_dev->clk);
 err_clk:
-   video_unregister_device(viu_dev->vdev);
+   clk_disable_unprepare(viu_dev->clk);
 err_vdev:
-   v4l2_ctrl_handler_free(_dev->hdl);
+   video_unregister_device(viu_dev->vdev);
+err_unlock:
mutex_unlock(_dev->lock);
+err_hdl:
+   v4l2_ctrl_handler_free(_dev->hdl);
+err_i2c:
i2c_put_adapter(ad);
+err_v4l2:
v4l2_device_unregister(_dev->v4l2_dev);
-err:
+err_irq:
irq_dispose_mapping(viu_irq);
return ret;
 }




[PATCH 4.14 078/137] EDAC: Fix memleak in module init error path

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Johan Hovold 

[ Upstream commit 4708aa85d50cc6e962dfa8acf5ad4e0d290a21db ]

Make sure to use put_device() to free the initialised struct device so
that resources managed by driver core also gets released in the event of
a registration failure.
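
For context, a rough sketch of the rule being applied (not the driver's
code; example_init()/example_release() are hypothetical): once
device_initialize() has been called, the allocation must be released
with put_device(), which drops the kobject reference and runs the
release() callback, whereas a bare kfree() leaks whatever the driver
core attached to the device.

#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

static void example_release(struct device *dev)
{
        kfree(dev);
}

static int __init example_init(void)
{
        struct device *dev;
        int err;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        device_initialize(dev);
        dev->release = example_release;

        err = dev_set_name(dev, "example");
        if (err)
                goto out_put;

        err = device_add(dev);
        if (err)
                goto out_put;

        return 0;

out_put:
        /* drops the reference taken by device_initialize() and ends up
         * in example_release(); a plain kfree() would skip the resources
         * the driver core attached to the device
         */
        put_device(dev);
        return err;
}
module_init(example_init);

MODULE_LICENSE("GPL");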

Signed-off-by: Johan Hovold 
Cc: Denis Kirjanov 
Cc: Mauro Carvalho Chehab 
Cc: linux-edac 
Fixes: 2d56b109e3a5 ("EDAC: Handle error path in edac_mc_sysfs_init() properly")
Link: http://lkml.kernel.org/r/20180612124335.6420-1-jo...@kernel.org
Signed-off-by: Borislav Petkov 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/edac/edac_mc_sysfs.c |6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -1097,14 +1097,14 @@ int __init edac_mc_sysfs_init(void)
 
err = device_add(mci_pdev);
if (err < 0)
-   goto out_dev_free;
+   goto out_put_device;
 
edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
 
return 0;
 
- out_dev_free:
-   kfree(mci_pdev);
+ out_put_device:
+   put_device(mci_pdev);
  out:
return err;
 }




[PATCH 4.14 067/137] HID: hid-ntrig: add error handling for sysfs_create_group

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Zhouyang Jia 

[ Upstream commit 44d4d51de9a3534a2b63d69efda02a10e66541e4 ]

When sysfs_create_group fails, the lack of error-handling code may
cause unexpected results.

This patch adds error-handling code after calling sysfs_create_group.

Signed-off-by: Zhouyang Jia 
Signed-off-by: Jiri Kosina 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/hid/hid-ntrig.c |2 ++
 1 file changed, 2 insertions(+)

--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -955,6 +955,8 @@ static int ntrig_probe(struct hid_device
 
ret = sysfs_create_group(>dev.kobj,
_attribute_group);
+   if (ret)
+   hid_err(hdev, "cannot create sysfs group\n");
 
return 0;
 err_free:




[PATCH 4.14 071/137] iomap: complete partial direct I/O writes synchronously

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Andreas Gruenbacher 

[ Upstream commit ebf00be37de35788cad72f4f20b4a39e30c0be4a ]

According to xfstest generic/240, applications seem to expect direct I/O
writes to either complete as a whole or to fail; short direct I/O writes
are apparently not appreciated.  This means that when only part of an
asynchronous direct I/O write succeeds, we can either fail the entire
write, or we can wait for the partial write to complete and retry the
remaining write as buffered I/O.  The old __blockdev_direct_IO helper
has code for waiting for partial writes to complete; the new
iomap_dio_rw iomap helper does not.

The above mentioned fallback mode is needed for gfs2, which doesn't
allow block allocations under direct I/O to avoid taking cluster-wide
exclusive locks.  As a consequence, an asynchronous direct I/O write to
a file range that contains a hole will result in a short write.  In that
case, wait for the short write to complete to allow gfs2 to recover.

Signed-off-by: Andreas Gruenbacher 
Signed-off-by: Christoph Hellwig 
Reviewed-by: Darrick J. Wong 
Signed-off-by: Darrick J. Wong 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 fs/iomap.c |   21 +++--
 1 file changed, 11 insertions(+), 10 deletions(-)

--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -693,6 +693,7 @@ struct iomap_dio {
atomic_tref;
unsignedflags;
int error;
+   boolwait_for_completion;
 
union {
/* used during submission and for synchronous completion: */
@@ -793,9 +794,8 @@ static void iomap_dio_bio_end_io(struct
iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
 
if (atomic_dec_and_test(>ref)) {
-   if (is_sync_kiocb(dio->iocb)) {
+   if (dio->wait_for_completion) {
struct task_struct *waiter = dio->submit.waiter;
-
WRITE_ONCE(dio->submit.waiter, NULL);
wake_up_process(waiter);
} else if (dio->flags & IOMAP_DIO_WRITE) {
@@ -980,13 +980,12 @@ iomap_dio_rw(struct kiocb *iocb, struct
dio->end_io = end_io;
dio->error = 0;
dio->flags = 0;
+   dio->wait_for_completion = is_sync_kiocb(iocb);
 
dio->submit.iter = iter;
-   if (is_sync_kiocb(iocb)) {
-   dio->submit.waiter = current;
-   dio->submit.cookie = BLK_QC_T_NONE;
-   dio->submit.last_queue = NULL;
-   }
+   dio->submit.waiter = current;
+   dio->submit.cookie = BLK_QC_T_NONE;
+   dio->submit.last_queue = NULL;
 
if (iov_iter_rw(iter) == READ) {
if (pos >= dio->i_size)
@@ -1016,7 +1015,7 @@ iomap_dio_rw(struct kiocb *iocb, struct
WARN_ON_ONCE(ret);
ret = 0;
 
-   if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
+   if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
!inode->i_sb->s_dio_done_wq) {
ret = sb_init_dio_done_wq(inode->i_sb);
if (ret < 0)
@@ -1031,8 +1030,10 @@ iomap_dio_rw(struct kiocb *iocb, struct
iomap_dio_actor);
if (ret <= 0) {
/* magic error code to fall back to buffered I/O */
-   if (ret == -ENOTBLK)
+   if (ret == -ENOTBLK) {
+   dio->wait_for_completion = true;
ret = 0;
+   }
break;
}
pos += ret;
@@ -1046,7 +1047,7 @@ iomap_dio_rw(struct kiocb *iocb, struct
iomap_dio_set_error(dio, ret);
 
if (!atomic_dec_and_test(>ref)) {
-   if (!is_sync_kiocb(iocb))
+   if (!dio->wait_for_completion)
return -EIOCBQUEUED;
 
for (;;) {




[PATCH 4.14 040/137] s390/sysinfo: add missing #ifdef CONFIG_PROC_FS

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Heiko Carstens 

[ Upstream commit 9f35b818a2f90fb6cb291aa0c9f835d4f0974a9a ]

Get rid of this compile warning for !PROC_FS:

  CC  arch/s390/kernel/sysinfo.o
arch/s390/kernel/sysinfo.c:275:12: warning: 'sysinfo_show' defined but not used 
[-Wunused-function]
 static int sysinfo_show(struct seq_file *m, void *v)

Signed-off-by: Heiko Carstens 
Signed-off-by: Martin Schwidefsky 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 arch/s390/kernel/sysinfo.c |4 
 1 file changed, 4 insertions(+)

--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -59,6 +59,8 @@ int stsi(void *sysinfo, int fc, int sel1
 }
 EXPORT_SYMBOL(stsi);
 
+#ifdef CONFIG_PROC_FS
+
 static bool convert_ext_name(unsigned char encoding, char *name, size_t len)
 {
switch (encoding) {
@@ -311,6 +313,8 @@ static int __init sysinfo_create_proc(vo
 }
 device_initcall(sysinfo_create_proc);
 
+#endif /* CONFIG_PROC_FS */
+
 /*
  * Service levels interface.
  */




[PATCH 4.14 012/137] iommu/amd: make sure TLB to be flushed before IOVA freed

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Zhen Lei 

[ Upstream commit 3c120143f584360a13614787e23ae2cdcb5e5ccd ]

Although the mapping has already been removed from the page table, it may
still exist in the TLB. If the freed IOVA is reused by someone else before
the flush operation has completed, the new user cannot correctly access its
memory.

Signed-off-by: Zhen Lei 
Fixes: b1516a14657a ('iommu/amd: Implement flush queue')
Signed-off-by: Joerg Roedel 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/iommu/amd_iommu.c |2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2400,9 +2400,9 @@ static void __unmap_single(struct dma_op
}
 
if (amd_iommu_unmap_flush) {
-   dma_ops_free_iova(dma_dom, dma_addr, pages);
domain_flush_tlb(_dom->domain);
domain_flush_complete(_dom->domain);
+   dma_ops_free_iova(dma_dom, dma_addr, pages);
} else {
pages = __roundup_pow_of_two(pages);
queue_iova(_dom->iovad, dma_addr >> PAGE_SHIFT, pages, 0);




[PATCH 4.18 171/228] bus: ti-sysc: Fix module register ioremap for larger offsets

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Tony Lindgren 

[ Upstream commit 0ef8e3bb974af56346b34393e643d491d9141c66 ]

We can have the interconnect target module control registers pretty
much anywhere within the module range. The current code attempts an
incomplete optimization of the ioremap size but does it wrong and
it only works for registers at the beginning of the module.

Let's just use the largest control register to calculate the ioremap
size. The ioremapped range is for most part cached anyways so there
is no need for size optimization. Let's also update the comments
accordingly.

Fixes: 0eecc636e5a2 ("bus: ti-sysc: Add minimal TI sysc interconnect
target driver")
Signed-off-by: Tony Lindgren 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/bus/ti-sysc.c |   29 +
 1 file changed, 13 insertions(+), 16 deletions(-)

--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -490,32 +490,29 @@ static int sysc_check_registers(struct s
 
 /**
  * syc_ioremap - ioremap register space for the interconnect target module
- * @ddata: deviec driver data
+ * @ddata: device driver data
  *
  * Note that the interconnect target module registers can be anywhere
- * within the first child device address space. For example, SGX has
- * them at offset 0x1fc00 in the 32MB module address space. We just
- * what we need around the interconnect target module registers.
+ * within the interconnect target module range. For example, SGX has
+ * them at offset 0x1fc00 in the 32MB module address space. And cpsw
+ * has them at offset 0x1200 in the CPSW_WR child. Usually the
+ * the interconnect target module registers are at the beginning of
+ * the module range though.
  */
 static int sysc_ioremap(struct sysc *ddata)
 {
-   u32 size = 0;
+   int size;
 
-   if (ddata->offsets[SYSC_SYSSTATUS] >= 0)
-   size = ddata->offsets[SYSC_SYSSTATUS];
-   else if (ddata->offsets[SYSC_SYSCONFIG] >= 0)
-   size = ddata->offsets[SYSC_SYSCONFIG];
-   else if (ddata->offsets[SYSC_REVISION] >= 0)
-   size = ddata->offsets[SYSC_REVISION];
-   else
-   return -EINVAL;
+   size = max3(ddata->offsets[SYSC_REVISION],
+   ddata->offsets[SYSC_SYSCONFIG],
+   ddata->offsets[SYSC_SYSSTATUS]);
 
-   size &= 0xfff00;
-   size += SZ_256;
+   if (size < 0 || (size + sizeof(u32)) > ddata->module_size)
+   return -EINVAL;
 
ddata->module_va = devm_ioremap(ddata->dev,
ddata->module_pa,
-   size);
+   size + sizeof(u32));
if (!ddata->module_va)
return -EIO;
 




[PATCH 4.18 204/228] e1000: check on netif_running() before calling e1000_up()

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Bo Chen 

[ Upstream commit cf1acec008f8d7761aa3fd7c4bca7e17b2d2512d ]

When the device is not up, the call to 'e1000_up()' from the error handling path
of 'e1000_set_ringparam()' causes a kernel oops with a null-pointer
dereference. The null-pointer dereference is triggered in function
'e1000_alloc_rx_buffers()' at line 'buffer_info = &rx_ring->buffer_info[i]'.

This bug was reported by COD, a tool for testing kernel module binaries I am
building. This bug was also detected by KFI from Dr. Kai Cong.

This patch fixes the bug by checking on 'netif_running()' before calling
'e1000_up()' in 'e1000_set_ringparam()'.

Signed-off-by: Bo Chen 
Acked-by: Alexander Duyck 
Tested-by: Aaron Brown 
Signed-off-by: Jeff Kirsher 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/net/ethernet/intel/e1000/e1000_ethtool.c |3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -644,7 +644,8 @@ err_setup_rx:
 err_alloc_rx:
kfree(txdr);
 err_alloc_tx:
-   e1000_up(adapter);
+   if (netif_running(adapter->netdev))
+   e1000_up(adapter);
 err_setup:
clear_bit(__E1000_RESETTING, >flags);
return err;




[PATCH 4.18 228/228] powerpc/pseries: Fix unitialized timer reset on migration

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Michael Bringmann 

commit 8604895a34d92f5e186ceb931b0d1b384030ea3d upstream.

After migration of a powerpc LPAR, the kernel executes code to
update the system state to reflect new platform characteristics.

Such changes include modifications to device tree properties provided
to the system by PHYP. Property notifications received by the
post_mobility_fixup() code are passed along to the kernel in general
through a call to of_update_property() which in turn passes such
events back to all modules through entries like the '.notifier_call'
function within the NUMA module.

When the NUMA module updates its state, it resets its event timer. If
this occurs after a previous call to stop_topology_update() or on a
system without VPHN enabled, the code runs into an uninitialized timer
structure and crashes. This patch adds a safety check along this path
toward the problem code.

An example crash log is as follows.

  ibmvscsi 3081: Re-enabling adapter!
  [ cut here ]
  kernel BUG at kernel/time/timer.c:958!
  Oops: Exception in kernel mode, sig: 5 [#1]
  LE SMP NR_CPUS=2048 NUMA pSeries
  Modules linked in: nfsv3 nfs_acl nfs tcp_diag udp_diag inet_diag lockd 
unix_diag af_packet_diag netlink_diag grace fscache sunrpc xts vmx_crypto 
pseries_rng sg binfmt_misc ip_tables xfs libcrc32c sd_mod ibmvscsi ibmveth 
scsi_transport_srp dm_mirror dm_region_hash dm_log dm_mod
  CPU: 11 PID: 3067 Comm: drmgr Not tainted 4.17.0+ #179
  ...
  NIP mod_timer+0x4c/0x400
  LR  reset_topology_timer+0x40/0x60
  Call Trace:
0xc003f9407830 (unreliable)
reset_topology_timer+0x40/0x60
dt_update_callback+0x100/0x120
notifier_call_chain+0x90/0x100
__blocking_notifier_call_chain+0x60/0x90
of_property_notify+0x90/0xd0
of_update_property+0x104/0x150
update_dt_property+0xdc/0x1f0
pseries_devicetree_update+0x2d0/0x510
post_mobility_fixup+0x7c/0xf0
migration_store+0xa4/0xc0
kobj_attr_store+0x30/0x60
sysfs_kf_write+0x64/0xa0
kernfs_fop_write+0x16c/0x240
__vfs_write+0x40/0x200
vfs_write+0xc8/0x240
ksys_write+0x5c/0x100
system_call+0x58/0x6c

Fixes: 5d88aa85c00b ("powerpc/pseries: Update CPU maps when device tree is 
updated")
Cc: sta...@vger.kernel.org # v3.10+
Signed-off-by: Michael Bringmann 
Signed-off-by: Michael Ellerman 
Signed-off-by: Greg Kroah-Hartman 

---
 arch/powerpc/mm/numa.c |3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1452,7 +1452,8 @@ static struct timer_list topology_timer;
 
 static void reset_topology_timer(void)
 {
-   mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
+   if (vphn_enabled)
+   mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
 }
 
 #ifdef CONFIG_SMP




[PATCH 4.18 226/228] powerpc: fix csum_ipv6_magic() on little endian platforms

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Christophe Leroy 

commit 85682a7e3b9c664995ad477520f917039afdc330 upstream.

On little endian platforms, csum_ipv6_magic() keeps len and proto in
CPU byte order. This generates bad results, leading to ICMPv6 packets
from other hosts being dropped by powerpc64le platforms.

In order to fix this, len and proto should be converted to network
byte order, i.e. big-endian byte order. However, checksumming 0x12345678
and 0x56341278 provides the exact same result, so it is enough to
rotate the sum of len and proto by 1 byte.

PPC32 only supports big endian, so the fix is only needed for PPC64.
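
A small standalone illustration of that equivalence (not part of the
patch): folding both 32-bit values into the 16-bit ones'-complement sum
used by the Internet checksum yields the same 0x68ac, which is why
rotating the len+proto sum by one byte is enough.

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit value into the 16-bit ones'-complement sum used by the
 * Internet checksum: add the two 16-bit halves, then wrap the carry.
 */
static uint16_t fold16(uint32_t v)
{
        uint32_t s = (v >> 16) + (v & 0xffff);

        s = (s >> 16) + (s & 0xffff);
        return (uint16_t)s;
}

int main(void)
{
        /* 0x1234 + 0x5678 == 0x5634 + 0x1278 == 0x68ac */
        printf("0x12345678 -> 0x%04x\n", fold16(0x12345678));
        printf("0x56341278 -> 0x%04x\n", fold16(0x56341278));
        return 0;
}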

Fixes: e9c4943a107b ("powerpc: Implement csum_ipv6_magic in assembly")
Reported-by: Jianlin Shi 
Reported-by: Xin Long 
Cc:  # 4.18+
Signed-off-by: Christophe Leroy 
Tested-by: Xin Long 
Signed-off-by: Michael Ellerman 
Signed-off-by: Greg Kroah-Hartman 

---
 arch/powerpc/lib/checksum_64.S |3 +++
 1 file changed, 3 insertions(+)

--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -443,6 +443,9 @@ _GLOBAL(csum_ipv6_magic)
addc  r0, r8, r9
ld  r10, 0(r4)
ld  r11, 8(r4)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+   rotldi  r5, r5, 8
+#endif
adde  r0, r0, r10
add r5, r5, r7
adde  r0, r0, r11




[PATCH 4.18 213/228] Revert "ARM: dts: imx7d: Invert legacy PCI irq mapping"

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Leonard Crestez 

[ Upstream commit 538d6e9d597584e80514698e24321645debde78f ]

This reverts commit 1c86c9dd82f859b474474a7fee0d5195da2c9c1d.

That commit followed the reference manual but unfortunately the imx7d
manual is incorrect.

Tested with ath9k pcie card and confirmed internally.

Signed-off-by: Leonard Crestez 
Acked-by: Lucas Stach 
Fixes: 1c86c9dd82f8 ("ARM: dts: imx7d: Invert legacy PCI irq mapping")
Signed-off-by: Shawn Guo 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 arch/arm/boot/dts/imx7d.dtsi |   12 
 1 file changed, 8 insertions(+), 4 deletions(-)

--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -125,10 +125,14 @@
interrupt-names = "msi";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
-   interrupt-map = <0 0 0 1  GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
-   <0 0 0 2  GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
-   <0 0 0 3  GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
-   <0 0 0 4  GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+   /*
+* Reference manual lists pci irqs incorrectly
+* Real hardware ordering is same as imx6: D+MSI, C, B, A
+*/
+   interrupt-map = <0 0 0 1  GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+   <0 0 0 2  GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+   <0 0 0 3  GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+   <0 0 0 4  GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>,
 <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>,
 <&clks IMX7D_PCIE_PHY_ROOT_CLK>;




[PATCH 4.14 032/137] scsi: target/iscsi: Make iscsit_ta_authentication() respect the output buffer size

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Bart Van Assche 

[ Upstream commit 35bea5c84fd13c643cce63f0b5cd4b148f8c901d ]

Fixes: e48354ce078c ("iscsi-target: Add iSCSI fabric support for target v4.1")
Signed-off-by: Bart Van Assche 
Reviewed-by: Mike Christie 
Cc: Mike Christie 
Cc: Christoph Hellwig 
Cc: Hannes Reinecke 
Signed-off-by: Martin K. Petersen 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/target/iscsi/iscsi_target_tpg.c |3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -633,8 +633,7 @@ int iscsit_ta_authentication(struct iscs
none = strstr(buf1, NONE);
if (none)
goto out;
-   strncat(buf1, ",", strlen(","));
-   strncat(buf1, NONE, strlen(NONE));
+   strlcat(buf1, "," NONE, sizeof(buf1));
if (iscsi_update_param_value(param, buf1) < 0)
return -EINVAL;
}




[PATCH 4.18 214/228] drm/amdgpu: Enable/disable gfx PG feature in rlc safe mode

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Rex Zhu 

[ Upstream commit 8ef23364b654d44244400d79988e677e504b21ba ]

This is required by the gfx hw and can fix the rlc hang when
doing S3 stress tests on Cz/St.

Reviewed-by: Alex Deucher 
Signed-off-by: Hang Zhou 
Signed-off-by: Rex Zhu 
Signed-off-by: Alex Deucher 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c |   11 ++-
 1 file changed, 10 insertions(+), 1 deletion(-)

--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -5614,6 +5614,11 @@ static int gfx_v8_0_set_powergating_stat
if (amdgpu_sriov_vf(adev))
return 0;
 
+   if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+   AMD_PG_SUPPORT_RLC_SMU_HS |
+   AMD_PG_SUPPORT_CP |
+   AMD_PG_SUPPORT_GFX_DMG))
+   adev->gfx.rlc.funcs->enter_safe_mode(adev);
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
@@ -5663,7 +5668,11 @@ static int gfx_v8_0_set_powergating_stat
default:
break;
}
-
+   if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+   AMD_PG_SUPPORT_RLC_SMU_HS |
+   AMD_PG_SUPPORT_CP |
+   AMD_PG_SUPPORT_GFX_DMG))
+   adev->gfx.rlc.funcs->exit_safe_mode(adev);
return 0;
 }
 




[PATCH 4.14 014/137] USB: serial: kobil_sct: fix modem-status error handling

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Johan Hovold 

[ Upstream commit a420b5d939ee58f1d950f0ea782834056520aeaa ]

Make sure to return -EIO in case of a short modem-status read request.

While at it, split the debug message to not include the (zeroed)
transfer-buffer content in case of errors.

Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Johan Hovold 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/usb/serial/kobil_sct.c |   12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)

--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -397,12 +397,20 @@ static int kobil_tiocmget(struct tty_str
  transfer_buffer_length,
  KOBIL_TIMEOUT);
 
-   dev_dbg(>dev, "%s - Send get_status_line_state URB returns: %i. 
Statusline: %02x\n",
-   __func__, result, transfer_buffer[0]);
+   dev_dbg(>dev, "Send get_status_line_state URB returns: %i\n",
+   result);
+   if (result < 1) {
+   if (result >= 0)
+   result = -EIO;
+   goto out_free;
+   }
+
+   dev_dbg(>dev, "Statusline: %02x\n", transfer_buffer[0]);
 
result = 0;
if ((transfer_buffer[0] & SUSBCR_GSL_DSR) != 0)
result = TIOCM_DSR;
+out_free:
kfree(transfer_buffer);
return result;
 }




[PATCH 4.14 065/137] ARM: mvebu: declare asm symbols as character arrays in pmsu.c

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Ethan Tuttle 

[ Upstream commit d0d378ff451a66e486488eec842e507d28145813 ]

With CONFIG_FORTIFY_SOURCE, memcpy uses the declared size of operands to
detect buffer overflows.  If src or dest is declared as a char, an attempt to
copy more than one byte will result in a fortify_panic().

Address this problem in mvebu_setup_boot_addr_wa() by declaring
mvebu_boot_wa_start and mvebu_boot_wa_end as character arrays.  Also remove
a couple addressof operators to avoid "arithmetic on pointer to an
incomplete type" compiler error.

See commit 54a7d50b9205 ("x86: mark kprobe templates as character arrays,
not single characters") for a similar fix.
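
A minimal sketch of the difference, with made-up symbol names
(wa_start/wa_end) standing in for the assembly-defined symbols:
declaring the symbols as incomplete arrays leaves their size unknown to
FORTIFY_SOURCE, and the array names can be used directly without the
address-of operator.

#include <string.h>

/* Declared as a plain 'char' the compiler assumes a one-byte object and
 * a fortified memcpy() of the whole region panics; declared as an
 * incomplete array the size is unknown and the copy is accepted.
 */
extern char wa_start[];
extern char wa_end[];

void copy_workaround(void *dst)
{
        /* array names decay to pointers, so no '&' is needed */
        size_t len = wa_end - wa_start;

        memcpy(dst, wa_start, len);
}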

Fixes "detected buffer overflow in memcpy" error during init on some mvebu
systems (armada-370-xp, armada-375):

(fortify_panic) from (mvebu_setup_boot_addr_wa+0xb0/0xb4)
(mvebu_setup_boot_addr_wa) from (mvebu_v7_cpu_pm_init+0x154/0x204)
(mvebu_v7_cpu_pm_init) from (do_one_initcall+0x7c/0x1a8)
(do_one_initcall) from (kernel_init_freeable+0x1bc/0x254)
(kernel_init_freeable) from (kernel_init+0x8/0x114)
(kernel_init) from (ret_from_fork+0x14/0x2c)

Signed-off-by: Ethan Tuttle 
Tested-by: Ethan Tuttle 
Signed-off-by: Gregory CLEMENT 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 arch/arm/mach-mvebu/pmsu.c |6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -116,8 +116,8 @@ void mvebu_pmsu_set_cpu_boot_addr(int hw
PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
 }
 
-extern unsigned char mvebu_boot_wa_start;
-extern unsigned char mvebu_boot_wa_end;
+extern unsigned char mvebu_boot_wa_start[];
+extern unsigned char mvebu_boot_wa_end[];
 
 /*
  * This function sets up the boot address workaround needed for SMP
@@ -130,7 +130,7 @@ int mvebu_setup_boot_addr_wa(unsigned in
 phys_addr_t resume_addr_reg)
 {
void __iomem *sram_virt_base;
-   u32 code_len = _boot_wa_end - _boot_wa_start;
+   u32 code_len = mvebu_boot_wa_end - mvebu_boot_wa_start;
 
mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE);
mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute,




[PATCH 4.14 107/137] IB/hfi1: Fix context recovery when PBC has an UnsupportedVL

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Michael J. Ruhl 

commit d623500b3c4efd8d4e945ac9003c6b87b469a9ab upstream.

If a packet stream uses an UnsupportedVL (virtual lane), the send
engine will not send the packet, and it will not indicate that an
error has occurred.  This will cause the packet stream to block.

HFI has 8 virtual lanes available for packet streams.  Each lane can
be enabled or disabled using the UnsupportedVL mask.  If a lane is
disabled, adding a packet to the send context must be disallowed.

The current mask for determining unsupported VLs defaults to 0 (allow
all).  This is incorrect.  Only the VLs that are defined should be
allowed.

Determine which VLs are disabled (mtu == 0), and set the appropriate
unsupported bit in the mask.  The correct mask will allow the send
engine to error on the invalid VL, and error recovery will work
correctly.
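
A small standalone illustration of the mask computation (the MTU table
below is invented): with eight VLs and only VL0/VL1 configured, every VL
whose MTU is zero gets its bit set in the unsupported mask, giving 0xfc.

#include <stdint.h>
#include <stdio.h>

#define NUM_VLS 8

int main(void)
{
        /* hypothetical per-VL MTUs: only VL0 and VL1 are configured */
        unsigned int mtu[NUM_VLS] = { 4096, 2048, 0, 0, 0, 0, 0, 0 };
        uint64_t mask = 0;
        int i;

        for (i = 0; i < NUM_VLS; i++)
                if (!mtu[i])
                        mask |= 1ULL << i;      /* mark VL unsupported */

        printf("unsupported VL mask: 0x%02llx\n",
               (unsigned long long)mask);       /* prints 0xfc */
        return 0;
}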

Cc:  # 4.9.x+
Fixes: 7724105686e7 ("IB/hfi1: add driver files")
Reviewed-by: Mike Marciniszyn 
Reviewed-by: Lukasz Odzioba 
Signed-off-by: Michael J. Ruhl 
Signed-off-by: Dennis Dalessandro 
Signed-off-by: Jason Gunthorpe 
Signed-off-by: Greg Kroah-Hartman 

---
 drivers/infiniband/hw/hfi1/pio.c |9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)

--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -86,6 +86,7 @@ void pio_send_control(struct hfi1_devdat
unsigned long flags;
int write = 1;  /* write sendctrl back */
int flush = 0;  /* re-read sendctrl to make sure it is flushed */
+   int i;
 
spin_lock_irqsave(>sendctrl_lock, flags);
 
@@ -95,9 +96,13 @@ void pio_send_control(struct hfi1_devdat
reg |= SEND_CTRL_SEND_ENABLE_SMASK;
/* Fall through */
case PSC_DATA_VL_ENABLE:
+   mask = 0;
+   for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
+   if (!dd->vld[i].mtu)
+   mask |= BIT_ULL(i);
/* Disallow sending on VLs not enabled */
-   mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
-   SEND_CTRL_UNSUPPORTED_VL_SHIFT;
+   mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
+   SEND_CTRL_UNSUPPORTED_VL_SHIFT;
reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
break;
case PSC_GLOBAL_DISABLE:




[PATCH 4.14 115/137] qed: Prevent a possible deadlock during driver load and unload

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Tomer Tayar 

[ Upstream commit eaa50fc59e5841910987e90b0438b2643041f508 ]

The MFW manages an internal lock to prevent concurrent hardware
(de)initialization of different PFs.
This, together with the busy-waiting for the MFW's responses for commands,
might lead to a deadlock during concurrent load or unload of PFs.
This patch adds the option to sleep within the busy-waiting, and uses it
for the (un)load requests (which are not sent from an interrupt context) to
prevent the possible deadlock.

Signed-off-by: Tomer Tayar 
Signed-off-by: Ariel Elior 
Signed-off-by: David S. Miller 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/net/ethernet/qlogic/qed/qed_mcp.c |   43 +-
 drivers/net/ethernet/qlogic/qed/qed_mcp.h |   21 +-
 2 files changed, 44 insertions(+), 20 deletions(-)

--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -47,7 +47,7 @@
 #include "qed_reg_addr.h"
 #include "qed_sriov.h"
 
-#define CHIP_MCP_RESP_ITER_US 10
+#define QED_MCP_RESP_ITER_US   10
 
 #define QED_DRV_MB_MAX_RETRIES (500 * 1000)/* Account for 5 sec */
 #define QED_MCP_RESET_RETRIES  (50 * 1000) /* Account for 500 msec */
@@ -316,7 +316,7 @@ static void qed_mcp_reread_offsets(struc
 
 int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-   u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
+   u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
int rc = 0;
 
/* Ensure that only a single thread is accessing the mailbox */
@@ -448,10 +448,10 @@ static int
 _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
   struct qed_ptt *p_ptt,
   struct qed_mcp_mb_params *p_mb_params,
-  u32 max_retries, u32 delay)
+  u32 max_retries, u32 usecs)
 {
+   u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
struct qed_mcp_cmd_elem *p_cmd_elem;
-   u32 cnt = 0;
u16 seq_num;
int rc = 0;
 
@@ -474,7 +474,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *
goto err;
 
spin_unlock_bh(_hwfn->mcp_info->cmd_lock);
-   udelay(delay);
+
+   if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+   msleep(msecs);
+   else
+   udelay(usecs);
} while (++cnt < max_retries);
 
if (cnt >= max_retries) {
@@ -503,7 +507,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *
 * The spinlock stays locked until the list element is removed.
 */
 
-   udelay(delay);
+   if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+   msleep(msecs);
+   else
+   udelay(usecs);
+
spin_lock_bh(_hwfn->mcp_info->cmd_lock);
 
if (p_cmd_elem->b_is_completed)
@@ -538,7 +546,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *
   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d 
ms]\n",
   p_mb_params->mcp_resp,
   p_mb_params->mcp_param,
-  (cnt * delay) / 1000, (cnt * delay) % 1000);
+  (cnt * usecs) / 1000, (cnt * usecs) % 1000);
 
/* Clear the sequence number from the MFW response */
p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
@@ -556,7 +564,7 @@ static int qed_mcp_cmd_and_union(struct
 {
size_t union_data_size = sizeof(union drv_union_data);
u32 max_retries = QED_DRV_MB_MAX_RETRIES;
-   u32 delay = CHIP_MCP_RESP_ITER_US;
+   u32 usecs = QED_MCP_RESP_ITER_US;
 
/* MCP not initialized */
if (!qed_mcp_is_init(p_hwfn)) {
@@ -573,8 +581,13 @@ static int qed_mcp_cmd_and_union(struct
return -EINVAL;
}
 
+   if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
+   max_retries = DIV_ROUND_UP(max_retries, 1000);
+   usecs *= 1000;
+   }
+
return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
- delay);
+ usecs);
 }
 
 int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
@@ -763,6 +776,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hw
mb_params.data_src_size = sizeof(load_req);
mb_params.p_data_dst = _rsp;
mb_params.data_dst_size = sizeof(load_rsp);
+   mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
 
DP_VERBOSE(p_hwfn, QED_MSG_SP,
   "Load Request: param 0x%08x [init_hw %d, drv_type %d, 
hsi_ver %d, pda 0x%04x]\n",
@@ -984,7 +998,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_
 
 int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-   u32 wol_param, mcp_resp, mcp_param;
+   struct qed_mcp_mb_params mb_params;
+ 

[PATCH 4.14 106/137] IB/hfi1: Invalid user input can result in crash

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Michael J. Ruhl 

commit 94694d18cf27a6faad91487a38ce516c2b16e7d9 upstream.

If the number of packets in a user sdma request does not match
the actual iovectors being sent, sdma_cleanup can be called on
an uninitialized request structure, resulting in a crash similar
to this:

BUG: unable to handle kernel NULL pointer dereference at 0008
IP: [] __sdma_txclean+0x57/0x1e0 [hfi1]
PGD 801044f61067 PUD 1052706067 PMD 0
Oops:  [#1] SMP
CPU: 30 PID: 69912 Comm: upsm Kdump: loaded Tainted: G   OE
   3.10.0-862.el7.x86_64 #1
Hardware name: Intel Corporation S2600KPR/S2600KPR, BIOS
SE5C610.86B.01.01.0019.101220160604 10/12/2016
task: 8b331c89 ti: 8b2ed1f98000 task.ti: 8b2ed1f98000
RIP: 0010:[]  [] __sdma_txclean+0x57/0x1e0
[hfi1]
RSP: 0018:8b2ed1f9bab0  EFLAGS: 00010286
RAX: 8b2b RBX: 8b2adf6e RCX: 
RDX: 00a0 RSI: 8b2e9eedc540 RDI: 8b2adf6e
RBP: 8b2ed1f9bad8 R08:  R09: c0b04a06
R10: 8b331c890190 R11: e6ed00bf1840 R12: 8b331548
R13: 8b33154800f0 R14: fff2 R15: 8b2e9eedc540
FS:  7f035ac47740() GS:8b331e10() knlGS:
CS:  0010 DS:  ES:  CR0: 80050033
CR2: 0008 CR3: 000c03fe6000 CR4: 001607e0
Call Trace:
 [] user_sdma_send_pkts+0xdcd/0x1990 [hfi1]
 [] ? gup_pud_range+0x140/0x290
 [] ? hfi1_mmu_rb_insert+0x155/0x1b0 [hfi1]
 [] hfi1_user_sdma_process_request+0xc5b/0x11b0 [hfi1]
 [] hfi1_aio_write+0xba/0x110 [hfi1]
 [] do_sync_readv_writev+0x7b/0xd0
 [] do_readv_writev+0xce/0x260
 [] ? tty_ldisc_deref+0x19/0x20
 [] ? n_tty_ioctl+0xe0/0xe0
 [] vfs_writev+0x35/0x60
 [] SyS_writev+0x7f/0x110
 [] system_call_fastpath+0x1c/0x21
Code: 06 49 c7 47 18 00 00 00 00 0f 87 89 01 00 00 5b 41 5c 41 5d 41 5e 41 5f
5d c3 66 2e 0f 1f 84 00 00 00 00 00 48 8b 4e 10 48 89 fb <48> 8b 51 08 49 89 d4
83 e2 0c 41 81 e4 00 e0 00 00 48 c1 ea 02
RIP  [] __sdma_txclean+0x57/0x1e0 [hfi1]
 RSP 
CR2: 0008

There are two exit points from user_sdma_send_pkts().  One (free_tx)
merely frees the slab entry and one (free_txreq) cleans the sdma_txreq
prior to freeing the slab entry.   The free_txreq variation can only be
called after one of the sdma_init*() variations has been called.

In the panic case, the slab entry had been allocated but not inited.

Fix the issue by exiting through free_tx thus avoiding sdma_clean().

Cc:  # 4.9.x+
Fixes: 7724105686e7 ("IB/hfi1: add driver files")
Reviewed-by: Mike Marciniszyn 
Reviewed-by: Lukasz Odzioba 
Signed-off-by: Michael J. Ruhl 
Signed-off-by: Dennis Dalessandro 
Signed-off-by: Greg Kroah-Hartman 

Signed-off-by: Jason Gunthorpe 

---
 drivers/infiniband/hw/hfi1/user_sdma.c |2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -828,7 +828,7 @@ static int user_sdma_send_pkts(struct us
if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
if (++req->iov_idx == req->data_iovs) {
ret = -EFAULT;
-   goto free_txreq;
+   goto free_tx;
}
iovec = >iovs[req->iov_idx];
WARN_ON(iovec->offset);




[PATCH 4.14 116/137] qed: Avoid sending mailbox commands when MFW is not responsive

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Tomer Tayar 

[ Upstream commit b310974e041913231b6e3d5d475d4df55c312301 ]

Continuing to send mailbox commands to the MFW when it is not responsive
ends up in a redundant amount of timeout expiries.
This patch prints the MCP status on the first command that gets no
response, and blocks the following commands.
Since the (un)load request commands might get no response due to other
PFs, the patch also adds the option to skip the blocking upon a failure.

Signed-off-by: Tomer Tayar 
Signed-off-by: Ariel Elior 
Signed-off-by: David S. Miller 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/net/ethernet/qlogic/qed/qed_mcp.c  |   52 -
 drivers/net/ethernet/qlogic/qed/qed_mcp.h  |6 ++
 drivers/net/ethernet/qlogic/qed/qed_reg_addr.h |1 
 3 files changed, 56 insertions(+), 3 deletions(-)

--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -319,6 +319,12 @@ int qed_mcp_reset(struct qed_hwfn *p_hwf
u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
int rc = 0;
 
+   if (p_hwfn->mcp_info->b_block_cmd) {
+   DP_NOTICE(p_hwfn,
+ "The MFW is not responsive. Avoid sending MCP_RESET 
mailbox command.\n");
+   return -EBUSY;
+   }
+
/* Ensure that only a single thread is accessing the mailbox */
spin_lock_bh(_hwfn->mcp_info->cmd_lock);
 
@@ -444,6 +450,33 @@ static void __qed_mcp_cmd_and_union(stru
   (p_mb_params->cmd | seq_num), p_mb_params->param);
 }
 
+static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
+{
+   p_hwfn->mcp_info->b_block_cmd = block_cmd;
+
+   DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
+   block_cmd ? "Block" : "Unblock");
+}
+
+static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
+  struct qed_ptt *p_ptt)
+{
+   u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
+   u32 delay = QED_MCP_RESP_ITER_US;
+
+   cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+   cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+   cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+   udelay(delay);
+   cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+   udelay(delay);
+   cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+
+   DP_NOTICE(p_hwfn,
+ "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 
0x%08x}\n",
+ cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
+}
+
 static int
 _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
   struct qed_ptt *p_ptt,
@@ -530,11 +563,15 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *
DP_NOTICE(p_hwfn,
  "The MFW failed to respond to command 0x%08x [param 
0x%08x].\n",
  p_mb_params->cmd, p_mb_params->param);
+   qed_mcp_print_cpu_info(p_hwfn, p_ptt);
 
spin_lock_bh(_hwfn->mcp_info->cmd_lock);
qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
spin_unlock_bh(_hwfn->mcp_info->cmd_lock);
 
+   if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
+   qed_mcp_cmd_set_blocking(p_hwfn, true);
+
return -EAGAIN;
}
 
@@ -572,6 +609,13 @@ static int qed_mcp_cmd_and_union(struct
return -EBUSY;
}
 
+   if (p_hwfn->mcp_info->b_block_cmd) {
+   DP_NOTICE(p_hwfn,
+ "The MFW is not responsive. Avoid sending mailbox 
command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+   return -EBUSY;
+   }
+
if (p_mb_params->data_src_size > union_data_size ||
p_mb_params->data_dst_size > union_data_size) {
DP_ERR(p_hwfn,
@@ -776,7 +820,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hw
mb_params.data_src_size = sizeof(load_req);
mb_params.p_data_dst = _rsp;
mb_params.data_dst_size = sizeof(load_rsp);
-   mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
+   mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
 
DP_VERBOSE(p_hwfn, QED_MSG_SP,
   "Load Request: param 0x%08x [init_hw %d, drv_type %d, 
hsi_ver %d, pda 0x%04x]\n",
@@ -1020,7 +1064,7 @@ int qed_mcp_unload_req(struct qed_hwfn *
memset(_params, 0, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
mb_params.param = wol_param;
-   mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
+   mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
 
return qed_mcp_cmd_and_union(p_hwfn, p_ptt, _params);
 }
@@ -2047,6 +2091,8 @@ int qed_mcp_halt(struct 

[PATCH 4.14 108/137] RDMA/uverbs: Atomically flush and mark closed the comp event queue

2018-10-02 Thread Greg Kroah-Hartman
4.14-stable review patch.  If anyone has any objections, please let me know.

--

From: Steve Wise 

commit 67e3816842fe6414d629c7515b955952ec40c7d7 upstream.

Currently a uverbs completion event queue is flushed of events in
ib_uverbs_comp_event_close() with the queue spinlock held and then
released.  Yet setting ev_queue->is_closed is not set until later in
uverbs_hot_unplug_completion_event_file().

In between the time ib_uverbs_comp_event_close() releases the lock and
uverbs_hot_unplug_completion_event_file() acquires the lock, a completion
event can arrive and be inserted into the event queue by
ib_uverbs_comp_handler().

This can cause a "double add" list_add warning or crash depending on the
kernel configuration, or a memory leak because the event is never dequeued
since the queue is already closed down.

So add setting ev_queue->is_closed = 1 to ib_uverbs_comp_event_close().
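
The pattern can be illustrated with a self-contained userspace analogue
(not the uverbs code; the types and helpers below are invented): the
close path drains the list and marks the queue closed under the same
lock, so a late producer either sees the flag or has its entry freed by
the flush instead of leaking it.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
        struct event *next;
        int id;
};

struct queue {
        pthread_mutex_t lock;
        struct event *head;
        int is_closed;
};

/* producer side: must observe is_closed and drop the event */
static void post_event(struct queue *q, int id)
{
        struct event *ev = malloc(sizeof(*ev));

        if (!ev)
                return;
        ev->id = id;
        pthread_mutex_lock(&q->lock);
        if (q->is_closed) {
                free(ev);               /* queue already torn down */
        } else {
                ev->next = q->head;
                q->head = ev;
        }
        pthread_mutex_unlock(&q->lock);
}

/* close side: flush and mark closed under one lock, with no window */
static void close_queue(struct queue *q)
{
        struct event *ev, *next;

        pthread_mutex_lock(&q->lock);
        for (ev = q->head; ev; ev = next) {
                next = ev->next;
                free(ev);
        }
        q->head = NULL;
        q->is_closed = 1;               /* set before dropping the lock */
        pthread_mutex_unlock(&q->lock);
}

int main(void)
{
        struct queue q = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

        post_event(&q, 1);
        close_queue(&q);
        post_event(&q, 2);              /* late event: dropped, not leaked */
        printf("is_closed=%d\n", q.is_closed);
        return 0;
}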

Cc: sta...@vger.kernel.org
Fixes: 1e7710f3f656 ("IB/core: Change completion channel to use the reworked 
objects schema")
Signed-off-by: Steve Wise 
Signed-off-by: Jason Gunthorpe 
Signed-off-by: Greg Kroah-Hartman 

---
 drivers/infiniband/core/uverbs_main.c |1 +
 1 file changed, 1 insertion(+)

--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -424,6 +424,7 @@ static int ib_uverbs_comp_event_close(st
list_del(&entry->obj_list);
kfree(entry);
}
+   file->ev_queue.is_closed = 1;
spin_unlock_irq(&file->ev_queue.lock);
 
uverbs_close_fd(filp);




[PATCH 4.18 172/228] qed: Wait for ready indication before rereading the shmem

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Tomer Tayar 

[ Upstream commit f00d25f3154b676fcea4502a25b94bd7f142ca74 ]

The MFW might be reset and re-update its shared memory.
Upon the detection of such a reset the driver rereads this memory, but it
has to wait till the data is valid.
This patch adds the missing wait for a data ready indication.

Signed-off-by: Tomer Tayar 
Signed-off-by: Ariel Elior 
Signed-off-by: David S. Miller 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/net/ethernet/qlogic/qed/qed_mcp.c |   50 --
 1 file changed, 41 insertions(+), 9 deletions(-)

--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -183,18 +183,57 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn
return 0;
 }
 
+/* Maximum of 1 sec to wait for the SHMEM ready indication */
+#define QED_MCP_SHMEM_RDY_MAX_RETRIES  20
+#define QED_MCP_SHMEM_RDY_ITER_MS  50
+
 static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+   u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
+   u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
u32 drv_mb_offsize, mfw_mb_offsize;
u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
 
p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
-   if (!p_info->public_base)
-   return 0;
+   if (!p_info->public_base) {
+   DP_NOTICE(p_hwfn,
+ "The address of the MCP scratch-pad is not 
configured\n");
+   return -EINVAL;
+   }
 
p_info->public_base |= GRCBASE_MCP;
 
+   /* Get the MFW MB address and number of supported messages */
+   mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+   SECTION_OFFSIZE_ADDR(p_info->public_base,
+PUBLIC_MFW_MB));
+   p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+   p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
+   p_info->mfw_mb_addr +
+   offsetof(struct public_mfw_mb,
+sup_msgs));
+
+   /* The driver can notify that there was an MCP reset, and might read the
+* SHMEM values before the MFW has completed initializing them.
+* To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
+* data ready indication.
+*/
+   while (!p_info->mfw_mb_length && --cnt) {
+   msleep(msec);
+   p_info->mfw_mb_length =
+   (u16)qed_rd(p_hwfn, p_ptt,
+   p_info->mfw_mb_addr +
+   offsetof(struct public_mfw_mb, sup_msgs));
+   }
+
+   if (!cnt) {
+   DP_NOTICE(p_hwfn,
+ "Failed to get the SHMEM ready notification after %d 
msec\n",
+ QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
+   return -EBUSY;
+   }
+
/* Calculate the driver and MFW mailbox address */
drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
SECTION_OFFSIZE_ADDR(p_info->public_base,
@@ -204,13 +243,6 @@ static int qed_load_mcp_offsets(struct q
   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 
0x%x\n",
   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
 
-   /* Set the MFW MB address */
-   mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
-   SECTION_OFFSIZE_ADDR(p_info->public_base,
-PUBLIC_MFW_MB));
-   p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
-   p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
-
/* Get the current driver mailbox sequence before sending
 * the first command
 */




[PATCH 4.18 174/228] qed: Prevent a possible deadlock during driver load and unload

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Tomer Tayar 

[ Upstream commit eaa50fc59e5841910987e90b0438b2643041f508 ]

The MFW manages an internal lock to prevent concurrent hardware
(de)initialization of different PFs.
This, together with the busy-waiting for the MFW's responses for commands,
might lead to a deadlock during concurrent load or unload of PFs.
This patch adds the option to sleep within the busy-waiting, and uses it
for the (un)load requests (which are not sent from an interrupt context) to
prevent the possible deadlock.

Signed-off-by: Tomer Tayar 
Signed-off-by: Ariel Elior 
Signed-off-by: David S. Miller 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/net/ethernet/qlogic/qed/qed_mcp.c |   43 +-
 drivers/net/ethernet/qlogic/qed/qed_mcp.h |   21 +-
 2 files changed, 44 insertions(+), 20 deletions(-)

--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -48,7 +48,7 @@
 #include "qed_reg_addr.h"
 #include "qed_sriov.h"
 
-#define CHIP_MCP_RESP_ITER_US 10
+#define QED_MCP_RESP_ITER_US   10
 
 #define QED_DRV_MB_MAX_RETRIES (500 * 1000)/* Account for 5 sec */
 #define QED_MCP_RESET_RETRIES  (50 * 1000) /* Account for 500 msec */
@@ -317,7 +317,7 @@ static void qed_mcp_reread_offsets(struc
 
 int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-   u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
+   u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
int rc = 0;
 
/* Ensure that only a single thread is accessing the mailbox */
@@ -449,10 +449,10 @@ static int
 _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
   struct qed_ptt *p_ptt,
   struct qed_mcp_mb_params *p_mb_params,
-  u32 max_retries, u32 delay)
+  u32 max_retries, u32 usecs)
 {
+   u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
struct qed_mcp_cmd_elem *p_cmd_elem;
-   u32 cnt = 0;
u16 seq_num;
int rc = 0;
 
@@ -475,7 +475,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *
goto err;
 
spin_unlock_bh(_hwfn->mcp_info->cmd_lock);
-   udelay(delay);
+
+   if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+   msleep(msecs);
+   else
+   udelay(usecs);
} while (++cnt < max_retries);
 
if (cnt >= max_retries) {
@@ -504,7 +508,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *
 * The spinlock stays locked until the list element is removed.
 */
 
-   udelay(delay);
+   if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+   msleep(msecs);
+   else
+   udelay(usecs);
+
spin_lock_bh(_hwfn->mcp_info->cmd_lock);
 
if (p_cmd_elem->b_is_completed)
@@ -539,7 +547,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *
   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d 
ms]\n",
   p_mb_params->mcp_resp,
   p_mb_params->mcp_param,
-  (cnt * delay) / 1000, (cnt * delay) % 1000);
+  (cnt * usecs) / 1000, (cnt * usecs) % 1000);
 
/* Clear the sequence number from the MFW response */
p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
@@ -557,7 +565,7 @@ static int qed_mcp_cmd_and_union(struct
 {
size_t union_data_size = sizeof(union drv_union_data);
u32 max_retries = QED_DRV_MB_MAX_RETRIES;
-   u32 delay = CHIP_MCP_RESP_ITER_US;
+   u32 usecs = QED_MCP_RESP_ITER_US;
 
/* MCP not initialized */
if (!qed_mcp_is_init(p_hwfn)) {
@@ -574,8 +582,13 @@ static int qed_mcp_cmd_and_union(struct
return -EINVAL;
}
 
+   if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
+   max_retries = DIV_ROUND_UP(max_retries, 1000);
+   usecs *= 1000;
+   }
+
return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
- delay);
+ usecs);
 }
 
 int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
@@ -792,6 +805,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hw
mb_params.data_src_size = sizeof(load_req);
mb_params.p_data_dst = _rsp;
mb_params.data_dst_size = sizeof(load_rsp);
+   mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
 
DP_VERBOSE(p_hwfn, QED_MSG_SP,
   "Load Request: param 0x%08x [init_hw %d, drv_type %d, 
hsi_ver %d, pda 0x%04x]\n",
@@ -1013,7 +1027,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_
 
 int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-   u32 wol_param, mcp_resp, mcp_param;
+   struct qed_mcp_mb_params mb_params;

[PATCH 4.18 163/228] IB/hfi1: Invalid user input can result in crash

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Michael J. Ruhl 

commit 94694d18cf27a6faad91487a38ce516c2b16e7d9 upstream.

If the number of packets in a user sdma request does not match
the actual iovectors being sent, sdma_cleanup can be called on
an uninitialized request structure, resulting in a crash similar
to this:

BUG: unable to handle kernel NULL pointer dereference at 0008
IP: [] __sdma_txclean+0x57/0x1e0 [hfi1]
PGD 801044f61067 PUD 1052706067 PMD 0
Oops:  [#1] SMP
CPU: 30 PID: 69912 Comm: upsm Kdump: loaded Tainted: G   OE
   3.10.0-862.el7.x86_64 #1
Hardware name: Intel Corporation S2600KPR/S2600KPR, BIOS
SE5C610.86B.01.01.0019.101220160604 10/12/2016
task: 8b331c89 ti: 8b2ed1f98000 task.ti: 8b2ed1f98000
RIP: 0010:[]  [] __sdma_txclean+0x57/0x1e0
[hfi1]
RSP: 0018:8b2ed1f9bab0  EFLAGS: 00010286
RAX: 8b2b RBX: 8b2adf6e RCX: 
RDX: 00a0 RSI: 8b2e9eedc540 RDI: 8b2adf6e
RBP: 8b2ed1f9bad8 R08:  R09: c0b04a06
R10: 8b331c890190 R11: e6ed00bf1840 R12: 8b331548
R13: 8b33154800f0 R14: fff2 R15: 8b2e9eedc540
FS:  7f035ac47740() GS:8b331e10() knlGS:
CS:  0010 DS:  ES:  CR0: 80050033
CR2: 0008 CR3: 000c03fe6000 CR4: 001607e0
Call Trace:
 [] user_sdma_send_pkts+0xdcd/0x1990 [hfi1]
 [] ? gup_pud_range+0x140/0x290
 [] ? hfi1_mmu_rb_insert+0x155/0x1b0 [hfi1]
 [] hfi1_user_sdma_process_request+0xc5b/0x11b0 [hfi1]
 [] hfi1_aio_write+0xba/0x110 [hfi1]
 [] do_sync_readv_writev+0x7b/0xd0
 [] do_readv_writev+0xce/0x260
 [] ? tty_ldisc_deref+0x19/0x20
 [] ? n_tty_ioctl+0xe0/0xe0
 [] vfs_writev+0x35/0x60
 [] SyS_writev+0x7f/0x110
 [] system_call_fastpath+0x1c/0x21
Code: 06 49 c7 47 18 00 00 00 00 0f 87 89 01 00 00 5b 41 5c 41 5d 41 5e 41 5f
5d c3 66 2e 0f 1f 84 00 00 00 00 00 48 8b 4e 10 48 89 fb <48> 8b 51 08 49 89 d4
83 e2 0c 41 81 e4 00 e0 00 00 48 c1 ea 02
RIP  [] __sdma_txclean+0x57/0x1e0 [hfi1]
 RSP 
CR2: 0008

There are two exit points from user_sdma_send_pkts().  One (free_tx)
merely frees the slab entry and one (free_txreq) cleans the sdma_txreq
prior to freeing the slab entry.   The free_txreq variation can only be
called after one of the sdma_init*() variations has been called.

In the panic case, the slab entry had been allocated but not inited.

Fix the issue by exiting through free_tx thus avoiding sdma_clean().

Cc:  # 4.9.x+
Fixes: 7724105686e7 ("IB/hfi1: add driver files")
Reviewed-by: Mike Marciniszyn 
Reviewed-by: Lukasz Odzioba 
Signed-off-by: Michael J. Ruhl 
Signed-off-by: Dennis Dalessandro 
Signed-off-by: Greg Kroah-Hartman 

Signed-off-by: Jason Gunthorpe 

---
 drivers/infiniband/hw/hfi1/user_sdma.c |2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -828,7 +828,7 @@ static int user_sdma_send_pkts(struct us
if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
if (++req->iov_idx == req->data_iovs) {
ret = -EFAULT;
-   goto free_txreq;
+   goto free_tx;
}
iovec = >iovs[req->iov_idx];
WARN_ON(iovec->offset);




[PATCH 4.18 185/228] bpf: sockmap: write_space events need to be passed to TCP handler

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: John Fastabend 

[ Upstream commit 9b2e0388bec8ec5427403e23faff3b58dd1c3200 ]

When sockmap code is using the stream parser it also handles the write
space events in order to handle the case where (a) verdict redirects
skb to another socket and (b) the sockmap then sends the skb but due
to memory constraints (or other EAGAIN errors) needs to do a retry.

But the initial code missed a third case where
skb_send_sock_locked() triggers an sk_wait_event(). A typical case
would be when the sndbuf size is exceeded. If this happens, because we
do not pass the write_space event to the lower layers we never wake
up the waiter and it will wait for sndtimeo. Which, as noted in the ktls
fix, may be rather large and look like a hang to the user.

To reproduce the best test is to reduce the sndbuf size and send
1B data chunks to stress the memory handling. To fix this pass the
event from the upper layer to the lower layer.
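
For context, a minimal sketch of the callback-chaining pattern the fix relies
on; the field names follow the quoted patch, but the attach-time save shown
here is an assumption about where save_write_space gets recorded:

/*
 * Sketch only: when a psock is attached, the socket's original
 * sk_write_space callback is saved (assumed to happen at attach time)
 * and replaced by the sockmap handler; after this fix the saved
 * callback is chained to on every write_space event instead of the
 * event being swallowed.
 */
static void example_attach_write_space(struct sock *sk,
				       struct smap_psock *psock)
{
	psock->save_write_space = sk->sk_write_space;	/* remember TCP's handler */
	sk->sk_write_space = smap_write_space;		/* install sockmap's hook */
}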

Signed-off-by: John Fastabend 
Signed-off-by: Daniel Borkmann 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 kernel/bpf/sockmap.c |3 +++
 1 file changed, 3 insertions(+)

--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -1430,12 +1430,15 @@ out:
 static void smap_write_space(struct sock *sk)
 {
struct smap_psock *psock;
+   void (*write_space)(struct sock *sk);
 
rcu_read_lock();
psock = smap_psock_sk(sk);
if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
schedule_work(&psock->tx_work);
+   write_space = psock->save_write_space;
rcu_read_unlock();
+   write_space(sk);
 }
 
 static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)




[PATCH 4.18 184/228] tls: possible hang when do_tcp_sendpages hits sndbuf is full case

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: John Fastabend 

[ Upstream commit 67db7cd249e71f64346f481b629724376d063e08 ]

Currently, the lower protocol's sk_write_space handler is not called if
TLS is sending a scatterlist via tls_push_sg. However, normally
tls_push_sg calls do_tcp_sendpages, which may be under memory pressure,
and that in turn may trigger a wait via sk_wait_event. Typically, this
happens when the in-flight bytes exceed the sndbuf size. In the normal
case, when enough ACKs are received sk_write_space() will be called and
the sk_wait_event will be woken up, allowing it to send more data
and/or return to the user.

But in the TLS case, because the sk_write_space() handler does not
wake up the waiters, the above send will wait until the sndtimeo is
exceeded. By default this is MAX_SCHEDULE_TIMEOUT, so it looks like a
hang to the user (especially to this impatient user). To fix this, pass
the sk_write_space event on to the lower layer's sk_write_space handler,
which in the TCP case will wake any pending waiters.

I observed the above while integrating sockmap and ktls. It
initially appeared as test_sockmap (modified to use ktls) occasionally
hanging. To reliably reproduce this reduce the sndbuf size and stress
the tls layer by sending many 1B sends. This results in every byte
needing a header and each byte individually being sent to the crypto
layer.

Signed-off-by: John Fastabend 
Acked-by: Dave Watson 
Signed-off-by: Daniel Borkmann 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 net/tls/tls_main.c |9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)

--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -222,9 +222,14 @@ static void tls_write_space(struct sock
 {
struct tls_context *ctx = tls_get_ctx(sk);
 
-   /* We are already sending pages, ignore notification */
-   if (ctx->in_tcp_sendpages)
+   /* If in_tcp_sendpages call lower protocol write space handler
+* to ensure we wake up any waiting operations there. For example
+* if do_tcp_sendpages where to call sk_wait_event.
+*/
+   if (ctx->in_tcp_sendpages) {
+   ctx->sk_write_space(sk);
return;
+   }
 
if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
gfp_t sk_allocation = sk->sk_allocation;




[PATCH 4.18 179/228] mmc: android-goldfish: fix bad logic of sg_copy_{from,to}_buffer conversion

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Ludovic Desroches 

[ Upstream commit 17e96d8516e31c3cb52cb8e2ee79d1d2e6948c11 ]

The conversion to sg_copy_{from,to}_buffer has been done in the wrong
way. sg_copy_to_buffer is a copy from an SG list to a linear buffer so
it can't replace memcpy(dest, host->virt_base, data->sg->length) where
dest is the virtual address of the SG. Same for sg_copy_from_buffer
but in the opposite way.
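
For reference, a minimal sketch of the two copy directions; the helper names
and the "virt_base" bounce buffer are illustrative, not taken from the driver:

#include <linux/scatterlist.h>

/*
 * sg_copy_from_buffer(): linear buffer -> SG list
 * sg_copy_to_buffer():   SG list -> linear buffer
 */
static void example_read_done(struct scatterlist *sg, void *virt_base,
			      size_t len)
{
	/* data the device left in virt_base goes into the request's SG list */
	sg_copy_from_buffer(sg, 1, virt_base, len);
}

static void example_write_prepare(struct scatterlist *sg, void *virt_base,
				  size_t len)
{
	/* gather the request's SG list into virt_base for the device */
	sg_copy_to_buffer(sg, 1, virt_base, len);
}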

Signed-off-by: Ludovic Desroches 
Suggested-by: Douglas Gilbert 
Fixes: 53d7e098ba08 ("mmc: android-goldfish: use sg_copy_{from,to}_buffer")
Signed-off-by: Ulf Hansson 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/mmc/host/android-goldfish.c |4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

--- a/drivers/mmc/host/android-goldfish.c
+++ b/drivers/mmc/host/android-goldfish.c
@@ -217,7 +217,7 @@ static void goldfish_mmc_xfer_done(struc
 * We don't really have DMA, so we need
 * to copy from our platform driver buffer
 */
-   sg_copy_to_buffer(data->sg, 1, host->virt_base,
+   sg_copy_from_buffer(data->sg, 1, host->virt_base,
data->sg->length);
}
host->data->bytes_xfered += data->sg->length;
@@ -393,7 +393,7 @@ static void goldfish_mmc_prepare_data(st
 * We don't really have DMA, so we need to copy to our
 * platform driver buffer
 */
-   sg_copy_from_buffer(data->sg, 1, host->virt_base,
+   sg_copy_to_buffer(data->sg, 1, host->virt_base,
data->sg->length);
}
 }




[PATCH 4.18 180/228] bus: ti-sysc: Fix no_console_suspend handling

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Tony Lindgren 

[ Upstream commit 4f3530f4a41d49c41015020cd9a5ed5c95b5d2db ]

If no_console_suspend is set, we should keep the console enabled during
suspend. Let's fix this by only producing a warning if we can't idle the
hardware during suspend.

Fixes: ef55f8215a78 ("bus: ti-sysc: Improve suspend and resume handling")
Signed-off-by: Tony Lindgren 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/bus/ti-sysc.c |6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -1175,10 +1175,10 @@ static int sysc_child_suspend_noirq(stru
if (!pm_runtime_status_suspended(dev)) {
error = pm_generic_runtime_suspend(dev);
if (error) {
-   dev_err(dev, "%s error at %i: %i\n",
-   __func__, __LINE__, error);
+   dev_warn(dev, "%s busy at %i: %i\n",
+__func__, __LINE__, error);
 
-   return error;
+   return 0;
}
 
error = sysc_runtime_suspend(ddata->dev);




[PATCH 4.18 178/228] mmc: atmel-mci: fix bad logic of sg_copy_{from,to}_buffer conversion

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Ludovic Desroches 

[ Upstream commit 19f5e9e015675fcdbf2c20e804b2e84e80201454 ]

The conversion to sg_copy_{from,to}_buffer has been done in the wrong
way. sg_copy_to_buffer is a copy from an SG list to a linear buffer so
it can't replace memcpy(buf + offset, &value, remaining) where buf is
the virtual address of the SG. Same for sg_copy_from_buffer but in the
opposite way.

Signed-off-by: Ludovic Desroches 
Suggested-by: Douglas Gilbert 
Fixes: 5b4277814e3f ("mmc: atmel-mci: use sg_copy_{from,to}_buffer")
Signed-off-by: Ulf Hansson 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/mmc/host/atmel-mci.c |   12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)

--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1976,7 +1976,7 @@ static void atmci_read_data_pio(struct a
do {
value = atmci_readl(host, ATMCI_RDR);
if (likely(offset + 4 <= sg->length)) {
-   sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset);
+   sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset);
 
offset += 4;
nbytes += 4;
@@ -1993,7 +1993,7 @@ static void atmci_read_data_pio(struct a
} else {
unsigned int remaining = sg->length - offset;
 
-   sg_pcopy_to_buffer(sg, 1, &value, remaining, offset);
+   sg_pcopy_from_buffer(sg, 1, &value, remaining, offset);
nbytes += remaining;
 
flush_dcache_page(sg_page(sg));
@@ -2003,7 +2003,7 @@ static void atmci_read_data_pio(struct a
goto done;
 
offset = 4 - remaining;
-   sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining,
+   sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining,
offset, 0);
nbytes += offset;
}
@@ -2042,7 +2042,7 @@ static void atmci_write_data_pio(struct
 
do {
if (likely(offset + 4 <= sg->length)) {
-   sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset);
+   sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset);
atmci_writel(host, ATMCI_TDR, value);
 
offset += 4;
@@ -2059,7 +2059,7 @@ static void atmci_write_data_pio(struct
unsigned int remaining = sg->length - offset;
 
value = 0;
-   sg_pcopy_from_buffer(sg, 1, &value, remaining, offset);
+   sg_pcopy_to_buffer(sg, 1, &value, remaining, offset);
nbytes += remaining;
 
host->sg = sg = sg_next(sg);
@@ -2070,7 +2070,7 @@ static void atmci_write_data_pio(struct
}
 
offset = 4 - remaining;
-   sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining,
+   sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining,
offset, 0);
atmci_writel(host, ATMCI_TDR, value);
nbytes += offset;




Re: [PATCH v2] dma-debug: Check for drivers mapping invalid addresses in dma_map_single()

2018-10-02 Thread Robin Murphy

On 01/10/18 22:53, Stephen Boyd wrote:

I recently debugged a DMA mapping oops where a driver was trying to map
a buffer returned from request_firmware() with dma_map_single(). Memory
returned from request_firmware() is mapped into the vmalloc region and
this isn't a valid region to map with dma_map_single() per the DMA
documentation's "What memory is DMA'able?" section.

Unfortunately, we don't really check that in the DMA debugging code, so
enabling DMA debugging doesn't help catch this problem. Let's add a new
DMA debug function to check for a vmalloc address or an invalid virtual
address and print a warning if this happens. This makes it a little
easier to debug these sorts of problems, instead of seeing odd behavior
or crashes when drivers attempt to map the vmalloc space for DMA.
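
As an illustration of the driver pattern this check is meant to flag (the
firmware name and helper below are made up for the example):

#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/slab.h>

/*
 * Illustrative only: request_firmware() data is vmalloc-backed, so it is
 * not valid for dma_map_single(); bounce it through kmalloc'ed memory.
 * The caller is assumed to unmap and kfree the bounce buffer later.
 */
static int example_load_fw(struct device *dev, dma_addr_t *dma)
{
	const struct firmware *fw;
	void *bounce;
	int ret;

	ret = request_firmware(&fw, "example-fw.bin", dev);	/* name made up */
	if (ret)
		return ret;

	/* Wrong: fw->data lives in the vmalloc area, DMA debug would warn:
	 * *dma = dma_map_single(dev, (void *)fw->data, fw->size, DMA_TO_DEVICE);
	 */

	/* Better: copy into DMA'able memory first */
	bounce = kmemdup(fw->data, fw->size, GFP_KERNEL);
	if (!bounce) {
		release_firmware(fw);
		return -ENOMEM;
	}

	*dma = dma_map_single(dev, bounce, fw->size, DMA_TO_DEVICE);
	release_firmware(fw);

	return dma_mapping_error(dev, *dma) ? -EIO : 0;
}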

Cc: Marek Szyprowski 
Cc: Robin Murphy 
Signed-off-by: Stephen Boyd 
---

Changes from v1:
  * Update code to check for invalid virtual address too
  * Rename function to debug_dma_map_single()

  include/linux/dma-debug.h   |  8 
  include/linux/dma-mapping.h |  1 +
  kernel/dma/debug.c  | 16 
  3 files changed, 25 insertions(+)

diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
index a785f2507159..30213adbb6b9 100644
--- a/include/linux/dma-debug.h
+++ b/include/linux/dma-debug.h
@@ -32,6 +32,9 @@ extern void dma_debug_add_bus(struct bus_type *bus);
  
  extern int dma_debug_resize_entries(u32 num_entries);
  
+extern void debug_dma_map_single(struct device *dev, const void *addr,

+unsigned long len);
+
  extern void debug_dma_map_page(struct device *dev, struct page *page,
   size_t offset, size_t size,
   int direction, dma_addr_t dma_addr,
@@ -103,6 +106,11 @@ static inline int dma_debug_resize_entries(u32 num_entries)
return 0;
  }
  
+static inline void debug_dma_map_single(struct device *dev, const void *addr,

+   unsigned long len)
+{
+}
+
  static inline void debug_dma_map_page(struct device *dev, struct page *page,
  size_t offset, size_t size,
  int direction, dma_addr_t dma_addr,
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index d23fc45c8208..99ccba66c06a 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -231,6 +231,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device 
*dev, void *ptr,
dma_addr_t addr;
  
  	BUG_ON(!valid_dma_direction(dir));

+   debug_dma_check_single(dev, ptr, size);


Ahem...

With that (and below) fixed so that it actually compiles,

Reviewed-by: Robin Murphy 


addr = ops->map_page(dev, virt_to_page(ptr),
 offset_in_page(ptr), size,
 dir, attrs);
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index c007d25bee09..0f34bce82a62 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -1312,6 +1312,22 @@ static void check_sg_segment(struct device *dev, struct 
scatterlist *sg)
  #endif
  }
  
+void debug_dma_check_single(struct device *dev, const void *addr,

+   unsigned long len)
+{
+   if (unlikely(dma_debug_disabled()))
+   return;
+
+   if (!virt_addr_valid(addr))
+   err_printk(dev, NULL, "DMA-API: device driver maps memory from 
invalid area [addr=%p] [len=%lu]\n",
+  addr, len);
+
+   if (is_vmalloc_addr(addr))
+   err_printk(dev, NULL, "DMA-API: device driver maps memory from 
vmalloc area [addr=%p] [len=%lu]\n",
+  addr, len);
+}
+EXPORT_SYMBOL(debug_dma_check_single);
+
  void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
size_t size, int direction, dma_addr_t dma_addr,
bool map_single)



[PATCH 4.18 073/228] media: ov772x: allow i2c controllers without I2C_FUNC_PROTOCOL_MANGLING

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Akinobu Mita 

[ Upstream commit 0b964d183cbf3f95a062ad9f3eec87ffa2790558 ]

The ov772x driver only works when the i2c controller has
I2C_FUNC_PROTOCOL_MANGLING.  However, many i2c controller drivers don't
support it.

The reason the ov772x requires I2C_FUNC_PROTOCOL_MANGLING is that
it doesn't support repeated starts.

This changes the ov772x register read method so that it doesn't
require I2C_FUNC_PROTOCOL_MANGLING, by issuing two separate i2c messages.
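
For context, a rough sketch of the two read styles (illustrative, not taken
from the driver): the first needs a repeated start, which the sensor cannot
handle, the second uses two plain transfers as the patch below does:

#include <linux/i2c.h>

static int reg_read_repeated_start(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[] = {
		{ .addr = client->addr, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = val },
	};

	/* one combined transfer: Start, write reg, repeated Start, read */
	return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO;
}

static int reg_read_two_transfers(struct i2c_client *client, u8 reg, u8 *val)
{
	int ret;

	/* two independent transfers, each with its own Start/Stop */
	ret = i2c_master_send(client, &reg, 1);
	if (ret < 0)
		return ret;
	ret = i2c_master_recv(client, val, 1);
	return ret < 0 ? ret : 0;
}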

Cc: Laurent Pinchart 
Cc: Hans Verkuil 
Cc: Wolfram Sang 
Reviewed-by: Jacopo Mondi 
Signed-off-by: Akinobu Mita 
Signed-off-by: Sakari Ailus 
Signed-off-by: Mauro Carvalho Chehab 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/media/i2c/ov772x.c |   20 ++--
 1 file changed, 14 insertions(+), 6 deletions(-)

--- a/drivers/media/i2c/ov772x.c
+++ b/drivers/media/i2c/ov772x.c
@@ -542,9 +542,19 @@ static struct ov772x_priv *to_ov772x(str
return container_of(sd, struct ov772x_priv, subdev);
 }
 
-static inline int ov772x_read(struct i2c_client *client, u8 addr)
+static int ov772x_read(struct i2c_client *client, u8 addr)
 {
-   return i2c_smbus_read_byte_data(client, addr);
+   int ret;
+   u8 val;
+
+   ret = i2c_master_send(client, &addr, 1);
+   if (ret < 0)
+   return ret;
+   ret = i2c_master_recv(client, &val, 1);
+   if (ret < 0)
+   return ret;
+
+   return val;
 }
 
 static inline int ov772x_write(struct i2c_client *client, u8 addr, u8 value)
@@ -1263,13 +1273,11 @@ static int ov772x_probe(struct i2c_clien
return -EINVAL;
}
 
-   if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_PROTOCOL_MANGLING)) {
+   if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&adapter->dev,
-   "I2C-Adapter doesn't support SMBUS_BYTE_DATA or PROTOCOL_MANGLING\n");
+   "I2C-Adapter doesn't support SMBUS_BYTE_DATA\n");
return -EIO;
}
-   client->flags |= I2C_CLIENT_SCCB;
 
priv = devm_kzalloc(>dev, sizeof(*priv), GFP_KERNEL);
if (!priv)




[PATCH 4.18 076/228] arm64: dts: renesas: salvator-common: Fix adv7482 decimal unit addresses

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Geert Uytterhoeven 

[ Upstream commit c5a884838ce34681200b5a45b2330177036affd0 ]

With recent dtc and W=1:

...salvator-x.dtb: Warning (graph_port): 
/soc/i2c@e66d8000/video-receiver@70/port@10: graph node unit address error, 
expected "a"
...salvator-x.dtb: Warning (graph_port): 
/soc/i2c@e66d8000/video-receiver@70/port@11: graph node unit address error, 
expected "b"

Unit addresses are always hexadecimal (without prefix), while the bases
of reg property values depend on their prefixes.

Fixes: 908001d778eba06e ("arm64: dts: renesas: salvator-common: Add ADV7482 
support")
Signed-off-by: Geert Uytterhoeven 
Reviewed-by: Rob Herring 
Acked-by: Kieran Bingham 
Signed-off-by: Simon Horman 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 arch/arm64/boot/dts/renesas/salvator-common.dtsi |4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -440,7 +440,7 @@
};
};
 
-   port@10 {
+   port@a {
reg = <10>;
 
adv7482_txa: endpoint {
@@ -450,7 +450,7 @@
};
};
 
-   port@11 {
+   port@b {
reg = <11>;
 
adv7482_txb: endpoint {




[PATCH 4.18 041/228] x86/tsc: Add missing header to tsc_msr.c

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Andy Shevchenko 

[ Upstream commit dbd0fbc76c77daac08ddd245afdcbade0d506e19 ]

Add a missing header otherwise compiler warns about missed prototype:

CC  arch/x86/kernel/tsc_msr.o
arch/x86/kernel/tsc_msr.c:73:15: warning: no previous prototype for 
‘cpu_khz_from_msr’ [-Wmissing-prototypes]
   unsigned long cpu_khz_from_msr(void)
 ^~~~

Signed-off-by: Andy Shevchenko 
Signed-off-by: Thomas Gleixner 
Cc: "H. Peter Anvin" 
Cc: Pavel Tatashin 
Link: 
https://lkml.kernel.org/r/20180629193113.84425-4-andriy.shevche...@linux.intel.com
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 arch/x86/kernel/tsc_msr.c |1 +
 1 file changed, 1 insertion(+)

--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -12,6 +12,7 @@
 #include 
 #include 
 #include 
+#include <asm/tsc.h>
 
 #define MAX_NUM_FREQS  9
 




[PATCH 4.18 072/228] staging: android: ashmem: Fix mmap size validation

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Alistair Strachan 

[ Upstream commit 8632c614565d0c5fdde527889601c018e97b6384 ]

The ashmem driver did not check that the size/offset of the vma passed
to its .mmap() function was not larger than the ashmem object being
mapped. This could cause mmap() to succeed, even though accessing parts
of the mapping would later fail with a segmentation fault.

Ensure an error is returned by the ashmem_mmap() function if the vma
size is larger than the ashmem object size. This enables safer handling
of the problem in userspace.
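
A rough userspace illustration of the new behaviour, assuming the ashmem uapi
header and /dev/ashmem are available (as on Android); the sizes are arbitrary:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ashmem.h>

int main(void)
{
	int fd = open("/dev/ashmem", O_RDWR);
	void *p;

	if (fd < 0)
		return 1;

	ioctl(fd, ASHMEM_SET_SIZE, 4096);		/* object is one page */
	p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
		 MAP_SHARED, fd, 0);			/* ask for two pages */

	/* with the fix this mmap() fails with EINVAL up front */
	printf("mmap: %s\n", p == MAP_FAILED ? "failed (expected)" : "succeeded");

	if (p != MAP_FAILED)
		munmap(p, 2 * 4096);
	close(fd);
	return 0;
}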

Cc: Todd Kjos 
Cc: de...@driverdev.osuosl.org
Cc: linux-kernel@vger.kernel.org
Cc: kernel-t...@android.com
Cc: Joel Fernandes 
Signed-off-by: Alistair Strachan 
Acked-by: Joel Fernandes (Google) 
Reviewed-by: Martijn Coenen 
Signed-off-by: Greg Kroah-Hartman 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/staging/android/ashmem.c |6 ++
 1 file changed, 6 insertions(+)

--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -366,6 +366,12 @@ static int ashmem_mmap(struct file *file
goto out;
}
 
+   /* requested mapping size larger than object size */
+   if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
+   ret = -EINVAL;
+   goto out;
+   }
+
/* requested protection bits must match our allowed protection mask */
if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
 calc_vm_prot_bits(PROT_MASK, 0))) {




[PATCH 4.18 043/228] x86/entry/64: Add two more instruction suffixes

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Jan Beulich 

[ Upstream commit 6709812f094d96543b443645c68daaa32d3d3e77 ]

Sadly, other than claimed in:

  a368d7fd2a ("x86/entry/64: Add instruction suffix")

... there are two more instances which want to be adjusted.

As said there, omitting suffixes from instructions in AT&T mode is bad
practice when operand size cannot be determined by the assembler from
register operands, and is likely going to be warned about by upstream
gas in the future (mine does already).

Add the other missing suffixes here as well.

Signed-off-by: Jan Beulich 
Cc: Andy Lutomirski 
Cc: Borislav Petkov 
Cc: Brian Gerst 
Cc: Denys Vlasenko 
Cc: H. Peter Anvin 
Cc: Josh Poimboeuf 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Link: http://lkml.kernel.org/r/5b3a02dd0278001cf...@prv1-mh.provo.novell.com
Signed-off-by: Ingo Molnar 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 arch/x86/entry/entry_64.S |4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -92,7 +92,7 @@ END(native_usergs_sysret64)
 .endm
 
 .macro TRACE_IRQS_IRETQ_DEBUG
-   bt  $9, EFLAGS(%rsp)/* interrupts off? */
+   btl $9, EFLAGS(%rsp)/* interrupts off? */
jnc 1f
TRACE_IRQS_ON_DEBUG
 1:
@@ -701,7 +701,7 @@ retint_kernel:
 #ifdef CONFIG_PREEMPT
/* Interrupts are off */
/* Check if we need preemption */
-   bt  $9, EFLAGS(%rsp)/* were interrupts off? */
+   btl $9, EFLAGS(%rsp)/* were interrupts off? */
jnc 1f
 0: cmpl$0, PER_CPU_VAR(__preempt_count)
jnz 1f




[PATCH 4.18 044/228] ARM: dts: ls1021a: Add missing cooling device properties for CPUs

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Viresh Kumar 

[ Upstream commit 47768f372eae030db6fab5225f9504a820d2c07f ]

The cooling device properties, like "#cooling-cells" and
"dynamic-power-coefficient", should either be present for all the CPUs
of a cluster or none. If these are present only for a subset of CPUs of
a cluster then things will start falling apart as soon as the CPUs are
brought online in a different order. For example, this will happen
because the operating system looks for such properties in the CPU node
it is trying to bring up, so that it can register a cooling device.

Add such missing properties.

Signed-off-by: Viresh Kumar 
Signed-off-by: Shawn Guo 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 arch/arm/boot/dts/ls1021a.dtsi |1 +
 1 file changed, 1 insertion(+)

--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -84,6 +84,7 @@
device_type = "cpu";
reg = <0xf01>;
clocks = < 1 0>;
+   #cooling-cells = <2>;
};
};
 




[PATCH 4.18 077/228] serial: pxa: Fix an error handling path in serial_pxa_probe()

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Christophe JAILLET 

[ Upstream commit 95a0e656580fab3128c7bee5f660c50784f53651 ]

If port.line is out of range, we still need to release some resources, or
we will leak them.

Fixes: afc7851fab83 ("serial: pxa: Fix out-of-bounds access through serial port 
index")
Signed-off-by: Christophe JAILLET 
Signed-off-by: Greg Kroah-Hartman 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/tty/serial/pxa.c |3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -887,7 +887,8 @@ static int serial_pxa_probe(struct platf
goto err_clk;
if (sport->port.line >= ARRAY_SIZE(serial_pxa_ports)) {
dev_err(>dev, "serial%d out of range\n", sport->port.line);
-   return -EINVAL;
+   ret = -EINVAL;
+   goto err_clk;
}
snprintf(sport->name, PXA_NAME_LEN - 1, "UART%d", sport->port.line + 1);
 




[PATCH 4.18 070/228] media: omap3isp: zero-initialize the isp cam_xclk{a,b} initial data

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Javier Martinez Canillas 

[ Upstream commit 2ec7debd44b49927a6e2861521994cc075a389ed ]

The struct clk_init_data init variable is declared in the isp_xclk_init()
function, so it is an automatic variable allocated on the stack. But it's not
explicitly zero-initialized, so some init fields are left uninitialized.

This causes the data structure to have undefined values that may confuse
the common clock framework when the clock is registered.

For example, the uninitialized .flags field could have the CLK_IS_CRITICAL
bit set, causing the framework to wrongly prepare the clk on registration.
This leads to the isp_xclk_prepare() callback being called, which in turn
calls to the omap3isp_get() function that increments the isp dev refcount.

Since this omap3isp_get() call is unexpected, this leads to an unbalanced
omap3isp_get() call that prevents the requested IRQ from being enabled later,
due to the refcount not being 0 when the correct omap3isp_get() call happens.

Fixes: 9b28ee3c9122 ("[media] omap3isp: Use the common clock framework")

Signed-off-by: Javier Martinez Canillas 
Reviewed-by: Sebastian Reichel 
Signed-off-by: Sakari Ailus 
Signed-off-by: Mauro Carvalho Chehab 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/media/platform/omap3isp/isp.c |2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -300,7 +300,7 @@ static struct clk *isp_xclk_src_get(stru
 static int isp_xclk_init(struct isp_device *isp)
 {
struct device_node *np = isp->dev->of_node;
-   struct clk_init_data init;
+   struct clk_init_data init = { 0 };
unsigned int i;
 
for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i)




[PATCH 4.18 071/228] media: ov772x: add checks for register read errors

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Akinobu Mita 

[ Upstream commit 30f3b17eaf4913e9e56be15915ce57aae69db701 ]

This change adds checks for register read errors and returns the correct
error code.

Cc: Laurent Pinchart 
Cc: Hans Verkuil 
Reviewed-by: Jacopo Mondi 
Signed-off-by: Akinobu Mita 
Signed-off-by: Sakari Ailus 
Signed-off-by: Mauro Carvalho Chehab 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/media/i2c/ov772x.c |   20 ++--
 1 file changed, 14 insertions(+), 6 deletions(-)

--- a/drivers/media/i2c/ov772x.c
+++ b/drivers/media/i2c/ov772x.c
@@ -1136,7 +1136,7 @@ static int ov772x_set_fmt(struct v4l2_su
 static int ov772x_video_probe(struct ov772x_priv *priv)
 {
struct i2c_client  *client = v4l2_get_subdevdata(&priv->subdev);
-   u8  pid, ver;
+   int pid, ver, midh, midl;
const char *devname;
int ret;
 
@@ -1146,7 +1146,11 @@ static int ov772x_video_probe(struct ov7
 
/* Check and show product ID and manufacturer ID. */
pid = ov772x_read(client, PID);
+   if (pid < 0)
+   return pid;
ver = ov772x_read(client, VER);
+   if (ver < 0)
+   return ver;
 
switch (VERSION(pid, ver)) {
case OV7720:
@@ -1162,13 +1166,17 @@ static int ov772x_video_probe(struct ov7
goto done;
}
 
+   midh = ov772x_read(client, MIDH);
+   if (midh < 0)
+   return midh;
+   midl = ov772x_read(client, MIDL);
+   if (midl < 0)
+   return midl;
+
dev_info(&client->dev,
 "%s Product ID %0x:%0x Manufacturer ID %x:%x\n",
-devname,
-pid,
-ver,
-ov772x_read(client, MIDH),
-ov772x_read(client, MIDL));
+devname, pid, ver, midh, midl);
+
+   ret = v4l2_ctrl_handler_setup(&priv->hdl);
 
 done:




Re: [PATCH 4/5] locking/lockdep: Make class->ops a percpu counter

2018-10-02 Thread Waiman Long
On 10/02/2018 05:55 AM, Ingo Molnar wrote:
> * Peter Zijlstra  wrote:
>
>> On Fri, Sep 28, 2018 at 01:53:20PM -0400, Waiman Long wrote:
>>> diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
>>> index ca002c0..7a0ed1d 100644
>>> --- a/kernel/locking/lockdep.c
>>> +++ b/kernel/locking/lockdep.c
>>> @@ -139,6 +139,7 @@ static inline int debug_locks_off_graph_unlock(void)
>>>   */
>>>  unsigned long nr_lock_classes;
>>>  static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
>>> +static DEFINE_PER_CPU(unsigned long [MAX_LOCKDEP_KEYS], lock_class_ops);
>>> @@ -1387,11 +1391,15 @@ static inline int usage_match(struct lock_list 
>>> *entry, void *bit)
>>>  
>>>  static void print_lock_class_header(struct lock_class *class, int depth)
>>>  {
>>> -   int bit;
>>> +   int bit, cpu;
>>> +   unsigned long ops = 0UL;
>>> +
>>> +   for_each_possible_cpu(cpu)
>>> +   ops += *per_cpu(class->pops, cpu);
>>>  
>>> printk("%*s->", depth, "");
>>> print_lock_name(class);
>>> -   printk(KERN_CONT " ops: %lu", class->ops);
>>> +   printk(KERN_CONT " ops: %lu", ops);
>>> printk(KERN_CONT " {\n");
>>>  
>>> for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
>> That is an aweful lot of storage for a stupid number. Some archs
>> (sparc64) are bzImage size constrained and this will hurt them.
>>
>> Ingo, do you happen to remember what that number was good for?
> Just a spur of the moment statistics to satisfy curiousity, and it's useful 
> to see how 'busy' a 
> particular class is, right?
>
>> Can't we simply ditch it?
> We certainly could. Do we have roughly equivalent metrics to arrive at this 
> number via other 
> methods?
>
> Thanks,
>
>   Ingo


One alternative is to group it under CONFIG_DEBUG_LOCKDEP again. This
metric was originally under CONFIG_DEBUG_LOCKDEP, but was moved to
CONFIG_LOCKDEP when trying to make other lock debugging statistics
per-cpu counters. It was probably because this metric is per lock class
while the rests are global.

By doing so, you incur the memory cost only when CONFIG_DEBUG_LOCKDEP is
defined.
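
A rough sketch of what that regrouping could look like (illustrative only,
not a patch from this thread):

/* In kernel/locking/lockdep.c, reusing the names from the quoted patch */
#ifdef CONFIG_DEBUG_LOCKDEP
static DEFINE_PER_CPU(unsigned long [MAX_LOCKDEP_KEYS], lock_class_ops);

static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx = class - lock_classes;		/* index into the class array */

	__this_cpu_inc(lock_class_ops[idx]);
}
#else
static inline void debug_class_ops_inc(struct lock_class *class) { }
#endif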

What do you think?

Cheers,
Longman




[PATCH 4.18 046/228] thermal: i.MX: Allow thermal probe to fail gracefully in case of bad calibration.

2018-10-02 Thread Greg Kroah-Hartman
4.18-stable review patch.  If anyone has any objections, please let me know.

--

From: Jean-Christophe Dubois 

[ Upstream commit be926ceeb4efc3bf44cb9b56f5c71aac9b1f8bbe ]

Without this fix, the thermal probe on i.MX6 might trigger a division
by zero exception later in the probe if the calibration fails.

Note: this Linux behavior (division by zero in the kernel) has been triggered
on a QEMU i.MX6 emulation where the parameters in nvmem were not set. With
this fix the division by zero is not triggered anymore, as the thermal probe
fails early.

Signed-off-by: Jean-Christophe Dubois 
Reviewed-by: Fabio Estevam 
Signed-off-by: Eduardo Valentin 
Signed-off-by: Sasha Levin 
Signed-off-by: Greg Kroah-Hartman 
---
 drivers/thermal/imx_thermal.c |5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -604,7 +604,10 @@ static int imx_init_from_nvmem_cells(str
ret = nvmem_cell_read_u32(&pdev->dev, "calib", &val);
if (ret)
return ret;
-   imx_init_calib(pdev, val);
+
+   ret = imx_init_calib(pdev, val);
+   if (ret)
+   return ret;
 
ret = nvmem_cell_read_u32(&pdev->dev, "temp_grade", &val);
if (ret)




Re: [PATCH 2/2] x86/tsc: Fix UV TSC initialization

2018-10-02 Thread Mike Travis




On 10/1/2018 11:22 PM, Thomas Gleixner wrote:

On Mon, 1 Oct 2018, Mike Travis wrote:


Fix regression introduced by

commit cf7a63ef4e02 ("x86/tsc: Calibrate tsc only once")

as it changed setup_arch() so that it now calls tsc_early_init() before
acpi_boot_table_init() which is a necessary step, in the case of UV
systems, to inform tsc_sanitize_first_cpu() that we're on a platform
with async TSC resets as documented in

commit 341102c3ef29 ("x86/tsc: Add option that TSC on Socket 0 being non-zero is 
valid")

Fix by skipping tsc_early_init() on UV systems and let TSC initialization
take place later in tsc_init().

Fixes: cf7a63ef4e02 ("x86/tsc: Calibrate tsc only once")
Signed-off-by: Mike Travis 
Signed-off-by: Hedi Berriche 


See previous mail.


Reviewed-by: Russ Anderson 
Reviewed-by: Dimitri Sivanich 
---
  arch/x86/kernel/setup.c |6 +-
  1 file changed, 5 insertions(+), 1 deletion(-)

--- linux.orig/arch/x86/kernel/setup.c
+++ linux/arch/x86/kernel/setup.c
@@ -117,6 +117,7 @@
  #include 
  #include 
  #include 
+#include 
  
  /*

   * max_low_pfn_mapped: highest direct mapped pfn under 4GB
@@ -1015,7 +1016,10 @@ void __init setup_arch(char **cmdline_p)
 */
init_hypervisor_platform();
  
-	tsc_early_init();

+   /* UV TSC multi-chassis synchronization already set, don't change it */
+   if (!is_early_uv_system())
+   tsc_early_init();


tsc_early_init() has already a check which makes it not invoke the
calibration code, so please put your uv check into that.


Will do, thanks.



Thanks,

tglx



Re: KASAN: use-after-free Read in seq_escape

2018-10-02 Thread Theodore Y. Ts'o
On Sun, Sep 30, 2018 at 11:58:02PM -0700, syzbot wrote:
> Hello,
> 
> syzbot found the following crash on:
> 
> HEAD commit:17b57b1883c1 Linux 4.19-rc6
> git tree:   upstream
> console output: https://syzkaller.appspot.com/x/log.txt?x=1672d71140
> kernel config:  https://syzkaller.appspot.com/x/.config?x=c0af03fe452b65fb
> dashboard link: https://syzkaller.appspot.com/bug?extid=a2872d6feea6918008a9
> compiler:   gcc (GCC) 8.0.1 20180413 (experimental)

This should be fixed with the following patch.

- Ted

From 89cc85e761c5b905b1a61cb85440bf9b5e841c80 Mon Sep 17 00:00:00 2001
From: Theodore Ts'o 
Date: Tue, 2 Oct 2018 10:40:57 -0400
Subject: [PATCH] ext4: fix use-after-free race in ext4_remount()'s error path

It's possible for ext4_show_quota_options() to try reading
s_qf_names[i] while it is being modified by ext4_remount() --- most
notably, in ext4_remount's error path when the original values of the
quota file name gets restored.

Reported-by: syzbot+a2872d6feea691800...@syzkaller.appspotmail.com
Signed-off-by: Theodore Ts'o 
Cc: sta...@kernel.org
---
 fs/ext4/super.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index faf293ed8060..11a1bfae0937 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2048,11 +2048,13 @@ static inline void ext4_show_quota_options(struct 
seq_file *seq,
seq_printf(seq, ",jqfmt=%s", fmtname);
}
 
+   down_read(&sb->s_umount);
if (sbi->s_qf_names[USRQUOTA])
seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);
 
if (sbi->s_qf_names[GRPQUOTA])
seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
+   up_read(&sb->s_umount);
 #endif
 }
 
-- 
2.18.0.rc0



Re: [LKP] [fsnotify] 60f7ed8c7c: will-it-scale.per_thread_ops -5.9% regression

2018-10-02 Thread Amir Goldstein
On Mon, Oct 1, 2018 at 12:52 PM Amir Goldstein  wrote:
>
> On Mon, Oct 1, 2018 at 12:32 PM Jan Kara  wrote:
> >
> > On Sun 30-09-18 12:00:46, Amir Goldstein wrote:
> [...]
> > > > commit:
> > > >   1e6cb72399 ("fsnotify: add super block object type")
> > > >   60f7ed8c7c ("fsnotify: send path type events to group with super 
> > > > block marks")
> > > >
> > >
> > > I have to admit this looks strange.
> > > All this commit does is dereference mnt->mnt.mnt_sb and then
> > > sb->s_fsnotify_mask/sb->s_fsnotify_marks to find that they are zero.
> > > AFAICT there should be no extra contention added by this commit and it's
> > > hard to believe that parallel unlink workload would suffer from this 
> > > change.
> >
> > Well, it could be those additional fetches of
> > sb->s_fsnotify_mask/sb->s_fsnotify_marks if they happen to be cache cold.
> > Or it could be just code layout differences (i.e., compiler is not able to
> > optimize resulting code as good or the code layout just happens to align
> > with cache lines in a wrong way or something like that). Anyway, without
> > being able to reproduce this and do detailed comparison of perf profiles I
> > don't think we'll be able to tell.
> >
>

On my test machine, I couldn't measure a difference in results between the
two commits, but it may be because my test machine is not strong enough to
drive the test. It only has 4 cores and so I could only reach ~1/4 of
the workload
reported by 0day robot.

I tested both 16-thread and 4-thread jobs. In both cases the measured difference
between the two commits was less than 0.5% and within the stddev margins.

I will see if I can get my hands on a stronger test machine.

Thanks,
Amir.


[RFC PATCH v3 0/5] Do not touch pages/zones during hot-remove path

2018-10-02 Thread Oscar Salvador
I was about to send the patchset without RFC as suggested, but I wanted
to give it one more spin before sending it officially.

I rebased this patchset on top of [1] and [2].

I chose to rebase this on top of [1] because after that, HMM/devm got some
of their code unified, and the changes to be done were less.

Currently, the operations layout performed by the hot-add/remove and
offline/online stages looks like the following:

- hot-add memory:
  a) Allocate a new resource based on the hot-added memory
  b) Add memory sections for the hot-added memory

- online memory:
  c) Re-adjust zone/pgdat nr of pages (managed, spanned, present)
  d) Initialize the pages from the new memory-range
  e) Online memory sections

- offline memory:
  f) Offline memory sections
  g) Re-adjust zone/pgdat nr of managed/present pages

- hot-remove memory:
  i) Re-adjust zone/pgdat nr of spanned pages
  j) Remove memory sections
  k) Release resource


This is not right for two reasons:

 1) If we do not get to online memory added by a hot-add operation,
and we offline it right away, we can access stale pages as these
are only initialized during the onlining stage.
Two problems have been reported for this [3] and [4]
 2) hot-add/remove memory operations should only care about
sections and memblock, nothing else.

This patchset moves the handling of the zones/pages
from the hot-remove path to the offline stage.

One of the things that made me scratch my head is the handling of
memory-hotplug with regard to HMM/devm.
I really scratched my head to find a way to handle it properly
and nicely, but let me be honest about this: my knowledge of that
part of the code tends to 0.

Jerome reviewed that part of the changes and it looked ok for him,
and Pavel did not see anything wrong in v2 either.

But I would like to get more feedback before sending it without RFC.

The picture we have after this is:

- hot-add memory:
  a) Allocate a new resource based on the hot-added memory
  b) Add memory sections for the hot-added memory

- online memory:
  c) Re-adjust zone/pgdat nr of pages (managed, spanned, present)
  d) Initialize the pages from the new memory-range
  e) Online memory sections

- offline memory:
  f) Offline memory sections
  g) Re-adjust zone/pgdat nr of managed/present/spanned pages

- hot-remove memory:
  i) Remove memory sections
  j) Release resource


[1] https://patchwork.kernel.org/cover/10613425/
[2] https://patchwork.kernel.org/cover/10617699/
[3] https://patchwork.kernel.org/patch/10547445/
[4] https://www.spinics.net/lists/linux-mm/msg161316.html

Oscar Salvador (5):
  mm/memory_hotplug: Add nid parameter to arch_remove_memory
  mm/memory_hotplug: Create add/del_device_memory functions
  mm/memory_hotplug: Check for IORESOURCE_SYSRAM in
release_mem_region_adjustable
  mm/memory_hotplug: Move zone/pages handling to offline stage
  mm/memory-hotplug: Rework unregister_mem_sect_under_nodes

 arch/ia64/mm/init.c|   6 +-
 arch/powerpc/mm/mem.c  |  13 +---
 arch/s390/mm/init.c|   2 +-
 arch/sh/mm/init.c  |   6 +-
 arch/x86/mm/init_32.c  |   6 +-
 arch/x86/mm/init_64.c  |  10 +--
 drivers/base/memory.c  |   9 ++-
 drivers/base/node.c|  38 ++
 include/linux/memory.h |   2 +-
 include/linux/memory_hotplug.h |  17 +++--
 include/linux/node.h   |   7 +-
 kernel/memremap.c  |  50 +-
 kernel/resource.c  |  15 
 mm/memory_hotplug.c| 153 ++---
 mm/sparse.c|   4 +-
 15 files changed, 169 insertions(+), 169 deletions(-)

-- 
2.13.6



[RFC PATCH v3 5/5] mm/memory-hotplug: Rework unregister_mem_sect_under_nodes

2018-10-02 Thread Oscar Salvador
From: Oscar Salvador 

This tries to address another issue about accessing
uninitialized pages.

Jonathan reported a problem [1] where we can access stale pages
in case we hot-remove memory without onlining it first.

This time is in unregister_mem_sect_under_nodes.
This function tries to get the nid from the pfn and then
tries to remove the symlink between mem_blk <-> nid and vice versa.

Since we already know the nid in remove_memory(), we can pass
it down the chain to unregister_mem_sect_under_nodes.
There we can just remove the symlinks without the need
to look into the pages.

[1] https://www.spinics.net/lists/linux-mm/msg161316.html

Signed-off-by: Oscar Salvador 
---
 drivers/base/memory.c  |  9 -
 drivers/base/node.c| 38 +++---
 include/linux/memory.h |  2 +-
 include/linux/node.h   |  7 ++-
 mm/memory_hotplug.c|  2 +-
 5 files changed, 15 insertions(+), 43 deletions(-)

diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 0e5985682642..3d8c65d84bea 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -744,8 +744,7 @@ unregister_memory(struct memory_block *memory)
device_unregister(>dev);
 }
 
-static int remove_memory_section(unsigned long node_id,
-  struct mem_section *section, int phys_device)
+static int remove_memory_section(unsigned long nid, struct mem_section 
*section)
 {
struct memory_block *mem;
 
@@ -759,7 +758,7 @@ static int remove_memory_section(unsigned long node_id,
if (!mem)
goto out_unlock;
 
-   unregister_mem_sect_under_nodes(mem, __section_nr(section));
+   unregister_mem_sect_under_nodes(nid, mem);
 
mem->section_count--;
if (mem->section_count == 0)
@@ -772,12 +771,12 @@ static int remove_memory_section(unsigned long node_id,
return 0;
 }
 
-int unregister_memory_section(struct mem_section *section)
+int unregister_memory_section(int nid, struct mem_section *section)
 {
if (!present_section(section))
return -EINVAL;
 
-   return remove_memory_section(0, section, 0);
+   return remove_memory_section(nid, section);
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 86d6cd92ce3d..65bc5920bd3d 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -453,40 +453,16 @@ int register_mem_sect_under_node(struct memory_block 
*mem_blk, void *arg)
return 0;
 }
 
-/* unregister memory section under all nodes that it spans */
-int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
-   unsigned long phys_index)
+/*
+ * This mem_blk is going to be removed, so let us remove the link
+ * to the node and vice versa
+ */
+void unregister_mem_sect_under_nodes(int nid, struct memory_block *mem_blk)
 {
-   NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
-   unsigned long pfn, sect_start_pfn, sect_end_pfn;
-
-   if (!mem_blk) {
-   NODEMASK_FREE(unlinked_nodes);
-   return -EFAULT;
-   }
-   if (!unlinked_nodes)
-   return -ENOMEM;
-   nodes_clear(*unlinked_nodes);
-
-   sect_start_pfn = section_nr_to_pfn(phys_index);
-   sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
-   for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
-   int nid;
-
-   nid = get_nid_for_pfn(pfn);
-   if (nid < 0)
-   continue;
-   if (!node_online(nid))
-   continue;
-   if (node_test_and_set(nid, *unlinked_nodes))
-   continue;
-   sysfs_remove_link(&node_devices[nid]->dev.kobj,
+   sysfs_remove_link(&node_devices[nid]->dev.kobj,
 kobject_name(&mem_blk->dev.kobj));
-   sysfs_remove_link(&mem_blk->dev.kobj,
+   sysfs_remove_link(&mem_blk->dev.kobj,
 kobject_name(&node_devices[nid]->dev.kobj));
-   }
-   NODEMASK_FREE(unlinked_nodes);
-   return 0;
 }
 
 int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
diff --git a/include/linux/memory.h b/include/linux/memory.h
index a6ddefc60517..d75ec88ca09d 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -113,7 +113,7 @@ extern int register_memory_isolate_notifier(struct 
notifier_block *nb);
 extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
 int hotplug_memory_register(int nid, struct mem_section *section);
 #ifdef CONFIG_MEMORY_HOTREMOVE
-extern int unregister_memory_section(struct mem_section *);
+extern int unregister_memory_section(int nid, struct mem_section *);
 #endif
 extern int memory_dev_init(void);
 extern int memory_notify(unsigned long val, void *v);
diff --git a/include/linux/node.h b/include/linux/node.h
index 257bb3d6d014..e8aa9e6d95f9 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -72,8 +72,7 

[RFC PATCH v3 3/5] mm/memory_hotplug: Check for IORESOURCE_SYSRAM in release_mem_region_adjustable

2018-10-02 Thread Oscar Salvador
From: Oscar Salvador 

This is a preparation for the next patch.

Currently, we only call release_mem_region_adjustable() in __remove_pages
if the zone is not ZONE_DEVICE, because resources that belong to
HMM/devm are being released by themselves with devm_release_mem_region.

Since we do not want to touch any zone/page stuff during memory removal
(only during offlining), we do not want to check for
the zone here.
So we need another way to tell release_mem_region_adjustable() not to release
the resource in case it belongs to HMM/devm.

HMM/devm acquires/releases a resource through
devm_request_mem_region/devm_release_mem_region.

These resources have the flag IORESOURCE_MEM, while resources acquired by
hot-add memory path (register_memory_resource()) contain
IORESOURCE_SYSTEM_RAM.

So, we can check for this flag in release_mem_region_adjustable, and if the
resource does not contain such flag, we know that we are dealing with a HMM/devm
resource, so we can back off.

Signed-off-by: Oscar Salvador 
---
 kernel/resource.c | 15 +++
 1 file changed, 15 insertions(+)

diff --git a/kernel/resource.c b/kernel/resource.c
index 81937830a42f..6956ce3a4730 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1272,6 +1272,21 @@ int release_mem_region_adjustable(struct resource 
*parent,
continue;
}
 
+   /*
+* All memory regions added from memory-hotplug path
+* have the flag IORESOURCE_SYSTEM_RAM.
+* If the resource does not have this flag, we know that
+* we are dealing with a resource coming from HMM/devm.
+* HMM/devm use another mechanism to add/release a resource.
+* This goes via 
devm_request_mem_region/devm_release_mem_region.
+* HMM/devm take care to release their resources when they 
want, so
+* if we are dealing with them, let us just back off here.
+*/
+   if (!(res->flags & IORESOURCE_SYSRAM)) {
+   ret = 0;
+   break;
+   }
+
if (!(res->flags & IORESOURCE_MEM))
break;
 
-- 
2.13.6



Re: [tip:x86/hyperv] x86/hyperv: Enable PV qspinlock for Hyper-V

2018-10-02 Thread Juergen Gross
Sorry for noticing this only now, but I have been fighting with
Xen PV qspinlocks last weekend:

On 02/10/2018 13:28, tip-bot for Yi Sun wrote:
> Commit-ID:  aaa7fc34c003bd8133a49f7634480cef6288ad55
> Gitweb: 
> https://git.kernel.org/tip/aaa7fc34c003bd8133a49f7634480cef6288ad55
> Author: Yi Sun 
> AuthorDate: Thu, 27 Sep 2018 14:01:44 +0800
> Committer:  Thomas Gleixner 
> CommitDate: Tue, 2 Oct 2018 13:22:06 +0200
> 
> x86/hyperv: Enable PV qspinlock for Hyper-V
> 
> Implement the necessary callbacks for PV spinlocks which allow vCPU idling
> and kicking operations when running as a guest on Hyper-V
> 
> Signed-off-by: Yi Sun 
> Signed-off-by: Thomas Gleixner 
> Reviewed-by: Michael Kelley 
> Cc: chao.p.p...@intel.com
> Cc: chao@intel.com
> Cc: isaku.yamah...@intel.com
> Cc: tianyu@microsoft.com
> Cc: "K. Y. Srinivasan" 
> Cc: Haiyang Zhang 
> Cc: Stephen Hemminger 
> Link: 
> https://lkml.kernel.org/r/1538028104-114050-3-git-send-email-yi.y@linux.intel.com
> ---
>  Documentation/admin-guide/kernel-parameters.txt |  5 ++
>  arch/x86/hyperv/Makefile|  4 ++
>  arch/x86/hyperv/hv_spinlock.c   | 75 
> +
>  arch/x86/include/asm/mshyperv.h |  1 +
>  arch/x86/kernel/cpu/mshyperv.c  | 14 +
>  5 files changed, 99 insertions(+)
> 

...

> diff --git a/arch/x86/hyperv/hv_spinlock.c b/arch/x86/hyperv/hv_spinlock.c
> new file mode 100644
> index ..6d3221322d0d
> --- /dev/null
> +++ b/arch/x86/hyperv/hv_spinlock.c
> @@ -0,0 +1,75 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +/*
> + * Hyper-V specific spinlock code.
> + *
> + * Copyright (C) 2018, Intel, Inc.
> + *
> + * Author : Yi Sun 
> + */
> +
> +#define pr_fmt(fmt) "Hyper-V: " fmt
> +
> +#include 
> +
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +
> +static bool __initdata hv_pvspin = true;
> +
> +static void hv_qlock_kick(int cpu)
> +{
> + apic->send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
> +}
> +
> +static void hv_qlock_wait(u8 *byte, u8 val)
> +{
> + unsigned long msr_val;
> +
> + if (READ_ONCE(*byte) != val)
> + return;
> +
> + /*
> +  * Read HV_X64_MSR_GUEST_IDLE MSR can trigger the guest's
> +  * transition to the idle power state which can be exited
> +  * by an IPI even if IF flag is disabled.
> +  */

What if interrupts are enabled? Won't a kick happening here just
interrupt and then the following rdmsr result in a hang?

I believe the correct way would be to:

- disable interrupts before the above READ_ONCE() and restore them
  after the rdmsrl()

- return early if in_nmi()

similar to how the kvm-specific variant does it.
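
A rough sketch of that ordering (illustrative only, not a tested patch; it
assumes the includes already present in the quoted file):

static void hv_qlock_wait(u8 *byte, u8 val)
{
	unsigned long flags;
	unsigned long msr_val;

	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * Only enter the guest idle state while the lock byte is unchanged,
	 * with interrupts off so the kick cannot land between the check and
	 * the MSR read.
	 */
	if (READ_ONCE(*byte) == val)
		rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val);

	local_irq_restore(flags);
}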


Juergen


Re: [PATCH 2/2] ARM: dts: at91: at91sam9x5cm: fix addressable nand flash size

2018-10-02 Thread Ludovic Desroches
On Tue, Oct 02, 2018 at 02:29:49PM +0300, Tudor Ambarus wrote:
> at91sam9x5cm comes with a 2Gb NAND flash. Fix the rootfs size to
> match this limit.
> 
> Signed-off-by: Tudor Ambarus 
Acked-by: Ludovic Desroches 

Thanks
> ---
>  arch/arm/boot/dts/at91sam9x5cm.dtsi | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/arch/arm/boot/dts/at91sam9x5cm.dtsi 
> b/arch/arm/boot/dts/at91sam9x5cm.dtsi
> index 4908ee07e628..993eabe1cf7a 100644
> --- a/arch/arm/boot/dts/at91sam9x5cm.dtsi
> +++ b/arch/arm/boot/dts/at91sam9x5cm.dtsi
> @@ -100,7 +100,7 @@
>  
>   rootfs@80 {
>   label = "rootfs";
> - reg = <0x80 
> 0x1f80>;
> + reg = <0x80 
> 0x0f80>;
>   };
>   };
>   };
> -- 
> 2.9.4
> 

