[PATCH] i2c: i2c-mux-gpio: Change log level to debug for probe deferrals

2015-03-25 Thread Ioan Nicu
Probe deferral is not an error case. It happens only when
the necessary dependencies are not there yet.

The driver core is already printing a message when a driver
requests probe deferral, so this can be traced in the logs
without these error prints.

This patch changes the error messages from these deferral cases
to debug messages.

Signed-off-by: Ionut Nicu <ioan.nicu@nokia.com>
---
 drivers/i2c/muxes/i2c-mux-gpio.c |4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index f5798eb..9f1cfca 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -77,7 +77,7 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
}
adapter = of_find_i2c_adapter_by_node(adapter_np);
if (!adapter) {
-   dev_err(&pdev->dev, "Cannot find parent bus\n");
+   dev_dbg(&pdev->dev, "Cannot find parent bus\n");
return -EPROBE_DEFER;
}
mux->data.parent = i2c_adapter_id(adapter);
@@ -178,7 +178,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
 
parent = i2c_get_adapter(mux->data.parent);
if (!parent) {
-   dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
+   dev_dbg(&pdev->dev, "Parent adapter (%d) not found\n",
mux->data.parent);
return -EPROBE_DEFER;
}
-- 
1.7.1

--
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH] i2c: i2c-mux-gpio: Change log level to debug for probe deferrals

2015-03-30 Thread Ioan Nicu
On Fri, Mar 27, 2015 at 04:29:33PM +0100, ext Wolfram Sang wrote:
 On Wed, Mar 25, 2015 at 06:43:51PM +0100, Ioan Nicu wrote:
  Probe deferral is not an error case. It happens only when
  the necessary dependencies are not there yet.
  
  The driver core is already printing a message when a driver
  requests probe deferral, so this can be traced in the logs
  without these error prints.
  
  This patch changes the error messages from these deferral cases
  to debug messages.
  
  Signed-off-by: Ionut Nicu <ioan.nicu@nokia.com>
 
 Why not simply removing it?
 

Right, those messages are not relevant anyway, so removing them is better.
I'll send out an updated patch.

Thanks!
--
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[PATCH v2] i2c: i2c-mux-gpio: remove error messages for probe deferrals

2015-03-30 Thread Ioan Nicu
Probe deferral is not an error case. It happens only when
the necessary dependencies are not there yet.

The driver core is already printing a message when a driver
requests probe deferral, so this can be traced in the logs
without these error prints.

This patch removes the error messages for these deferral cases.

Signed-off-by: Ionut Nicu <ioan.nicu@nokia.com>
---
 drivers/i2c/muxes/i2c-mux-gpio.c |   10 +++---
 1 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index f5798eb..70db992 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -76,10 +76,9 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
return -ENODEV;
}
adapter = of_find_i2c_adapter_by_node(adapter_np);
-   if (!adapter) {
-   dev_err(&pdev->dev, "Cannot find parent bus\n");
+   if (!adapter)
return -EPROBE_DEFER;
-   }
+
mux->data.parent = i2c_adapter_id(adapter);
put_device(&adapter->dev);
 
@@ -177,11 +176,8 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
}
 
parent = i2c_get_adapter(mux->data.parent);
-   if (!parent) {
-   dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
-   mux->data.parent);
+   if (!parent)
return -EPROBE_DEFER;
-   }
 
mux->parent = parent;
mux->gpio_base = gpio_base;
-- 
1.7.1

--
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[PATCH] MIPS: Octeon: do not change affinity for disabled irqs

2016-02-15 Thread Ioan Nicu
Octeon sets the default irq affinity to value 1 in the early arch init
code, so by default all irqs get registered with their affinity set to
core 0.

When setting one CPU ofline, octeon_irq_cpu_offline_ciu() calls
irq_set_affinity_locked(), but this function sets the IRQD_AFFINITY_SET bit
in the irq descriptor. This has the side effect that if one irq is
requested later, after putting one CPU offline, the affinity of this irq
would not be the default anymore, but rather forced to "all cores - the
offline core".

This patch sets the IRQCHIP_ONOFFLINE_ENABLED flag in octeon irq
controllers, so that the kernel would call the irq_cpu_[on|off]line()
callbacks only for enabled irqs. If some other irq is requested after
setting one cpu offline, it would use the default irq affinity, same as it
would do in the normal case where there is no CPU hotplug operation.

Signed-off-by: Ioan Nicu <ioan.nicu@nokia.com>
Acked-by: Alexander Sverdlin <alexander.sverdlin@nokia.com>
---
 arch/mips/cavium-octeon/octeon-irq.c |   15 ---
 1 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/arch/mips/cavium-octeon/octeon-irq.c 
b/arch/mips/cavium-octeon/octeon-irq.c
index 368eb49..684582e 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -935,6 +935,7 @@ static struct irq_chip octeon_irq_chip_ciu_v2 = {
 #ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+   .flags = IRQCHIP_ONOFFLINE_ENABLED,
 #endif
 };
 
@@ -948,6 +949,7 @@ static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
 #ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+   .flags = IRQCHIP_ONOFFLINE_ENABLED,
 #endif
 };
 
@@ -963,6 +965,7 @@ static struct irq_chip octeon_irq_chip_ciu_sum2 = {
 #ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+   .flags = IRQCHIP_ONOFFLINE_ENABLED,
 #endif
 };
 
@@ -976,6 +979,7 @@ static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
 #ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+   .flags = IRQCHIP_ONOFFLINE_ENABLED,
 #endif
 };
 
@@ -988,6 +992,7 @@ static struct irq_chip octeon_irq_chip_ciu = {
 #ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+   .flags = IRQCHIP_ONOFFLINE_ENABLED,
 #endif
 };
 
@@ -1001,6 +1006,7 @@ static struct irq_chip octeon_irq_chip_ciu_edge = {
 #ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+   .flags = IRQCHIP_ONOFFLINE_ENABLED,
 #endif
 };
 
@@ -1041,7 +1047,7 @@ static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
 #endif
-   .flags = IRQCHIP_SET_TYPE_MASKED,
+   .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_ONOFFLINE_ENABLED,
 };
 
 static struct irq_chip octeon_irq_chip_ciu_gpio = {
@@ -1056,7 +1062,7 @@ static struct irq_chip octeon_irq_chip_ciu_gpio = {
.irq_set_affinity = octeon_irq_ciu_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
 #endif
-   .flags = IRQCHIP_SET_TYPE_MASKED,
+   .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_ONOFFLINE_ENABLED,
 };
 
 /*
@@ -1838,6 +1844,7 @@ static struct irq_chip octeon_irq_chip_ciu2 = {
 #ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu2_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+   .flags = IRQCHIP_ONOFFLINE_ENABLED,
 #endif
 };
 
@@ -1851,6 +1858,7 @@ static struct irq_chip octeon_irq_chip_ciu2_edge = {
 #ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu2_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+   .flags = IRQCHIP_ONOFFLINE_ENABLED,
 #endif
 };
 
@@ -1886,7 +1894,7 @@ static struct irq_chip octeon_irq_chip_ciu2_gpio = {
.irq_set_affinity = octeon_irq_ciu2_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
 #endif
-   .flags = IRQCHIP_SET_TYPE_MASKED,
+   .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_ONOFFLINE_ENABLED,
 };
 
 static int octeon_irq_ciu2_xlat(struct irq_domain *d,
@@ -2537,6 +2545,7 @@ static struct irq_chip octeon_irq_chip_ciu3 = {
 #ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu3_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+   .flags = IRQCHIP_ONOFFLINE_ENABLED,
 #endif
 };
 
-- 
1.7.1



[PATCH] rapidio: remove global irq spinlocks from the subsystem

2017-08-24 Thread Ioan Nicu

Locking of config and doorbell operations should be done
only if the underlying hardware requires it.

This patch removes the global spinlocks from the rapidio
subsystem and moves them to the mport drivers (fsl_rio and tsi721),
only to the necessary places. For example, local config space
read and write operations (lcread/lcwrite) are atomic in all
existing drivers, so there should be no need for locking, while
the cread/cwrite operations which generate maintenance transactions
need to be synchronized with a lock.

Later, each driver could chose to use a per-port lock instead
of a global one, or even more granular locking.

Signed-off-by: Ioan Nicu <ioan.nicu@nokia.com>
Signed-off-by: Frank Kunz <frank.k...@nokia.com>
---
 arch/powerpc/sysdev/fsl_rio.c| 17 +++--
 arch/powerpc/sysdev/fsl_rmu.c|  8 
 drivers/rapidio/devices/tsi721.c |  7 +++
 drivers/rapidio/rio-access.c | 40 +---
 4 files changed, 35 insertions(+), 37 deletions(-)

diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 1c41c51..e9f3bc9 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -71,6 +71,8 @@
 #define RIWAR_WRTYP_ALLOC  0x6000
 #define RIWAR_SIZE_MASK0x003F
 
+static DEFINE_SPINLOCK(fsl_rio_config_lock);
+
 #define __fsl_read_rio_config(x, addr, err, op)\
__asm__ __volatile__(   \
"1: "op" %1,0(%2)\n"\
@@ -184,6 +186,7 @@ static int fsl_local_config_write(struct rio_mport *mport,
u8 hopcount, u32 offset, int len, u32 *val)
 {
struct rio_priv *priv = mport->priv;
+   unsigned long flags;
u8 *data;
u32 rval, err = 0;
 
@@ -197,6 +200,8 @@ static int fsl_local_config_write(struct rio_mport *mport,
if (offset > (0x100 - len) || !IS_ALIGNED(offset, len))
return -EINVAL;
 
+   spin_lock_irqsave(&fsl_rio_config_lock, flags);
+
out_be32(&priv->maint_atmu_regs->rowtar,
 (destid << 22) | (hopcount << 12) | (offset >> 12));
out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
@@ -213,6 +218,7 @@ static int fsl_local_config_write(struct rio_mport *mport,
__fsl_read_rio_config(rval, data, err, "lwz");
break;
default:
+   spin_unlock_irqrestore(&fsl_rio_config_lock, flags);
return -EINVAL;
}
 
@@ -221,6 +227,7 @@ static int fsl_local_config_write(struct rio_mport *mport,
 err, destid, hopcount, offset);
}
 
+   spin_unlock_irqrestore(&fsl_rio_config_lock, flags);
*val = rval;
 
return err;
@@ -244,7 +251,10 @@ static int fsl_local_config_write(struct rio_mport *mport,
u8 hopcount, u32 offset, int len, u32 val)
 {
struct rio_priv *priv = mport->priv;
+   unsigned long flags;
u8 *data;
+   int ret = 0;
+
pr_debug
("fsl_rio_config_write:"
" index %d destid %d hopcount %d offset %8.8x len %d val 
%8.8x\n",
@@ -255,6 +265,8 @@ static int fsl_local_config_write(struct rio_mport *mport,
if (offset > (0x100 - len) || !IS_ALIGNED(offset, len))
return -EINVAL;
 
+   spin_lock_irqsave(&fsl_rio_config_lock, flags);
+
out_be32(&priv->maint_atmu_regs->rowtar,
 (destid << 22) | (hopcount << 12) | (offset >> 12));
out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
@@ -271,10 +283,11 @@ static int fsl_local_config_write(struct rio_mport *mport,
out_be32((u32 *) data, val);
break;
default:
-   return -EINVAL;
+   ret = -EINVAL;
}
+   spin_unlock_irqrestore(&fsl_rio_config_lock, flags);
 
-   return 0;
+   return ret;
 }
 
 static void fsl_rio_inbound_mem_init(struct rio_priv *priv)
diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c
index c1826de..c15a17a 100644
--- a/arch/powerpc/sysdev/fsl_rmu.c
+++ b/arch/powerpc/sysdev/fsl_rmu.c
@@ -104,6 +104,8 @@
 
 #define DOORBELL_MESSAGE_SIZE  0x08
 
+static DEFINE_SPINLOCK(fsl_rio_doorbell_lock);
+
 struct rio_msg_regs {
u32 omr;
u32 osr;
@@ -626,9 +628,13 @@ int fsl_rio_port_write_init(struct fsl_rio_pw *pw)
 int fsl_rio_doorbell_send(struct rio_mport *mport,
int index, u16 destid, u16 data)
 {
+   unsigned long flags;
+
pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
 index, destid, data);
 
+   spin_lock_irqsave(&fsl_rio_doorbell_lock, flags);
+
/* In the serial version silicons, such as MPC8548, MPC8641,
 * below operations is must be.

[PATCH] rapidio: use a reference count for struct mport_dma_req

2018-04-05 Thread Ioan Nicu
Once the dma request is passed to the DMA engine, the DMA
subsystem would hold a pointer to this structure and could
call the completion callback after do_dma_request() has
timed out.

The current code deals with this by putting timed out SYNC
requests to a pending list and freeing them later, when the
mport cdev device is released. But this still does not
guarantee that the DMA subsystem is really done with those
transfers, so in theory dma_xfer_callback/dma_req_free
could be called after mport_cdev_release_dma and could
potentially access already freed memory.

This patch simplifies the current handling by using a kref
in the mport dma request structure, so that it gets freed
only when nobody uses it anymore.

This also simplifies the code a bit, as FAF transfers are
now handled in the same way as SYNC and ASYNC transfers.
There is no need anymore for the pending list and for the
dma workqueue which was used in case of FAF transfers, so
we remove them both.

Signed-off-by: Ioan Nicu <ioan.nicu@nokia.com>
---
 drivers/rapidio/devices/rio_mport_cdev.c | 122 +--
 1 file changed, 18 insertions(+), 104 deletions(-)

diff --git a/drivers/rapidio/devices/rio_mport_cdev.c 
b/drivers/rapidio/devices/rio_mport_cdev.c
index cfb54e01d758..9d27016c899e 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -212,7 +212,6 @@ struct mport_cdev_priv {
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct dma_chan *dmach;
struct list_headasync_list;
-   struct list_headpend_list;
spinlock_t  req_lock;
struct mutexdma_lock;
struct kref dma_ref;
@@ -258,8 +257,6 @@ static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
 static struct class *dev_class;
 static dev_t dev_number;
 
-static struct workqueue_struct *dma_wq;
-
 static void mport_release_mapping(struct kref *ref);
 
 static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
@@ -539,6 +536,7 @@ static int maint_comptag_set(struct mport_cdev_priv *priv, 
void __user *arg)
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 
 struct mport_dma_req {
+   struct kref refcount;
struct list_head node;
struct file *filp;
struct mport_cdev_priv *priv;
@@ -554,11 +552,6 @@ struct mport_dma_req {
struct completion req_comp;
 };
 
-struct mport_faf_work {
-   struct work_struct work;
-   struct mport_dma_req *req;
-};
-
 static void mport_release_def_dma(struct kref *dma_ref)
 {
struct mport_dev *md =
@@ -578,8 +571,10 @@ static void mport_release_dma(struct kref *dma_ref)
complete(>comp);
 }
 
-static void dma_req_free(struct mport_dma_req *req)
+static void dma_req_free(struct kref *ref)
 {
+   struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
+   refcount);
struct mport_cdev_priv *priv = req->priv;
unsigned int i;
 
@@ -611,30 +606,7 @@ static void dma_xfer_callback(void *param)
req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
   NULL, NULL);
complete(&req->req_comp);
-}
-
-static void dma_faf_cleanup(struct work_struct *_work)
-{
-   struct mport_faf_work *work = container_of(_work,
-   struct mport_faf_work, work);
-   struct mport_dma_req *req = work->req;
-
-   dma_req_free(req);
-   kfree(work);
-}
-
-static void dma_faf_callback(void *param)
-{
-   struct mport_dma_req *req = (struct mport_dma_req *)param;
-   struct mport_faf_work *work;
-
-   work = kmalloc(sizeof(*work), GFP_ATOMIC);
-   if (!work)
-   return;
-
-   INIT_WORK(&work->work, dma_faf_cleanup);
-   work->req = req;
-   queue_work(dma_wq, &work->work);
+   kref_put(&req->refcount, dma_req_free);
 }
 
 /*
@@ -765,16 +737,14 @@ static int do_dma_request(struct mport_dma_req *req,
goto err_out;
}
 
-   if (sync == RIO_TRANSFER_FAF)
-   tx->callback = dma_faf_callback;
-   else
-   tx->callback = dma_xfer_callback;
+   tx->callback = dma_xfer_callback;
tx->callback_param = req;
 
req->dmach = chan;
req->sync = sync;
req->status = DMA_IN_PROGRESS;
init_completion(&req->req_comp);
+   kref_get(&req->refcount);
 
cookie = dmaengine_submit(tx);
req->cookie = cookie;
@@ -785,6 +755,7 @@ static int do_dma_request(struct mport_dma_req *req,
if (dma_submit_error(cookie)) {
rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
   cookie, xfer->rio_addr, xfer->length);
+   kref_put(&req->refcount, dma_req_free);
ret = -EIO;
goto err_out;
}
@@ -860,6 +831,8 @@ rio_dma_transfe

Re: [PATCH] rapidio: fix rio_dma_transfer error handling

2018-04-13 Thread Ioan Nicu
Hi,

On Thu, Apr 12, 2018 at 07:44:01PM -0400, Alexandre Bounine wrote:
> 
> On 2018-04-12 05:28 PM, Andrew Morton wrote:
> > On Thu, 12 Apr 2018 17:06:05 +0200 Ioan Nicu <ioan.nicu@nokia.com> 
> > wrote:
> > 
> > > Some of the mport_dma_req structure members were initialized late
> > > inside the do_dma_request() function, just before submitting the
> > > request to the dma engine. But we have some error branches before
> > > that. In case of such an error, the code would return on the error
> > > path and trigger the calling of dma_req_free() with a req structure
> > > which is not completely initialized. This causes a NULL pointer
> > > dereference in dma_req_free().
> > > 
> > > This patch fixes these error branches by making sure that all
> > > necessary mport_dma_req structure members are initialized in
> > > rio_dma_transfer() immediately after the request structure gets
> > > allocated.
> > 
> > This sounds like something which someone has actually triggered in a
> > real-world situation.  So I added a cc:stable.  Please let me know if
> > that was inappropriate.
> > 
> > And please remember to always include all information regarding
> > end-user impact when fixing bugs.
> > 
> This bug fix is applicable to versions starting from v4.6

Actually, this is something I broke with my previous patch where I added a
kref to the mport_dma_req structure. Before this patch, all the error paths
were doing kfree(req) instead of kref_put(&req->refcount, dma_req_free).

Now that dma_req_free() is called, it dereferences req->dmach, which is
initialized late in do_dma_request(), so dma_req_free() could be called
with a NULL req->dmach in some cases.

Sorry if I did not make this clear enough in the description.

Regards,
Ioan


[PATCH] rapidio: fix rio_dma_transfer error handling

2018-04-12 Thread Ioan Nicu
Some of the mport_dma_req structure members were initialized late
inside the do_dma_request() function, just before submitting the
request to the dma engine. But we have some error branches before
that. In case of such an error, the code would return on the error
path and trigger the calling of dma_req_free() with a req structure
which is not completely initialized. This causes a NULL pointer
dereference in dma_req_free().

This patch fixes these error branches by making sure that all
necessary mport_dma_req structure members are initialized in
rio_dma_transfer() immediately after the request structure gets
allocated.

Signed-off-by: Ioan Nicu <ioan.nicu@nokia.com>
---
 drivers/rapidio/devices/rio_mport_cdev.c | 19 +--
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/drivers/rapidio/devices/rio_mport_cdev.c 
b/drivers/rapidio/devices/rio_mport_cdev.c
index 9d27016c899e..0434ab7b6497 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -740,10 +740,7 @@ static int do_dma_request(struct mport_dma_req *req,
tx->callback = dma_xfer_callback;
tx->callback_param = req;
 
-   req->dmach = chan;
-   req->sync = sync;
req->status = DMA_IN_PROGRESS;
-   init_completion(&req->req_comp);
kref_get(&req->refcount);
 
cookie = dmaengine_submit(tx);
@@ -831,13 +828,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
if (!req)
return -ENOMEM;
 
-   kref_init(&req->refcount);
-
ret = get_dma_channel(priv);
if (ret) {
kfree(req);
return ret;
}
+   chan = priv->dmach;
+
+   kref_init(&req->refcount);
+   init_completion(&req->req_comp);
+   req->dir = dir;
+   req->filp = filp;
+   req->priv = priv;
+   req->dmach = chan;
+   req->sync = sync;
 
/*
 * If parameter loc_addr != NULL, we are transferring data from/to
@@ -925,11 +929,6 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
xfer->offset, xfer->length);
}
 
-   req->dir = dir;
-   req->filp = filp;
-   req->priv = priv;
-   chan = priv->dmach;
-
nents = dma_map_sg(chan->device->dev,
   req->sgt.sgl, req->sgt.nents, dir);
if (nents == 0) {
-- 
2.16.3



[PATCH] rapidio: use a reference count for struct mport_dma_req

2018-04-03 Thread Ioan Nicu
Once the dma request is passed to the DMA engine, the DMA
subsystem would hold a pointer to this structure and could
call the completion callback after do_dma_request() has
timed out.

The current code deals with this by putting timed out SYNC
requests to a pending list and freeing them later, when the
mport cdev device is released. But this still does not
guarantee that the DMA subsystem is really done with those
transfers, so in theory dma_xfer_callback/dma_req_free
could be called after mport_cdev_release_dma and could
potentially access already freed memory.

This patch simplifies the current handling by using a kref
in the mport dma request structure, so that it gets freed
only when nobody uses it anymore.

This also simplifies the code a bit, as FAF transfers are
now handled in the same way as SYNC and ASYNC transfers.
There is no need anymore for the pending list and for the
dma workqueue which was used in case of FAF transfers, so
we remove them both.

Signed-off-by: Ioan Nicu <ioan.nicu@nokia.com>
---
 drivers/rapidio/devices/rio_mport_cdev.c | 122 +--
 1 file changed, 18 insertions(+), 104 deletions(-)

diff --git a/drivers/rapidio/devices/rio_mport_cdev.c 
b/drivers/rapidio/devices/rio_mport_cdev.c
index cfb54e01d758..9d27016c899e 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -212,7 +212,6 @@ struct mport_cdev_priv {
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct dma_chan *dmach;
struct list_headasync_list;
-   struct list_headpend_list;
spinlock_t  req_lock;
struct mutexdma_lock;
struct kref dma_ref;
@@ -258,8 +257,6 @@ static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
 static struct class *dev_class;
 static dev_t dev_number;
 
-static struct workqueue_struct *dma_wq;
-
 static void mport_release_mapping(struct kref *ref);
 
 static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
@@ -539,6 +536,7 @@ static int maint_comptag_set(struct mport_cdev_priv *priv, 
void __user *arg)
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 
 struct mport_dma_req {
+   struct kref refcount;
struct list_head node;
struct file *filp;
struct mport_cdev_priv *priv;
@@ -554,11 +552,6 @@ struct mport_dma_req {
struct completion req_comp;
 };
 
-struct mport_faf_work {
-   struct work_struct work;
-   struct mport_dma_req *req;
-};
-
 static void mport_release_def_dma(struct kref *dma_ref)
 {
struct mport_dev *md =
@@ -578,8 +571,10 @@ static void mport_release_dma(struct kref *dma_ref)
complete(>comp);
 }
 
-static void dma_req_free(struct mport_dma_req *req)
+static void dma_req_free(struct kref *ref)
 {
+   struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
+   refcount);
struct mport_cdev_priv *priv = req->priv;
unsigned int i;
 
@@ -611,30 +606,7 @@ static void dma_xfer_callback(void *param)
req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
   NULL, NULL);
complete(&req->req_comp);
-}
-
-static void dma_faf_cleanup(struct work_struct *_work)
-{
-   struct mport_faf_work *work = container_of(_work,
-   struct mport_faf_work, work);
-   struct mport_dma_req *req = work->req;
-
-   dma_req_free(req);
-   kfree(work);
-}
-
-static void dma_faf_callback(void *param)
-{
-   struct mport_dma_req *req = (struct mport_dma_req *)param;
-   struct mport_faf_work *work;
-
-   work = kmalloc(sizeof(*work), GFP_ATOMIC);
-   if (!work)
-   return;
-
-   INIT_WORK(&work->work, dma_faf_cleanup);
-   work->req = req;
-   queue_work(dma_wq, &work->work);
+   kref_put(&req->refcount, dma_req_free);
 }
 
 /*
@@ -765,16 +737,14 @@ static int do_dma_request(struct mport_dma_req *req,
goto err_out;
}
 
-   if (sync == RIO_TRANSFER_FAF)
-   tx->callback = dma_faf_callback;
-   else
-   tx->callback = dma_xfer_callback;
+   tx->callback = dma_xfer_callback;
tx->callback_param = req;
 
req->dmach = chan;
req->sync = sync;
req->status = DMA_IN_PROGRESS;
init_completion(&req->req_comp);
+   kref_get(&req->refcount);
 
cookie = dmaengine_submit(tx);
req->cookie = cookie;
@@ -785,6 +755,7 @@ static int do_dma_request(struct mport_dma_req *req,
if (dma_submit_error(cookie)) {
rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
   cookie, xfer->rio_addr, xfer->length);
+   kref_put(&req->refcount, dma_req_free);
ret = -EIO;
goto err_out;
}
@@ -860,6 +831,8 @@ rio_dma_transfe

[PATCH] rapidio: remove global irq spinlocks from the subsystem

2017-08-24 Thread Ioan Nicu

Locking of config and doorbell operations should be done
only if the underlying hardware requires it.

This patch removes the global spinlocks from the rapidio
subsystem and moves them to the mport drivers (fsl_rio and tsi721),
only to the necessary places. For example, local config space
read and write operations (lcread/lcwrite) are atomic in all
existing drivers, so there should be no need for locking, while
the cread/cwrite operations which generate maintenance transactions
need to be synchronized with a lock.

Later, each driver could chose to use a per-port lock instead
of a global one, or even more granular locking.

Signed-off-by: Ioan Nicu 
Signed-off-by: Frank Kunz 
---
 arch/powerpc/sysdev/fsl_rio.c| 17 +++--
 arch/powerpc/sysdev/fsl_rmu.c|  8 
 drivers/rapidio/devices/tsi721.c |  7 +++
 drivers/rapidio/rio-access.c | 40 +---
 4 files changed, 35 insertions(+), 37 deletions(-)

diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 1c41c51..e9f3bc9 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -71,6 +71,8 @@
 #define RIWAR_WRTYP_ALLOC  0x6000
 #define RIWAR_SIZE_MASK0x003F
 
+static DEFINE_SPINLOCK(fsl_rio_config_lock);
+
 #define __fsl_read_rio_config(x, addr, err, op)\
__asm__ __volatile__(   \
"1: "op" %1,0(%2)\n"\
@@ -184,6 +186,7 @@ static int fsl_local_config_write(struct rio_mport *mport,
u8 hopcount, u32 offset, int len, u32 *val)
 {
struct rio_priv *priv = mport->priv;
+   unsigned long flags;
u8 *data;
u32 rval, err = 0;
 
@@ -197,6 +200,8 @@ static int fsl_local_config_write(struct rio_mport *mport,
if (offset > (0x100 - len) || !IS_ALIGNED(offset, len))
return -EINVAL;
 
+   spin_lock_irqsave(&fsl_rio_config_lock, flags);
+
out_be32(&priv->maint_atmu_regs->rowtar,
 (destid << 22) | (hopcount << 12) | (offset >> 12));
out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
@@ -213,6 +218,7 @@ static int fsl_local_config_write(struct rio_mport *mport,
__fsl_read_rio_config(rval, data, err, "lwz");
break;
default:
+   spin_unlock_irqrestore(&fsl_rio_config_lock, flags);
return -EINVAL;
}
 
@@ -221,6 +227,7 @@ static int fsl_local_config_write(struct rio_mport *mport,
 err, destid, hopcount, offset);
}
 
+   spin_unlock_irqrestore(&fsl_rio_config_lock, flags);
*val = rval;
 
return err;
@@ -244,7 +251,10 @@ static int fsl_local_config_write(struct rio_mport *mport,
u8 hopcount, u32 offset, int len, u32 val)
 {
struct rio_priv *priv = mport->priv;
+   unsigned long flags;
u8 *data;
+   int ret = 0;
+
pr_debug
("fsl_rio_config_write:"
" index %d destid %d hopcount %d offset %8.8x len %d val 
%8.8x\n",
@@ -255,6 +265,8 @@ static int fsl_local_config_write(struct rio_mport *mport,
if (offset > (0x100 - len) || !IS_ALIGNED(offset, len))
return -EINVAL;
 
+   spin_lock_irqsave(&fsl_rio_config_lock, flags);
+
out_be32(&priv->maint_atmu_regs->rowtar,
 (destid << 22) | (hopcount << 12) | (offset >> 12));
out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
@@ -271,10 +283,11 @@ static int fsl_local_config_write(struct rio_mport *mport,
out_be32((u32 *) data, val);
break;
default:
-   return -EINVAL;
+   ret = -EINVAL;
}
+   spin_unlock_irqrestore(&fsl_rio_config_lock, flags);
 
-   return 0;
+   return ret;
 }
 
 static void fsl_rio_inbound_mem_init(struct rio_priv *priv)
diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c
index c1826de..c15a17a 100644
--- a/arch/powerpc/sysdev/fsl_rmu.c
+++ b/arch/powerpc/sysdev/fsl_rmu.c
@@ -104,6 +104,8 @@
 
 #define DOORBELL_MESSAGE_SIZE  0x08
 
+static DEFINE_SPINLOCK(fsl_rio_doorbell_lock);
+
 struct rio_msg_regs {
u32 omr;
u32 osr;
@@ -626,9 +628,13 @@ int fsl_rio_port_write_init(struct fsl_rio_pw *pw)
 int fsl_rio_doorbell_send(struct rio_mport *mport,
int index, u16 destid, u16 data)
 {
+   unsigned long flags;
+
pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
 index, destid, data);
 
+   spin_lock_irqsave(&fsl_rio_doorbell_lock, flags);
+
/* In the serial version silicons, such as MPC8548, MPC8641,
 * below operations is must be.
 */
@@ -638,6 +644,8 @@ int fsl_rio_do

[PATCH] rapidio: use a reference count for struct mport_dma_req

2018-04-05 Thread Ioan Nicu
Once the dma request is passed to the DMA engine, the DMA
subsystem would hold a pointer to this structure and could
call the completion callback after do_dma_request() has
timed out.

The current code deals with this by putting timed out SYNC
requests to a pending list and freeing them later, when the
mport cdev device is released. But this still does not
guarantee that the DMA subsystem is really done with those
transfers, so in theory dma_xfer_callback/dma_req_free
could be called after mport_cdev_release_dma and could
potentially access already freed memory.

This patch simplifies the current handling by using a kref
in the mport dma request structure, so that it gets freed
only when nobody uses it anymore.

This also simplifies the code a bit, as FAF transfers are
now handled in the same way as SYNC and ASYNC transfers.
There is no need anymore for the pending list and for the
dma workqueue which was used in case of FAF transfers, so
we remove them both.

Signed-off-by: Ioan Nicu 
---
 drivers/rapidio/devices/rio_mport_cdev.c | 122 +--
 1 file changed, 18 insertions(+), 104 deletions(-)

diff --git a/drivers/rapidio/devices/rio_mport_cdev.c 
b/drivers/rapidio/devices/rio_mport_cdev.c
index cfb54e01d758..9d27016c899e 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -212,7 +212,6 @@ struct mport_cdev_priv {
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct dma_chan *dmach;
struct list_headasync_list;
-   struct list_headpend_list;
spinlock_t  req_lock;
struct mutexdma_lock;
struct kref dma_ref;
@@ -258,8 +257,6 @@ static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
 static struct class *dev_class;
 static dev_t dev_number;
 
-static struct workqueue_struct *dma_wq;
-
 static void mport_release_mapping(struct kref *ref);
 
 static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
@@ -539,6 +536,7 @@ static int maint_comptag_set(struct mport_cdev_priv *priv, 
void __user *arg)
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 
 struct mport_dma_req {
+   struct kref refcount;
struct list_head node;
struct file *filp;
struct mport_cdev_priv *priv;
@@ -554,11 +552,6 @@ struct mport_dma_req {
struct completion req_comp;
 };
 
-struct mport_faf_work {
-   struct work_struct work;
-   struct mport_dma_req *req;
-};
-
 static void mport_release_def_dma(struct kref *dma_ref)
 {
struct mport_dev *md =
@@ -578,8 +571,10 @@ static void mport_release_dma(struct kref *dma_ref)
	complete(&md->comp);
 }
 
-static void dma_req_free(struct mport_dma_req *req)
+static void dma_req_free(struct kref *ref)
 {
+   struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
+   refcount);
struct mport_cdev_priv *priv = req->priv;
unsigned int i;
 
@@ -611,30 +606,7 @@ static void dma_xfer_callback(void *param)
req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
   NULL, NULL);
	complete(&req->req_comp);
-}
-
-static void dma_faf_cleanup(struct work_struct *_work)
-{
-   struct mport_faf_work *work = container_of(_work,
-   struct mport_faf_work, work);
-   struct mport_dma_req *req = work->req;
-
-   dma_req_free(req);
-   kfree(work);
-}
-
-static void dma_faf_callback(void *param)
-{
-   struct mport_dma_req *req = (struct mport_dma_req *)param;
-   struct mport_faf_work *work;
-
-   work = kmalloc(sizeof(*work), GFP_ATOMIC);
-   if (!work)
-   return;
-
-   INIT_WORK(&work->work, dma_faf_cleanup);
-   work->req = req;
-   queue_work(dma_wq, &work->work);
+   kref_put(&req->refcount, dma_req_free);
 }
 
 /*
@@ -765,16 +737,14 @@ static int do_dma_request(struct mport_dma_req *req,
goto err_out;
}
 
-   if (sync == RIO_TRANSFER_FAF)
-   tx->callback = dma_faf_callback;
-   else
-   tx->callback = dma_xfer_callback;
+   tx->callback = dma_xfer_callback;
tx->callback_param = req;
 
req->dmach = chan;
req->sync = sync;
req->status = DMA_IN_PROGRESS;
	init_completion(&req->req_comp);
+   kref_get(&req->refcount);
 
cookie = dmaengine_submit(tx);
req->cookie = cookie;
@@ -785,6 +755,7 @@ static int do_dma_request(struct mport_dma_req *req,
if (dma_submit_error(cookie)) {
rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
   cookie, xfer->rio_addr, xfer->length);
+   kref_put(&req->refcount, dma_req_free);
ret = -EIO;
goto err_out;
}
@@ -860,6 +831,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mod

[PATCH] rapidio: fix rio_dma_transfer error handling

2018-04-12 Thread Ioan Nicu
Some of the mport_dma_req structure members were initialized late
inside the do_dma_request() function, just before submitting the
request to the dma engine. But we have some error branches before
that. In case of such an error, the code would return on the error
path and trigger the calling of dma_req_free() with a req structure
which is not completely initialized. This causes a NULL pointer
dereference in dma_req_free().

This patch fixes these error branches by making sure that all
necessary mport_dma_req structure members are initialized in
rio_dma_transfer() immediately after the request structure gets
allocated.

Signed-off-by: Ioan Nicu 
---
 drivers/rapidio/devices/rio_mport_cdev.c | 19 +--
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/drivers/rapidio/devices/rio_mport_cdev.c 
b/drivers/rapidio/devices/rio_mport_cdev.c
index 9d27016c899e..0434ab7b6497 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -740,10 +740,7 @@ static int do_dma_request(struct mport_dma_req *req,
tx->callback = dma_xfer_callback;
tx->callback_param = req;
 
-   req->dmach = chan;
-   req->sync = sync;
req->status = DMA_IN_PROGRESS;
-   init_completion(&req->req_comp);
	kref_get(&req->refcount);
 
cookie = dmaengine_submit(tx);
@@ -831,13 +828,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
if (!req)
return -ENOMEM;
 
-   kref_init(>refcount);
-
ret = get_dma_channel(priv);
if (ret) {
kfree(req);
return ret;
}
+   chan = priv->dmach;
+
+   kref_init(&req->refcount);
+   init_completion(&req->req_comp);
+   req->dir = dir;
+   req->filp = filp;
+   req->priv = priv;
+   req->dmach = chan;
+   req->sync = sync;
 
/*
 * If parameter loc_addr != NULL, we are transferring data from/to
@@ -925,11 +929,6 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
xfer->offset, xfer->length);
}
 
-   req->dir = dir;
-   req->filp = filp;
-   req->priv = priv;
-   chan = priv->dmach;
-
nents = dma_map_sg(chan->device->dev,
   req->sgt.sgl, req->sgt.nents, dir);
if (nents == 0) {
-- 
2.16.3



Re: [PATCH] rapidio: fix rio_dma_transfer error handling

2018-04-13 Thread Ioan Nicu
Hi,

On Thu, Apr 12, 2018 at 07:44:01PM -0400, Alexandre Bounine wrote:
> 
> On 2018-04-12 05:28 PM, Andrew Morton wrote:
> > On Thu, 12 Apr 2018 17:06:05 +0200 Ioan Nicu  
> > wrote:
> > 
> > > Some of the mport_dma_req structure members were initialized late
> > > inside the do_dma_request() function, just before submitting the
> > > request to the dma engine. But we have some error branches before
> > > that. In case of such an error, the code would return on the error
> > > path and trigger the calling of dma_req_free() with a req structure
> > > which is not completely initialized. This causes a NULL pointer
> > > dereference in dma_req_free().
> > > 
> > > This patch fixes these error branches by making sure that all
> > > necessary mport_dma_req structure members are initialized in
> > > rio_dma_transfer() immediately after the request structure gets
> > > allocated.
> > 
> > This sounds like something which someone has actually triggered in a
> > real-world situation.  So I added a cc:stable.  Please let me know if
> > that was inappropriate.
> > 
> > And please remember to always include all information regarding
> > end-user impact when fixing bugs.
> > 
> This bug fix is applicable to versions starting from v4.6

Actually, this is something I broke with my previous patch where I added a
kref to the mport_dma_req structure. Before this patch, all the error paths
were doing kfree(req) instead of kref_put(&req->refcount, dma_req_free).

Now that dma_req_free() is called, it dereferences req->dmach, which is
initialized late in do_dma_request(), so dma_req_free() could be called
with a NULL req->dmach in some cases.

Sorry if I did not make this clear enough in the description.

Regards,
Ioan


[PATCH] rapidio: use a reference count for struct mport_dma_req

2018-04-03 Thread Ioan Nicu
Once the dma request is passed to the DMA engine, the DMA
subsystem would hold a pointer to this structure and could
call the completion callback after do_dma_request() has
timed out.

The current code deals with this by putting timed out SYNC
requests to a pending list and freeing them later, when the
mport cdev device is released. But this still does not
guarantee that the DMA subsystem is really done with those
transfers, so in theory dma_xfer_callback/dma_req_free
could be called after mport_cdev_release_dma and could
potentially access already freed memory.

This patch simplifies the current handling by using a kref
in the mport dma request structure, so that it gets freed
only when nobody uses it anymore.

This also simplifies the code a bit, as FAF transfers are
now handled in the same way as SYNC and ASYNC transfers.
There is no need anymore for the pending list and for the
dma workqueue which was used in case of FAF transfers, so
we remove them both.

Signed-off-by: Ioan Nicu 
---
 drivers/rapidio/devices/rio_mport_cdev.c | 122 +--
 1 file changed, 18 insertions(+), 104 deletions(-)

diff --git a/drivers/rapidio/devices/rio_mport_cdev.c 
b/drivers/rapidio/devices/rio_mport_cdev.c
index cfb54e01d758..9d27016c899e 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -212,7 +212,6 @@ struct mport_cdev_priv {
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct dma_chan *dmach;
	struct list_head	async_list;
-   struct list_head	pend_list;
spinlock_t  req_lock;
struct mutexdma_lock;
struct kref dma_ref;
@@ -258,8 +257,6 @@ static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
 static struct class *dev_class;
 static dev_t dev_number;
 
-static struct workqueue_struct *dma_wq;
-
 static void mport_release_mapping(struct kref *ref);
 
 static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
@@ -539,6 +536,7 @@ static int maint_comptag_set(struct mport_cdev_priv *priv, 
void __user *arg)
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 
 struct mport_dma_req {
+   struct kref refcount;
struct list_head node;
struct file *filp;
struct mport_cdev_priv *priv;
@@ -554,11 +552,6 @@ struct mport_dma_req {
struct completion req_comp;
 };
 
-struct mport_faf_work {
-   struct work_struct work;
-   struct mport_dma_req *req;
-};
-
 static void mport_release_def_dma(struct kref *dma_ref)
 {
struct mport_dev *md =
@@ -578,8 +571,10 @@ static void mport_release_dma(struct kref *dma_ref)
	complete(&md->comp);
 }
 
-static void dma_req_free(struct mport_dma_req *req)
+static void dma_req_free(struct kref *ref)
 {
+   struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
+   refcount);
struct mport_cdev_priv *priv = req->priv;
unsigned int i;
 
@@ -611,30 +606,7 @@ static void dma_xfer_callback(void *param)
req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
   NULL, NULL);
	complete(&req->req_comp);
-}
-
-static void dma_faf_cleanup(struct work_struct *_work)
-{
-   struct mport_faf_work *work = container_of(_work,
-   struct mport_faf_work, work);
-   struct mport_dma_req *req = work->req;
-
-   dma_req_free(req);
-   kfree(work);
-}
-
-static void dma_faf_callback(void *param)
-{
-   struct mport_dma_req *req = (struct mport_dma_req *)param;
-   struct mport_faf_work *work;
-
-   work = kmalloc(sizeof(*work), GFP_ATOMIC);
-   if (!work)
-   return;
-
-   INIT_WORK(&work->work, dma_faf_cleanup);
-   work->req = req;
-   queue_work(dma_wq, &work->work);
+   kref_put(&req->refcount, dma_req_free);
 }
 
 /*
@@ -765,16 +737,14 @@ static int do_dma_request(struct mport_dma_req *req,
goto err_out;
}
 
-   if (sync == RIO_TRANSFER_FAF)
-   tx->callback = dma_faf_callback;
-   else
-   tx->callback = dma_xfer_callback;
+   tx->callback = dma_xfer_callback;
tx->callback_param = req;
 
req->dmach = chan;
req->sync = sync;
req->status = DMA_IN_PROGRESS;
	init_completion(&req->req_comp);
+   kref_get(&req->refcount);
 
cookie = dmaengine_submit(tx);
req->cookie = cookie;
@@ -785,6 +755,7 @@ static int do_dma_request(struct mport_dma_req *req,
if (dma_submit_error(cookie)) {
rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
   cookie, xfer->rio_addr, xfer->length);
+   kref_put(&req->refcount, dma_req_free);
ret = -EIO;
goto err_out;
}
@@ -860,6 +831,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mod

[PATCH] i2c: i2c-mux-gpio: Change log level to debug for probe deferrals

2015-03-25 Thread Ioan Nicu
Probe deferral is not an error case. It happens only when
the necessary dependencies are not there yet.

The driver core is already printing a message when a driver
requests probe deferral, so this can be traced in the logs
without these error prints.

This patch changes the error messages from these deferral cases
to debug messages.

Signed-off-by: Ionut Nicu 
---
 drivers/i2c/muxes/i2c-mux-gpio.c |4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index f5798eb..9f1cfca 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -77,7 +77,7 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
}
adapter = of_find_i2c_adapter_by_node(adapter_np);
if (!adapter) {
-   dev_err(&pdev->dev, "Cannot find parent bus\n");
+   dev_dbg(&pdev->dev, "Cannot find parent bus\n");
return -EPROBE_DEFER;
}
mux->data.parent = i2c_adapter_id(adapter);
@@ -178,7 +178,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
 
parent = i2c_get_adapter(mux->data.parent);
if (!parent) {
-   dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
+   dev_dbg(&pdev->dev, "Parent adapter (%d) not found\n",
mux->data.parent);
return -EPROBE_DEFER;
}
-- 
1.7.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH] i2c: i2c-mux-gpio: Change log level to debug for probe deferrals

2015-03-30 Thread Ioan Nicu
On Fri, Mar 27, 2015 at 04:29:33PM +0100, ext Wolfram Sang wrote:
> On Wed, Mar 25, 2015 at 06:43:51PM +0100, Ioan Nicu wrote:
> > Probe deferral is not an error case. It happens only when
> > the necessary dependencies are not there yet.
> > 
> > The driver core is already printing a message when a driver
> > requests probe deferral, so this can be traced in the logs
> > without these error prints.
> > 
> > This patch changes the error messages from these deferral cases
> > to debug messages.
> > 
> > Signed-off-by: Ionut Nicu 
> 
> Why not simply removing it?
> 

Right, those messages are not relevant anyway, so removing them is better.
I'll send out an updated patch.

Thanks!
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[PATCH v2] i2c: i2c-mux-gpio: remove error messages for probe deferrals

2015-03-30 Thread Ioan Nicu
Probe deferral is not an error case. It happens only when
the necessary dependencies are not there yet.

The driver core is already printing a message when a driver
requests probe deferral, so this can be traced in the logs
without these error prints.

This patch removes the error messages for these deferral cases.

Signed-off-by: Ionut Nicu 
---
 drivers/i2c/muxes/i2c-mux-gpio.c |   10 +++---
 1 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index f5798eb..70db992 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -76,10 +76,9 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
return -ENODEV;
}
adapter = of_find_i2c_adapter_by_node(adapter_np);
-   if (!adapter) {
-   dev_err(&pdev->dev, "Cannot find parent bus\n");
+   if (!adapter)
return -EPROBE_DEFER;
-   }
+
mux->data.parent = i2c_adapter_id(adapter);
	put_device(&adapter->dev);
 
@@ -177,11 +176,8 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
}
 
parent = i2c_get_adapter(mux->data.parent);
-   if (!parent) {
-   dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
-   mux->data.parent);
+   if (!parent)
return -EPROBE_DEFER;
-   }
 
mux->parent = parent;
mux->gpio_base = gpio_base;
-- 
1.7.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[PATCH] MIPS: Octeon: do not change affinity for disabled irqs

2016-02-15 Thread Ioan Nicu
Octeon sets the default irq affinity to value 1 in the early arch init
code, so by default all irqs get registered with their affinity set to
core 0.

When setting one CPU ofline, octeon_irq_cpu_offline_ciu() calls
irq_set_affinity_locked(), but this function sets the IRQD_AFFINITY_SET bit
in the irq descriptor. This has the side effect that if one irq is
requested later, after putting one CPU offline, the affinity of this irq
would not be the default anymore, but rather forced to "all cores - the
offline core".

This patch sets the IRQCHIP_ONOFFLINE_ENABLED flag in octeon irq
controllers, so that the kernel would call the irq_cpu_[on|off]line()
callbacks only for enabled irqs. If some other irq is requested after
setting one cpu offline, it would use the default irq affinity, same as it
would do in the normal case where there is no CPU hotplug operation.

Signed-off-by: Ioan Nicu 
Acked-by: Alexander Sverdlin 
---
 arch/mips/cavium-octeon/octeon-irq.c |   15 ---
 1 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/arch/mips/cavium-octeon/octeon-irq.c 
b/arch/mips/cavium-octeon/octeon-irq.c
index 368eb49..684582e 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -935,6 +935,7 @@ static struct irq_chip octeon_irq_chip_ciu_v2 = {
 #ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+   .flags = IRQCHIP_ONOFFLINE_ENABLED,
 #endif
 };
 
@@ -948,6 +949,7 @@ static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
 #ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+   .flags = IRQCHIP_ONOFFLINE_ENABLED,
 #endif
 };
 
@@ -963,6 +965,7 @@ static struct irq_chip octeon_irq_chip_ciu_sum2 = {
 #ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+   .flags = IRQCHIP_ONOFFLINE_ENABLED,
 #endif
 };
 
@@ -976,6 +979,7 @@ static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
 #ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+   .flags = IRQCHIP_ONOFFLINE_ENABLED,
 #endif
 };
 
@@ -988,6 +992,7 @@ static struct irq_chip octeon_irq_chip_ciu = {
 #ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+   .flags = IRQCHIP_ONOFFLINE_ENABLED,
 #endif
 };
 
@@ -1001,6 +1006,7 @@ static struct irq_chip octeon_irq_chip_ciu_edge = {
 #ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+   .flags = IRQCHIP_ONOFFLINE_ENABLED,
 #endif
 };
 
@@ -1041,7 +1047,7 @@ static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
 #endif
-   .flags = IRQCHIP_SET_TYPE_MASKED,
+   .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_ONOFFLINE_ENABLED,
 };
 
 static struct irq_chip octeon_irq_chip_ciu_gpio = {
@@ -1056,7 +1062,7 @@ static struct irq_chip octeon_irq_chip_ciu_gpio = {
.irq_set_affinity = octeon_irq_ciu_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
 #endif
-   .flags = IRQCHIP_SET_TYPE_MASKED,
+   .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_ONOFFLINE_ENABLED,
 };
 
 /*
@@ -1838,6 +1844,7 @@ static struct irq_chip octeon_irq_chip_ciu2 = {
 #ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu2_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+   .flags = IRQCHIP_ONOFFLINE_ENABLED,
 #endif
 };
 
@@ -1851,6 +1858,7 @@ static struct irq_chip octeon_irq_chip_ciu2_edge = {
 #ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu2_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+   .flags = IRQCHIP_ONOFFLINE_ENABLED,
 #endif
 };
 
@@ -1886,7 +1894,7 @@ static struct irq_chip octeon_irq_chip_ciu2_gpio = {
.irq_set_affinity = octeon_irq_ciu2_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
 #endif
-   .flags = IRQCHIP_SET_TYPE_MASKED,
+   .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_ONOFFLINE_ENABLED,
 };
 
 static int octeon_irq_ciu2_xlat(struct irq_domain *d,
@@ -2537,6 +2545,7 @@ static struct irq_chip octeon_irq_chip_ciu3 = {
 #ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu3_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+   .flags = IRQCHIP_ONOFFLINE_ENABLED,
 #endif
 };
 
-- 
1.7.1