From: Xuelin Shi <[email protected]>

This patch does NOT apply to the 3.12 stable tree. If you still want
it applied, please provide a backport.

===============

commit c1f43dd9c20d85e66c4d77e284f64ac114abe3f8 upstream.

The count used by dmaengine_get_unmap_data() may not be the same as
the count computed in dmaengine_unmap(), which causes the data to be
freed into the wrong pool.

This patch fixes the issue by keeping the map count in the unmap_data
structure and using that count to look up the pool when freeing.
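
For context, here is a minimal user-space sketch of the failure mode,
not the kernel's actual __get_unmap_pool() implementation: objects come
from size-bucketed pools, and freeing with a count recomputed at free
time can select a different bucket than the one the object was
allocated from. The pool sizes and helper names below are illustrative
only.

#include <stdio.h>

#define NR_POOLS 3
static const int pool_size[NR_POOLS] = { 2, 16, 128 };

/* Pick the smallest pool whose objects can hold 'nr' entries. */
static int pick_pool(int nr)
{
	for (int i = 0; i < NR_POOLS; i++)
		if (nr <= pool_size[i])
			return i;
	return -1;
}

int main(void)
{
	int alloc_nr = 20;	/* count passed at allocation time */
	int unmap_nr = 3;	/* count the unmap path happens to compute,
				 * e.g. to_cnt + from_cnt + bidi_cnt */

	printf("allocated from pool %d, freed to pool %d\n",
	       pick_pool(alloc_nr), pick_pool(unmap_nr));
	/*
	 * Prints "allocated from pool 1, freed to pool 0": the object is
	 * returned to the wrong pool.  Storing the original count
	 * (map_cnt) in the object and reusing it on free keeps both
	 * lookups identical, which is what the patch below does.
	 */
	return 0;
}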

Cc: <[email protected]>
Signed-off-by: Xuelin Shi <[email protected]>
Signed-off-by: Dan Williams <[email protected]>
---
 drivers/dma/dmaengine.c   | 2 ++
 include/linux/dmaengine.h | 1 +
 2 files changed, 3 insertions(+)

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a886713937fd..d5d30ed863ce 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1009,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref)
                dma_unmap_page(dev, unmap->addr[i], unmap->len,
                               DMA_BIDIRECTIONAL);
        }
+       cnt = unmap->map_cnt;
        mempool_free(unmap, __get_unmap_pool(cnt)->pool);
 }
 
@@ -1074,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
        memset(unmap, 0, sizeof(*unmap));
        kref_init(&unmap->kref);
        unmap->dev = dev;
+       unmap->map_cnt = nr;
 
        return unmap;
 }
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 8300fb87b84a..72cb0ddb9678 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -429,6 +429,7 @@ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
 
 struct dmaengine_unmap_data {
+       u8 map_cnt;
        u8 to_cnt;
        u8 from_cnt;
        u8 bidi_cnt;
-- 
1.9.3
