[PATCH AUTOSEL for 4.9 155/190] drm/msm: fix leak in failed get_pages

2018-03-07 Thread Sasha Levin
From: Prakash Kamliya 

[ Upstream commit 62e3a3e342af3c313ab38603811ecdb1fcc79edb ]

get_pages doesn't keep a reference to the pages it has
allocated when it fails later in the code path, which can
lead to a memory leak. Keep a reference to the allocated
pages so they can be freed when msm_gem_free_object gets
called later during cleanup.

Signed-off-by: Prakash Kamliya 
Signed-off-by: Sharat Masetty 
Signed-off-by: Rob Clark 
Signed-off-by: Sasha Levin 
---
 drivers/gpu/drm/msm/msm_gem.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index b6ac27e31929..797d1f8340b9 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -91,14 +91,17 @@ static struct page **get_pages(struct drm_gem_object *obj)
return p;
}
 
+   msm_obj->pages = p;
+
msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
if (IS_ERR(msm_obj->sgt)) {
+   void *ptr = ERR_CAST(msm_obj->sgt);
+
dev_err(dev->dev, "failed to allocate sgt\n");
-   return ERR_CAST(msm_obj->sgt);
+   msm_obj->sgt = NULL;
+   return ptr;
}
 
-   msm_obj->pages = p;
-
/* For non-cached buffers, ensure the new pages are clean
 * because display controller, GPU, etc. are not coherent:
 */
@@ -121,7 +124,10 @@ static void put_pages(struct drm_gem_object *obj)
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-   sg_free_table(msm_obj->sgt);
+
+   if (msm_obj->sgt)
+   sg_free_table(msm_obj->sgt);
+
kfree(msm_obj->sgt);
 
if (use_pages(obj))
-- 
2.14.1
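
To make the ordering issue concrete, here is a minimal, self-contained
sketch of the pattern the patch applies: record an allocation on the
owning object before any later step that can fail, and let the object's
normal free path reclaim it. The struct and helpers below (obj,
obj_get_pages, obj_free) are hypothetical stand-ins for illustration,
not the msm driver code.

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int *pages;	/* owned by the object once recorded */
	int *sgt;	/* later allocation that may fail */
};

static int obj_get_pages(struct obj *o, int fail_sgt)
{
	int *p = calloc(16, sizeof(*p));

	if (!p)
		return -1;

	/* Record the pages first, mirroring the patch moving
	 * "msm_obj->pages = p" above the sgt allocation. */
	o->pages = p;

	o->sgt = fail_sgt ? NULL : calloc(4, sizeof(*o->sgt));
	if (!o->sgt) {
		/* Leave o->pages in place; obj_free() reclaims it. */
		return -1;
	}

	return 0;
}

static void obj_free(struct obj *o)
{
	free(o->sgt);	/* free(NULL) is a no-op, like the sgt NULL check */
	free(o->pages);	/* pages are freed even if get_pages failed late */
}

int main(void)
{
	struct obj o = { 0 };

	if (obj_get_pages(&o, 1))
		fprintf(stderr, "sgt allocation failed\n");

	obj_free(&o);	/* no leak: pages were recorded before the failure */
	return 0;
}

With the assignment moved before the step that can fail, the error path
only has to clear the partially built state (here sgt is simply left
NULL), and the regular free routine remains the single place that
releases the pages.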

