Fix a possible integer overflow in the address expression passed as
the second argument to iommu_map() and iommu_unmap(). The expression
'(r->offset + i) << imem->iommu_pgshift' is evaluated in 32-bit
arithmetic, so it can wrap around when 'r->offset' or 'i' is large.
Cast 'r->offset' to unsigned long before the addition and shift so
the IOVA is computed in the wider type, preventing unintended
wraparound.
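
For illustration, a minimal standalone sketch of the wraparound on an
LP64 system (the concrete offset and page-shift values below are
hypothetical, chosen only to make the truncation visible):

  #include <stdio.h>

  int main(void)
  {
          unsigned int offset = 0x100000;  /* stands in for r->offset */
          unsigned int i = 0;              /* loop index, as in the driver */
          unsigned int pgshift = 12;       /* stands in for imem->iommu_pgshift */

          /* Shift done in 32 bits: the high bits are discarded before
           * the value is widened to the 64-bit iova parameter.
           */
          unsigned long bad = (offset + i) << pgshift;

          /* Widen first, then shift: the full address survives. */
          unsigned long good = ((unsigned long)offset + i) << pgshift;

          printf("bad:  %#lx\ngood: %#lx\n", bad, good);  /* 0 vs 0x100000000 */
          return 0;
  }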

Found by Linux Verification Center (linuxtesting.org) with SVACE.

Cc: stable@vger.kernel.org # v4.4+
Signed-off-by: Alexey Nepomnyashih <s...@nppct.ru>
---
 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index 201022ae9214..17a0e1a46211 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -334,7 +334,7 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
        /* Unmap pages from GPU address space and free them */
        for (i = 0; i < node->base.mn->length; i++) {
                iommu_unmap(imem->domain,
-                           (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
+                           ((unsigned long)r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
                dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
                               DMA_BIDIRECTIONAL);
                __free_page(node->pages[i]);
@@ -472,7 +472,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
 
        /* Map into GPU address space */
        for (i = 0; i < npages; i++) {
-               u32 offset = (r->offset + i) << imem->iommu_pgshift;
+               unsigned long offset = ((unsigned long)r->offset + i) << imem->iommu_pgshift;
 
                ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
                                PAGE_SIZE, IOMMU_READ | IOMMU_WRITE,
-- 
2.43.0
