pageno is an int, so the PAGE_SHIFT left shift is performed in int
arithmetic and overflows if the reserved memory is larger than 2G

This can be reproduced using for example a reserved-memory of 4G

reserved-memory {
                    #address-cells = <2>;
                    #size-cells = <2>;
                    ranges;

                    reserved_dma: buffer@0 {
                        compatible = "shared-dma-pool";
                        no-map;
                        reg = <0x5 0x00000000 0x1 0x0>;
        };
};

Signed-off-by: Kevin Grandemange <[email protected]>
---

Changes v1 -> v2:
  - removed mem_offset tmp variable
  - use dma_addr_t instead of ssize_t
  - Fix reserved-memory size in the dts example

Changes v2 -> v3:
  - Fix several other site where PAGE_SHIFT shifts are done on ints.

 kernel/dma/coherent.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 551b0eb7028a..d322cb786e7e 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -134,7 +134,7 @@ static void *__dma_alloc_from_coherent(struct device *dev,
 
        spin_lock_irqsave(&mem->spinlock, flags);
 
-       if (unlikely(size > (mem->size << PAGE_SHIFT)))
+       if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
                goto err;
 
        pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
@@ -144,8 +144,8 @@ static void *__dma_alloc_from_coherent(struct device *dev,
        /*
         * Memory was found in the coherent area.
         */
-       *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
-       ret = mem->virt_base + (pageno << PAGE_SHIFT);
+       *dma_handle = dma_get_device_base(dev, mem) + ((dma_addr_t)pageno << PAGE_SHIFT);
+       ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
        spin_unlock_irqrestore(&mem->spinlock, flags);
        memset(ret, 0, size);
        return ret;
@@ -194,7 +194,7 @@ static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
                                       int order, void *vaddr)
 {
        if (mem && vaddr >= mem->virt_base && vaddr <
-                  (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+                  (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                unsigned long flags;
 
@@ -238,7 +238,7 @@ static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
                struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
 {
        if (mem && vaddr >= mem->virt_base && vaddr + size <=
-                  (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+                  (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
                unsigned long off = vma->vm_pgoff;
                int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                int user_count = vma_pages(vma);
@@ -248,7 +248,7 @@ static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
                if (off < count && user_count <= count - off) {
                        unsigned long pfn = mem->pfn_base + start + off;
                        *ret = remap_pfn_range(vma, vma->vm_start, pfn,
-                                              user_count << PAGE_SHIFT,
+                                              (unsigned long)user_count << PAGE_SHIFT,
                                               vma->vm_page_prot);
                }
                return 1;
-- 
2.20.1

_______________________________________________
iommu mailing list
[email protected]
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Reply via email to