Re: [PATCH v2] swiotlb-xen: Convert to use macro

2019-09-06 Thread Souptick Joarder
On Fri, Sep 6, 2019 at 7:02 PM Boris Ostrovsky wrote:
>
> On 9/6/19 8:27 AM, Souptick Joarder wrote:
> > On Mon, Sep 2, 2019 at 2:04 PM Souptick Joarder wrote:
> >> Rather than using static int max_dma_bits, this
> >> can be converted to a macro.
> >>
> >> Signed-off-by: Souptick Joarder 
> >> Reviewed-by: Juergen Gross 
> > If it is not too late, can we get this patch queued for 5.4?
>
>
> Yes, I will queue it later today.

Thanks Boris.


Re: [PATCH v2] swiotlb-xen: Convert to use macro

2019-09-06 Thread Souptick Joarder
On Mon, Sep 2, 2019 at 2:04 PM Souptick Joarder  wrote:
>
> Rather than using static int max_dma_bits, this
> can be converted to a macro.
>
> Signed-off-by: Souptick Joarder 
> Reviewed-by: Juergen Gross 

If it is not too late, can we get this patch queued for 5.4?

> ---
>  drivers/xen/swiotlb-xen.c | 5 ++---
>  1 file changed, 2 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
> index ae1df49..d1eced5 100644
> --- a/drivers/xen/swiotlb-xen.c
> +++ b/drivers/xen/swiotlb-xen.c
> @@ -38,6 +38,7 @@
>  #include 
>
>  #include 
> +#define MAX_DMA_BITS 32
>  /*
>   * Used to do a quick range check in swiotlb_tbl_unmap_single and
>   * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
> @@ -114,8 +115,6 @@ static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
> return 0;
>  }
>
> -static int max_dma_bits = 32;
> -
>  static int
>  xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
>  {
> @@ -135,7 +134,7 @@ static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
> p + (i << IO_TLB_SHIFT),
> get_order(slabs << IO_TLB_SHIFT),
> dma_bits, &dma_handle);
> -   } while (rc && dma_bits++ < max_dma_bits);
> +   } while (rc && dma_bits++ < MAX_DMA_BITS);
> if (rc)
> return rc;
>
> --
> 1.9.1
>


[PATCH v2] swiotlb-xen: Convert to use macro

2019-09-02 Thread Souptick Joarder
Rather than using static int max_dma_bits, this
can be converted to a macro.
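
The macro bounds the retry loop in xen_swiotlb_fixup(), which widens
the DMA mask one bit at a time until xen_create_contiguous_region()
succeeds or MAX_DMA_BITS is exceeded, as the diff below shows.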

Signed-off-by: Souptick Joarder 
Reviewed-by: Juergen Gross 
---
 drivers/xen/swiotlb-xen.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index ae1df49..d1eced5 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -38,6 +38,7 @@
 #include 
 
 #include 
+#define MAX_DMA_BITS 32
 /*
  * Used to do a quick range check in swiotlb_tbl_unmap_single and
  * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
@@ -114,8 +115,6 @@ static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
return 0;
 }
 
-static int max_dma_bits = 32;
-
 static int
 xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 {
@@ -135,7 +134,7 @@ static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
p + (i << IO_TLB_SHIFT),
get_order(slabs << IO_TLB_SHIFT),
dma_bits, &dma_handle);
-   } while (rc && dma_bits++ < max_dma_bits);
+   } while (rc && dma_bits++ < MAX_DMA_BITS);
if (rc)
return rc;
 
-- 
1.9.1



[PATCH] swiotlb-xen: Convert to use macro

2019-09-01 Thread Souptick Joarder
Rather than using static int max_dma_bits, this
can be converted to a macro.

Signed-off-by: Souptick Joarder 
---
 drivers/xen/swiotlb-xen.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index ae1df49..d1eced5 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -38,6 +38,7 @@
 #include 
 
 #include 
+#define MAX_DMA_BITS 32
 /*
  * Used to do a quick range check in swiotlb_tbl_unmap_single and
  * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
@@ -114,8 +115,6 @@ static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
return 0;
 }
 
-static int max_dma_bits = 32;
-
 static int
 xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 {
@@ -135,7 +134,7 @@ static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
p + (i << IO_TLB_SHIFT),
get_order(slabs << IO_TLB_SHIFT),
dma_bits, &dma_handle);
-   } while (rc && dma_bits++ < max_dma_bits);
+   } while (rc && dma_bits++ < MAX_DMA_BITS);
if (rc)
return rc;
 
-- 
1.9.1



[REBASE PATCH v5 8/9] xen/gntdev.c: Convert to use vm_map_pages()

2019-04-16 Thread Souptick Joarder
Convert to use vm_map_pages() to map a range of kernel
memory to a user vma.

map->count is passed to vm_map_pages(), and the internal API
verifies map->count against count (where count = vma_pages(vma))
to catch page array boundary overruns; see the sketch below.
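
For reference, the check that vm_map_pages() applies internally, via
__vm_map_pages() from patch [1/9] of this series, is essentially:

	unsigned long count = vma_pages(vma);

	/* Fail if the user requested offset is beyond the end of the object */
	if (offset > num)
		return -ENXIO;

	/* Fail if the user requested size exceeds available object size */
	if (count > num - offset)
		return -ENXIO;

so a vma spanning more pages than map->count is rejected with -ENXIO
before any page is inserted.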

Signed-off-by: Souptick Joarder 
Reviewed-by: Boris Ostrovsky 
---
 drivers/xen/gntdev.c | 11 ---
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 5efc5ee..5d64262 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1084,7 +1084,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
int index = vma->vm_pgoff;
int count = vma_pages(vma);
struct gntdev_grant_map *map;
-   int i, err = -EINVAL;
+   int err = -EINVAL;
 
if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
return -EINVAL;
@@ -1145,12 +1145,9 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
goto out_put_map;
 
if (!use_ptemod) {
-   for (i = 0; i < count; i++) {
-   err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
-   map->pages[i]);
-   if (err)
-   goto out_put_map;
-   }
+   err = vm_map_pages(vma, map->pages, map->count);
+   if (err)
+   goto out_put_map;
} else {
 #ifdef CONFIG_X86
/*
-- 
1.9.1



[REBASE PATCH v5 9/9] xen/privcmd-buf.c: Convert to use vm_map_pages_zero()

2019-04-16 Thread Souptick Joarder
Convert to use vm_map_pages_zero() to map a range of kernel
memory to a user vma.

This driver has ignored vm_pgoff. We could later "fix" these drivers
to behave according to the normal vm_pgoff offsetting simply by
removing the _zero suffix on the function name; if that causes
regressions, it gives us an easy way to revert.
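
As a sketch, that later "fix" (or its revert) would be a one-line change
of this shape in privcmd_buf_mmap():

	-	ret = vm_map_pages_zero(vma, vma_priv->pages,
	-				vma_priv->n_pages);
	+	ret = vm_map_pages(vma, vma_priv->pages,
	+				vma_priv->n_pages);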

Signed-off-by: Souptick Joarder 
Reviewed-by: Boris Ostrovsky 
---
 drivers/xen/privcmd-buf.c | 8 ++--
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
index de01a6d..d02dc43 100644
--- a/drivers/xen/privcmd-buf.c
+++ b/drivers/xen/privcmd-buf.c
@@ -166,12 +166,8 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
if (vma_priv->n_pages != count)
ret = -ENOMEM;
else
-   for (i = 0; i < vma_priv->n_pages; i++) {
-   ret = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
-vma_priv->pages[i]);
-   if (ret)
-   break;
-   }
+   ret = vm_map_pages_zero(vma, vma_priv->pages,
+   vma_priv->n_pages);
 
if (ret)
privcmd_buf_vmapriv_free(vma_priv);
-- 
1.9.1



[REBASE PATCH v5 3/9] drivers/firewire/core-iso.c: Convert to use vm_map_pages_zero()

2019-04-16 Thread Souptick Joarder
Convert to use vm_map_pages_zero() to map a range of kernel memory
to a user vma.

This driver has ignored vm_pgoff and mapped all of the pages. We
could later "fix" these drivers to behave according to the normal
vm_pgoff offsetting simply by removing the _zero suffix on the
function name; if that causes regressions, it gives us an easy
way to revert.

Signed-off-by: Souptick Joarder 
---
 drivers/firewire/core-iso.c | 15 ++-
 1 file changed, 2 insertions(+), 13 deletions(-)

diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 35e784c..5414eb1 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -107,19 +107,8 @@ int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
 int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
  struct vm_area_struct *vma)
 {
-   unsigned long uaddr;
-   int i, err;
-
-   uaddr = vma->vm_start;
-   for (i = 0; i < buffer->page_count; i++) {
-   err = vm_insert_page(vma, uaddr, buffer->pages[i]);
-   if (err)
-   return err;
-
-   uaddr += PAGE_SIZE;
-   }
-
-   return 0;
+   return vm_map_pages_zero(vma, buffer->pages,
+   buffer->page_count);
 }
 
 void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
-- 
1.9.1



[REBASE PATCH v5 5/9] drm/xen/xen_drm_front_gem.c: Convert to use vm_map_pages()

2019-04-16 Thread Souptick Joarder
Convert to use vm_map_pages() to map a range of kernel
memory to a user vma.

Signed-off-by: Souptick Joarder 
Reviewed-by: Oleksandr Andrushchenko 
---
 drivers/gpu/drm/xen/xen_drm_front_gem.c | 18 +-
 1 file changed, 5 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 28bc501..dd0602d 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -224,8 +224,7 @@ struct drm_gem_object *
 static int gem_mmap_obj(struct xen_gem_object *xen_obj,
struct vm_area_struct *vma)
 {
-   unsigned long addr = vma->vm_start;
-   int i;
+   int ret;
 
/*
 * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
@@ -246,18 +245,11 @@ static int gem_mmap_obj(struct xen_gem_object *xen_obj,
 * FIXME: as we insert all the pages now then no .fault handler must
 * be called, so don't provide one
 */
-   for (i = 0; i < xen_obj->num_pages; i++) {
-   int ret;
-
-   ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
-   if (ret < 0) {
-   DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
-   return ret;
-   }
+   ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
+   if (ret < 0)
+   DRM_ERROR("Failed to map pages into vma: %d\n", ret);
 
-   addr += PAGE_SIZE;
-   }
-   return 0;
+   return ret;
 }
 
 int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-- 
1.9.1



[REBASE PATCH v5 4/9] drm/rockchip/rockchip_drm_gem.c: Convert to use vm_map_pages()

2019-04-16 Thread Souptick Joarder
Convert to use vm_map_pages() to map a range of kernel
memory to a user vma.

Tested on Rockchip hardware and the display is working,
including talking to Lima via prime.

Signed-off-by: Souptick Joarder 
Tested-by: Heiko Stuebner 
---
 drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 17 ++---
 1 file changed, 2 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index a8db758..a2ebb08 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -221,26 +221,13 @@ static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
  struct vm_area_struct *vma)
 {
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
-   unsigned int i, count = obj->size >> PAGE_SHIFT;
+   unsigned int count = obj->size >> PAGE_SHIFT;
unsigned long user_count = vma_pages(vma);
-   unsigned long uaddr = vma->vm_start;
-   unsigned long offset = vma->vm_pgoff;
-   unsigned long end = user_count + offset;
-   int ret;
 
if (user_count == 0)
return -ENXIO;
-   if (end > count)
-   return -ENXIO;
 
-   for (i = offset; i < end; i++) {
-   ret = vm_insert_page(vma, uaddr, rk_obj->pages[i]);
-   if (ret)
-   return ret;
-   uaddr += PAGE_SIZE;
-   }
-
-   return 0;
+   return vm_map_pages(vma, rk_obj->pages, count);
 }
 
 static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
-- 
1.9.1



[REBASE PATCH v5 7/9] videobuf2/videobuf2-dma-sg.c: Convert to use vm_map_pages()

2019-04-16 Thread Souptick Joarder
Convert to use vm_map_pages() to map a range of kernel memory
to a user vma.

In the V4L2 API, vm_pgoff is by design treated as a 'cookie' used to
select a buffer, not as an in-buffer offset, and userspace always
wants to mmap a whole buffer from its beginning.
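
To illustrate the cookie convention, a minimal userspace sketch (error
handling omitted; fd is assumed to be an open V4L2 capture device, and
the snippet needs <linux/videodev2.h>, <sys/ioctl.h>, <sys/mman.h>):
the offset passed to mmap() is the value returned by VIDIOC_QUERYBUF,
identifying the buffer rather than a byte offset inside it:

	struct v4l2_buffer buf = {
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.memory = V4L2_MEMORY_MMAP,
		.index = 0,
	};

	ioctl(fd, VIDIOC_QUERYBUF, &buf);
	/* buf.m.offset is an opaque cookie selecting buffer 0 */
	void *p = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, buf.m.offset);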

Signed-off-by: Souptick Joarder 
Suggested-by: Marek Szyprowski 
Reviewed-by: Marek Szyprowski 
---
 drivers/media/common/videobuf2/videobuf2-core.c|  7 +++
 .../media/common/videobuf2/videobuf2-dma-contig.c  |  6 --
 drivers/media/common/videobuf2/videobuf2-dma-sg.c  | 22 ++
 3 files changed, 13 insertions(+), 22 deletions(-)

diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 70e8c33..ca4577a 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -2175,6 +2175,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
goto unlock;
}
 
+   /*
+* vm_pgoff is treated in V4L2 API as a 'cookie' to select a buffer,
+* not as a in-buffer offset. We always want to mmap a whole buffer
+* from its beginning.
+*/
+   vma->vm_pgoff = 0;
+
ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
 
 unlock:
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index aff0ab7..46245c5 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -186,12 +186,6 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
return -EINVAL;
}
 
-   /*
-* dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
-* map whole buffer
-*/
-   vma->vm_pgoff = 0;
-
ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
buf->dma_addr, buf->size, buf->attrs);
 
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 015e737..d6b8eca 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -328,28 +328,18 @@ static unsigned int vb2_dma_sg_num_users(void *buf_priv)
 static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
 {
struct vb2_dma_sg_buf *buf = buf_priv;
-   unsigned long uaddr = vma->vm_start;
-   unsigned long usize = vma->vm_end - vma->vm_start;
-   int i = 0;
+   int err;
 
if (!buf) {
printk(KERN_ERR "No memory to map\n");
return -EINVAL;
}
 
-   do {
-   int ret;
-
-   ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
-   if (ret) {
-   printk(KERN_ERR "Remapping memory, error: %d\n", ret);
-   return ret;
-   }
-
-   uaddr += PAGE_SIZE;
-   usize -= PAGE_SIZE;
-   } while (usize > 0);
-
+   err = vm_map_pages(vma, buf->pages, buf->num_pages);
+   if (err) {
+   printk(KERN_ERR "Remapping memory, error: %d\n", err);
+   return err;
+   }
 
/*
 * Use common vm_area operations to track buffer refcount.
-- 
1.9.1



[REBASE PATCH v5 2/9] arm: mm: dma-mapping: Convert to use vm_map_pages()

2019-04-16 Thread Souptick Joarder
Convert to use vm_map_pages() to map a range of kernel
memory to a user vma.

Signed-off-by: Souptick Joarder 
---
 arch/arm/mm/dma-mapping.c | 22 ++
 1 file changed, 6 insertions(+), 16 deletions(-)

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f1e2922..de7c76e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1575,31 +1575,21 @@ static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
 {
-   unsigned long uaddr = vma->vm_start;
-   unsigned long usize = vma->vm_end - vma->vm_start;
struct page **pages = __iommu_get_pages(cpu_addr, attrs);
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-   unsigned long off = vma->vm_pgoff;
+   int err;
 
if (!pages)
return -ENXIO;
 
-   if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
+   if (vma->vm_pgoff >= nr_pages)
return -ENXIO;
 
-   pages += off;
-
-   do {
-   int ret = vm_insert_page(vma, uaddr, *pages++);
-   if (ret) {
-   pr_err("Remapping memory failed: %d\n", ret);
-   return ret;
-   }
-   uaddr += PAGE_SIZE;
-   usize -= PAGE_SIZE;
-   } while (usize > 0);
+   err = vm_map_pages(vma, pages, nr_pages);
+   if (err)
+   pr_err("Remapping memory failed: %d\n", err);
 
-   return 0;
+   return err;
 }
 static int arm_iommu_mmap_attrs(struct device *dev,
struct vm_area_struct *vma, void *cpu_addr,
-- 
1.9.1



Re: [RESEND PATCH v4 0/9] mm: Use vm_map_pages() and vm_map_pages_zero() API

2019-04-09 Thread Souptick Joarder
Hi Andrew/ Michal,

On Mon, Apr 1, 2019 at 10:56 AM Souptick Joarder  wrote:
>
> Hi Andrew,
>
> On Tue, Mar 19, 2019 at 7:47 AM Souptick Joarder  wrote:
> >
> > Previously, drivers had their own way of mapping a range of
> > kernel pages/memory into a user vma, done by invoking
> > vm_insert_page() within a loop.
> >
> > As this pattern is common across different drivers, it can
> > be generalized by creating new functions and using them across
> > the drivers.
> >
> > vm_map_pages() is the API which could be used to map
> > kernel memory/pages in drivers which have considered vm_pgoff.
> >
> > vm_map_pages_zero() is the API which could be used to map a
> > range of kernel memory/pages in drivers which have not considered
> > vm_pgoff. vm_pgoff is passed as 0 by default for those drivers.
> >
> > We _could_ then, at a later point, "fix" these drivers which are using
> > vm_map_pages_zero() to behave according to the normal vm_pgoff
> > offsetting simply by removing the _zero suffix on the function
> > name; if that causes regressions, it gives us an easy way to revert.
> >
> > Tested on Rockchip hardware and display is working fine, including talking
> > to Lima via prime.
> >
> > v1 -> v2:
> > Few Reviewed-by.
> >
> > Updated the change log in [8/9]
> >
> > In [7/9], vm_pgoff is treated in the V4L2 API as a 'cookie'
> > to select a buffer, not as an in-buffer offset by design,
> > and it always wants to mmap a whole buffer from its beginning.
> > Added additional changes after discussing with Marek and
> > vm_map_pages() could be used instead of vm_map_pages_zero().
> >
> > v2 -> v3:
> > Corrected the documentation as per review comment.
> >
> > As suggested in v2, renaming the interfaces to -
> > *vm_insert_range() -> vm_map_pages()* and
> > *vm_insert_range_buggy() -> vm_map_pages_zero()*.
> > As the interface is renamed, modified the code accordingly,
> > updated the change logs and modified the subject lines to use the
> > new interfaces. There is no other change apart from renaming and
> > using the new interface.
> >
> > Patch[1/9] & [4/9], Tested on Rockchip hardware.
> >
> > v3 -> v4:
> > Fixed build warnings on patch [8/9] reported by kbuild test robot.
> >
> > Souptick Joarder (9):
> >   mm: Introduce new vm_map_pages() and vm_map_pages_zero() API
> >   arm: mm: dma-mapping: Convert to use vm_map_pages()
> >   drivers/firewire/core-iso.c: Convert to use vm_map_pages_zero()
> >   drm/rockchip/rockchip_drm_gem.c: Convert to use vm_map_pages()
> >   drm/xen/xen_drm_front_gem.c: Convert to use vm_map_pages()
> >   iommu/dma-iommu.c: Convert to use vm_map_pages()
> >   videobuf2/videobuf2-dma-sg.c: Convert to use vm_map_pages()
> >   xen/gntdev.c: Convert to use vm_map_pages()
> >   xen/privcmd-buf.c: Convert to use vm_map_pages_zero()
>
> Is it fine to take these patches into the mm tree for regression testing?

v4 of this series has not received any further comments or kbuild errors
in the last 8 weeks (including the previously posted v4).

Any suggestions on whether it is safe to take these changes through the
mm tree, or is another tree preferred?

>
> >
> >  arch/arm/mm/dma-mapping.c  | 22 ++
> >  drivers/firewire/core-iso.c| 15 +---
> >  drivers/gpu/drm/rockchip/rockchip_drm_gem.c| 17 +
> >  drivers/gpu/drm/xen/xen_drm_front_gem.c| 18 ++---
> >  drivers/iommu/dma-iommu.c  | 12 +---
> >  drivers/media/common/videobuf2/videobuf2-core.c|  7 ++
> >  .../media/common/videobuf2/videobuf2-dma-contig.c  |  6 --
> >  drivers/media/common/videobuf2/videobuf2-dma-sg.c  | 22 ++
> >  drivers/xen/gntdev.c   | 11 ++-
> >  drivers/xen/privcmd-buf.c  |  8 +--
> >  include/linux/mm.h |  4 ++
> >  mm/memory.c| 81 ++
> >  mm/nommu.c | 14 
> >  13 files changed, 134 insertions(+), 103 deletions(-)
> >
> > --
> > 1.9.1
> >


Re: [RESEND PATCH v4 0/9] mm: Use vm_map_pages() and vm_map_pages_zero() API

2019-03-31 Thread Souptick Joarder
Hi Andrew,

On Tue, Mar 19, 2019 at 7:47 AM Souptick Joarder  wrote:
>
> Previously, drivers had their own way of mapping a range of
> kernel pages/memory into a user vma, done by invoking
> vm_insert_page() within a loop.
>
> As this pattern is common across different drivers, it can
> be generalized by creating new functions and using them across
> the drivers.
>
> vm_map_pages() is the API which could be used to map
> kernel memory/pages in drivers which have considered vm_pgoff.
>
> vm_map_pages_zero() is the API which could be used to map a
> range of kernel memory/pages in drivers which have not considered
> vm_pgoff. vm_pgoff is passed as 0 by default for those drivers.
>
> We _could_ then, at a later point, "fix" these drivers which are using
> vm_map_pages_zero() to behave according to the normal vm_pgoff
> offsetting simply by removing the _zero suffix on the function
> name; if that causes regressions, it gives us an easy way to revert.
>
> Tested on Rockchip hardware and display is working fine, including talking
> to Lima via prime.
>
> v1 -> v2:
> Few Reviewed-by.
>
> Updated the change log in [8/9]
>
> In [7/9], vm_pgoff is treated in the V4L2 API as a 'cookie'
> to select a buffer, not as an in-buffer offset by design,
> and it always wants to mmap a whole buffer from its beginning.
> Added additional changes after discussing with Marek and
> vm_map_pages() could be used instead of vm_map_pages_zero().
>
> v2 -> v3:
> Corrected the documentation as per review comment.
>
> As suggested in v2, renaming the interfaces to -
> *vm_insert_range() -> vm_map_pages()* and
> *vm_insert_range_buggy() -> vm_map_pages_zero()*.
> As the interface is renamed, modified the code accordingly,
> updated the change logs and modified the subject lines to use the
> new interfaces. There is no other change apart from renaming and
> using the new interface.
>
>     Patch[1/9] & [4/9], Tested on Rockchip hardware.
>
> v3 -> v4:
> Fixed build warnings on patch [8/9] reported by kbuild test robot.
>
> Souptick Joarder (9):
>   mm: Introduce new vm_map_pages() and vm_map_pages_zero() API
>   arm: mm: dma-mapping: Convert to use vm_map_pages()
>   drivers/firewire/core-iso.c: Convert to use vm_map_pages_zero()
>   drm/rockchip/rockchip_drm_gem.c: Convert to use vm_map_pages()
>   drm/xen/xen_drm_front_gem.c: Convert to use vm_map_pages()
>   iommu/dma-iommu.c: Convert to use vm_map_pages()
>   videobuf2/videobuf2-dma-sg.c: Convert to use vm_map_pages()
>   xen/gntdev.c: Convert to use vm_map_pages()
>   xen/privcmd-buf.c: Convert to use vm_map_pages_zero()

Is it fine to take these patches into the mm tree for regression testing?

>
>  arch/arm/mm/dma-mapping.c  | 22 ++
>  drivers/firewire/core-iso.c| 15 +---
>  drivers/gpu/drm/rockchip/rockchip_drm_gem.c| 17 +
>  drivers/gpu/drm/xen/xen_drm_front_gem.c| 18 ++---
>  drivers/iommu/dma-iommu.c  | 12 +---
>  drivers/media/common/videobuf2/videobuf2-core.c|  7 ++
>  .../media/common/videobuf2/videobuf2-dma-contig.c  |  6 --
>  drivers/media/common/videobuf2/videobuf2-dma-sg.c  | 22 ++
>  drivers/xen/gntdev.c   | 11 ++-
>  drivers/xen/privcmd-buf.c  |  8 +--
>  include/linux/mm.h |  4 ++
>  mm/memory.c| 81 ++
>  mm/nommu.c | 14 
>  13 files changed, 134 insertions(+), 103 deletions(-)
>
> --
> 1.9.1
>


[RESEND PATCH v4 6/9] iommu/dma-iommu.c: Convert to use vm_map_pages()

2019-03-18 Thread Souptick Joarder
Convert to use vm_map_pages() to map a range of kernel
memory to a user vma.

Signed-off-by: Souptick Joarder 
---
 drivers/iommu/dma-iommu.c | 12 +---
 1 file changed, 1 insertion(+), 11 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d19f3d6..bacebff 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -620,17 +620,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
 
 int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
 {
-   unsigned long uaddr = vma->vm_start;
-   unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-   int ret = -ENXIO;
-
-   for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
-   ret = vm_insert_page(vma, uaddr, pages[i]);
-   if (ret)
-   break;
-   uaddr += PAGE_SIZE;
-   }
-   return ret;
+   return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-- 
1.9.1



[RESEND PATCH v4 1/9] mm: Introduce new vm_map_pages() and vm_map_pages_zero() API

2019-03-18 Thread Souptick Joarder
Previously, drivers had their own way of mapping a range of
kernel pages/memory into a user vma, done by invoking
vm_insert_page() within a loop.

As this pattern is common across different drivers, it can
be generalized by creating new functions and using them across
the drivers.

vm_map_pages() is the API which could be used to map
kernel memory/pages in drivers which have considered vm_pgoff.

vm_map_pages_zero() is the API which could be used to map a
range of kernel memory/pages in drivers which have not considered
vm_pgoff. vm_pgoff is passed as 0 by default for those drivers.

We _could_ then, at a later point, "fix" these drivers which are using
vm_map_pages_zero() to behave according to the normal vm_pgoff
offsetting simply by removing the _zero suffix on the function
name; if that causes regressions, it gives us an easy way to revert.

Tested on Rockchip hardware and display is working, including talking
to Lima via prime.

Signed-off-by: Souptick Joarder 
Suggested-by: Russell King 
Suggested-by: Matthew Wilcox 
Reviewed-by: Mike Rapoport 
Tested-by: Heiko Stuebner 
---
 include/linux/mm.h |  4 +++
 mm/memory.c| 81 ++
 mm/nommu.c | 14 ++
 3 files changed, 99 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 80bb640..e0aaa73 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2565,6 +2565,10 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num);
+int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num);
 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
diff --git a/mm/memory.c b/mm/memory.c
index e11ca9d..cad3e27 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1520,6 +1520,87 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+/*
+ * __vm_map_pages - maps range of kernel pages into user vma
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ * @offset: user's requested vm_pgoff
+ *
+ * This allows drivers to map range of kernel pages into a user vma.
+ *
+ * Return: 0 on success and error code otherwise.
+ */
+static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num, unsigned long offset)
+{
+   unsigned long count = vma_pages(vma);
+   unsigned long uaddr = vma->vm_start;
+   int ret, i;
+
+   /* Fail if the user requested offset is beyond the end of the object */
+   if (offset > num)
+   return -ENXIO;
+
+   /* Fail if the user requested size exceeds available object size */
+   if (count > num - offset)
+   return -ENXIO;
+
+   for (i = 0; i < count; i++) {
+   ret = vm_insert_page(vma, uaddr, pages[offset + i]);
+   if (ret < 0)
+   return ret;
+   uaddr += PAGE_SIZE;
+   }
+
+   return 0;
+}
+
+/**
+ * vm_map_pages - maps range of kernel pages starts with non zero offset
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ *
+ * Maps an object consisting of @num pages, catering for the user's
+ * requested vm_pgoff
+ *
+ * If we fail to insert any page into the vma, the function will return
+ * immediately leaving any previously inserted pages present.  Callers
+ * from the mmap handler may immediately return the error as their caller
+ * will destroy the vma, removing any successfully inserted pages. Other
+ * callers should make their own arrangements for calling unmap_region().
+ *
+ * Context: Process context. Called by mmap handlers.
+ * Return: 0 on success and error code otherwise.
+ */
+int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num)
+{
+   return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
+}
+EXPORT_SYMBOL(vm_map_pages);
+
+/**
+ * vm_map_pages_zero - map range of kernel pages starts with zero offset
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ *
+ * Similar to vm_map_pages(), except that it explicitly sets the offset
+ * to 0. This function is intended for the drivers that did not consider
+ * vm_pgoff.
+ *
+ * Context: Process context. Called by mmap handlers.
+ * Return: 0 on success and error code otherwise.
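+ */
+int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
+				unsigned long num)
+{
+	return __vm_map_pages(vma, pages, num, 0);
+}
+EXPORT_SYMBOL(vm_map_pages_zero);

The closing lines above are a sketch of the truncated remainder; the
body of vm_map_pages_zero() is assumed here to be a thin wrapper that
passes a zero offset to __vm_map_pages(), matching how vm_map_pages()
wraps the same helper with vma->vm_pgoff.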

[RESEND PATCH v4 0/9] mm: Use vm_map_pages() and vm_map_pages_zero() API

2019-03-18 Thread Souptick Joarder
Previously, drivers had their own way of mapping a range of
kernel pages/memory into a user vma, done by invoking
vm_insert_page() within a loop.

As this pattern is common across different drivers, it can
be generalized by creating new functions and using them across
the drivers.

vm_map_pages() is the API which could be used to map
kernel memory/pages in drivers which have considered vm_pgoff.

vm_map_pages_zero() is the API which could be used to map a
range of kernel memory/pages in drivers which have not considered
vm_pgoff. vm_pgoff is passed as 0 by default for those drivers.

We _could_ then, at a later point, "fix" these drivers which are using
vm_map_pages_zero() to behave according to the normal vm_pgoff
offsetting simply by removing the _zero suffix on the function
name; if that causes regressions, it gives us an easy way to revert.
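
In driver terms, the conversion replaces an open-coded vm_insert_page()
loop with a single call. A minimal, hypothetical mmap handler (the
mydrv_* names are invented for illustration):

	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct mydrv_buf *buf = file->private_data;

		/*
		 * vm_map_pages() honours vma->vm_pgoff as a page offset
		 * into the pages array and checks it against vma_pages(vma).
		 */
		return vm_map_pages(vma, buf->pages, buf->num_pages);
	}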

Tested on Rockchip hardware and display is working fine, including talking
to Lima via prime.

v1 -> v2:
Few Reviewed-by.

Updated the change log in [8/9]

In [7/9], vm_pgoff is treated in the V4L2 API as a 'cookie'
to select a buffer, not as an in-buffer offset by design,
and it always wants to mmap a whole buffer from its beginning.
Added additional changes after discussing with Marek and
vm_map_pages() could be used instead of vm_map_pages_zero().

v2 -> v3:
Corrected the documentation as per review comment.

As suggested in v2, renaming the interfaces to -
*vm_insert_range() -> vm_map_pages()* and
*vm_insert_range_buggy() -> vm_map_pages_zero()*.
As the interface is renamed, modified the code accordingly,
updated the change logs and modified the subject lines to use the
new interfaces. There is no other change apart from renaming and
using the new interface.

Patch[1/9] & [4/9], Tested on Rockchip hardware.

v3 -> v4:
Fixed build warnings on patch [8/9] reported by kbuild test robot.

Souptick Joarder (9):
  mm: Introduce new vm_map_pages() and vm_map_pages_zero() API
  arm: mm: dma-mapping: Convert to use vm_map_pages()
  drivers/firewire/core-iso.c: Convert to use vm_map_pages_zero()
  drm/rockchip/rockchip_drm_gem.c: Convert to use vm_map_pages()
  drm/xen/xen_drm_front_gem.c: Convert to use vm_map_pages()
  iommu/dma-iommu.c: Convert to use vm_map_pages()
  videobuf2/videobuf2-dma-sg.c: Convert to use vm_map_pages()
  xen/gntdev.c: Convert to use vm_map_pages()
  xen/privcmd-buf.c: Convert to use vm_map_pages_zero()

 arch/arm/mm/dma-mapping.c  | 22 ++
 drivers/firewire/core-iso.c| 15 +---
 drivers/gpu/drm/rockchip/rockchip_drm_gem.c| 17 +
 drivers/gpu/drm/xen/xen_drm_front_gem.c| 18 ++---
 drivers/iommu/dma-iommu.c  | 12 +---
 drivers/media/common/videobuf2/videobuf2-core.c|  7 ++
 .../media/common/videobuf2/videobuf2-dma-contig.c  |  6 --
 drivers/media/common/videobuf2/videobuf2-dma-sg.c  | 22 ++
 drivers/xen/gntdev.c   | 11 ++-
 drivers/xen/privcmd-buf.c  |  8 +--
 include/linux/mm.h |  4 ++
 mm/memory.c| 81 ++
 mm/nommu.c | 14 
 13 files changed, 134 insertions(+), 103 deletions(-)

-- 
1.9.1



Re: [PATCH v4 0/9] mm: Use vm_map_pages() and vm_map_pages_zero() API

2019-02-20 Thread Souptick Joarder
On Fri, Feb 15, 2019 at 8:06 AM Souptick Joarder  wrote:
>
> Previously, drivers had their own way of mapping a range of
> kernel pages/memory into a user vma, done by invoking
> vm_insert_page() within a loop.
>
> As this pattern is common across different drivers, it can
> be generalized by creating new functions and using them across
> the drivers.
>
> vm_map_pages() is the API which could be used to map
> kernel memory/pages in drivers which have considered vm_pgoff.
>
> vm_map_pages_zero() is the API which could be used to map a
> range of kernel memory/pages in drivers which have not considered
> vm_pgoff. vm_pgoff is passed as 0 by default for those drivers.
>
> We _could_ then, at a later point, "fix" these drivers which are using
> vm_map_pages_zero() to behave according to the normal vm_pgoff
> offsetting simply by removing the _zero suffix on the function
> name; if that causes regressions, it gives us an easy way to revert.
>
> Tested on Rockchip hardware and display is working fine, including talking
> to Lima via prime.
>
> v1 -> v2:
> Few Reviewed-by.
>
> Updated the change log in [8/9]
>
> In [7/9], vm_pgoff is treated in the V4L2 API as a 'cookie'
> to select a buffer, not as an in-buffer offset by design,
> and it always wants to mmap a whole buffer from its beginning.
> Added additional changes after discussing with Marek and
> vm_map_pages() could be used instead of vm_map_pages_zero().
>
> v2 -> v3:
> Corrected the documentation as per review comment.
>
> As suggested in v2, renaming the interfaces to -
> *vm_insert_range() -> vm_map_pages()* and
> *vm_insert_range_buggy() -> vm_map_pages_zero()*.
> As the interface is renamed, modified the code accordingly,
> updated the change logs and modified the subject lines to use the
> new interfaces. There is no other change apart from renaming and
> using the new interface.
>
>     Patch[1/9] & [4/9], Tested on Rockchip hardware.
>
> v3 -> v4:
> Fixed build warnings on patch [8/9] reported by kbuild test robot.
>
> Souptick Joarder (9):
>   mm: Introduce new vm_map_pages() and vm_map_pages_zero() API
>   arm: mm: dma-mapping: Convert to use vm_map_pages()
>   drivers/firewire/core-iso.c: Convert to use vm_map_pages_zero()
>   drm/rockchip/rockchip_drm_gem.c: Convert to use vm_map_pages()
>   drm/xen/xen_drm_front_gem.c: Convert to use vm_map_pages()
>   iommu/dma-iommu.c: Convert to use vm_map_pages()
>   videobuf2/videobuf2-dma-sg.c: Convert to use vm_map_pages()
>   xen/gntdev.c: Convert to use vm_map_pages()
>   xen/privcmd-buf.c: Convert to use vm_map_pages_zero()

If there are no further comments, is it fine to take these patches into
the -mm tree for regression testing?

>
>  arch/arm/mm/dma-mapping.c  | 22 ++
>  drivers/firewire/core-iso.c| 15 +---
>  drivers/gpu/drm/rockchip/rockchip_drm_gem.c| 17 +
>  drivers/gpu/drm/xen/xen_drm_front_gem.c| 18 ++---
>  drivers/iommu/dma-iommu.c  | 12 +---
>  drivers/media/common/videobuf2/videobuf2-core.c|  7 ++
>  .../media/common/videobuf2/videobuf2-dma-contig.c  |  6 --
>  drivers/media/common/videobuf2/videobuf2-dma-sg.c  | 22 ++
>  drivers/xen/gntdev.c   | 11 ++-
>  drivers/xen/privcmd-buf.c  |  8 +--
>  include/linux/mm.h |  4 ++
> >  mm/memory.c| 81 ++
>  mm/nommu.c | 14 
>  13 files changed, 134 insertions(+), 103 deletions(-)
>
> --
> 1.9.1
>


[PATCH v4 6/9] iommu/dma-iommu.c: Convert to use vm_map_pages()

2019-02-14 Thread Souptick Joarder
Convert to use vm_map_pages() to map a range of kernel
memory to a user vma.

Signed-off-by: Souptick Joarder 
---
 drivers/iommu/dma-iommu.c | 12 +---
 1 file changed, 1 insertion(+), 11 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d19f3d6..bacebff 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -620,17 +620,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
 
 int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
 {
-   unsigned long uaddr = vma->vm_start;
-   unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-   int ret = -ENXIO;
-
-   for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
-   ret = vm_insert_page(vma, uaddr, pages[i]);
-   if (ret)
-   break;
-   uaddr += PAGE_SIZE;
-   }
-   return ret;
+   return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-- 
1.9.1



[PATCH v4 1/9] mm: Introduce new vm_map_pages() and vm_map_pages_zero() API

2019-02-14 Thread Souptick Joarder
Previously, drivers had their own way of mapping a range of
kernel pages/memory into a user vma, done by invoking
vm_insert_page() within a loop.

As this pattern is common across different drivers, it can
be generalized by creating new functions and using them across
the drivers.

vm_map_pages() is the API which could be used to map
kernel memory/pages in drivers which have considered vm_pgoff.

vm_map_pages_zero() is the API which could be used to map a
range of kernel memory/pages in drivers which have not considered
vm_pgoff. vm_pgoff is passed as 0 by default for those drivers.

We _could_ then, at a later point, "fix" these drivers which are using
vm_map_pages_zero() to behave according to the normal vm_pgoff
offsetting simply by removing the _zero suffix on the function
name; if that causes regressions, it gives us an easy way to revert.

Tested on Rockchip hardware and display is working, including talking
to Lima via prime.

Signed-off-by: Souptick Joarder 
Suggested-by: Russell King 
Suggested-by: Matthew Wilcox 
Reviewed-by: Mike Rapoport 
Tested-by: Heiko Stuebner 
---
 include/linux/mm.h |  4 +++
 mm/memory.c| 81 ++
 mm/nommu.c | 14 ++
 3 files changed, 99 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 80bb640..e0aaa73 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2565,6 +2565,10 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num);
+int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num);
 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
diff --git a/mm/memory.c b/mm/memory.c
index e11ca9d..cad3e27 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1520,6 +1520,87 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+/*
+ * __vm_map_pages - maps range of kernel pages into user vma
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ * @offset: user's requested vm_pgoff
+ *
+ * This allows drivers to map range of kernel pages into a user vma.
+ *
+ * Return: 0 on success and error code otherwise.
+ */
+static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num, unsigned long offset)
+{
+   unsigned long count = vma_pages(vma);
+   unsigned long uaddr = vma->vm_start;
+   int ret, i;
+
+   /* Fail if the user requested offset is beyond the end of the object */
+   if (offset > num)
+   return -ENXIO;
+
+   /* Fail if the user requested size exceeds available object size */
+   if (count > num - offset)
+   return -ENXIO;
+
+   for (i = 0; i < count; i++) {
+   ret = vm_insert_page(vma, uaddr, pages[offset + i]);
+   if (ret < 0)
+   return ret;
+   uaddr += PAGE_SIZE;
+   }
+
+   return 0;
+}
+
+/**
+ * vm_map_pages - maps range of kernel pages starts with non zero offset
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ *
+ * Maps an object consisting of @num pages, catering for the user's
+ * requested vm_pgoff
+ *
+ * If we fail to insert any page into the vma, the function will return
+ * immediately leaving any previously inserted pages present.  Callers
+ * from the mmap handler may immediately return the error as their caller
+ * will destroy the vma, removing any successfully inserted pages. Other
+ * callers should make their own arrangements for calling unmap_region().
+ *
+ * Context: Process context. Called by mmap handlers.
+ * Return: 0 on success and error code otherwise.
+ */
+int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num)
+{
+   return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
+}
+EXPORT_SYMBOL(vm_map_pages);
+
+/**
+ * vm_map_pages_zero - map range of kernel pages starts with zero offset
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ *
+ * Similar to vm_map_pages(), except that it explicitly sets the offset
+ * to 0. This function is intended for the drivers that did not consider
+ * vm_pgoff.
+ *
+ * Context: Process context. Called by mmap handlers.
+ * Return: 0 on success and error code otherwise.

[PATCH v4 0/9] mm: Use vm_map_pages() and vm_map_pages_zero() API

2019-02-14 Thread Souptick Joarder
Previously, drivers had their own way of mapping a range of
kernel pages/memory into a user vma, done by invoking
vm_insert_page() within a loop.

As this pattern is common across different drivers, it can
be generalized by creating new functions and using them across
the drivers.

vm_map_pages() is the API which could be used to map
kernel memory/pages in drivers which have considered vm_pgoff.

vm_map_pages_zero() is the API which could be used to map a
range of kernel memory/pages in drivers which have not considered
vm_pgoff. vm_pgoff is passed as 0 by default for those drivers.

We _could_ then, at a later point, "fix" these drivers which are using
vm_map_pages_zero() to behave according to the normal vm_pgoff
offsetting simply by removing the _zero suffix on the function
name; if that causes regressions, it gives us an easy way to revert.

Tested on Rockchip hardware and display is working fine, including talking
to Lima via prime.

v1 -> v2:
Few Reviewed-by.

Updated the change log in [8/9]

In [7/9], vm_pgoff is treated in the V4L2 API as a 'cookie'
to select a buffer, not as an in-buffer offset by design,
and it always wants to mmap a whole buffer from its beginning.
Added additional changes after discussing with Marek and
vm_map_pages() could be used instead of vm_map_pages_zero().

v2 -> v3:
Corrected the documentation as per review comment.

As suggested in v2, renaming the interfaces to -
*vm_insert_range() -> vm_map_pages()* and
*vm_insert_range_buggy() -> vm_map_pages_zero()*.
As the interface is renamed, modified the code accordingly,
updated the change logs and modified the subject lines to use the
new interfaces. There is no other change apart from renaming and
using the new interface.

Patch[1/9] & [4/9], Tested on Rockchip hardware.

v3 -> v4:
Fixed build warnings on patch [8/9] reported by kbuild test robot.

Souptick Joarder (9):
  mm: Introduce new vm_map_pages() and vm_map_pages_zero() API
  arm: mm: dma-mapping: Convert to use vm_map_pages()
  drivers/firewire/core-iso.c: Convert to use vm_map_pages_zero()
  drm/rockchip/rockchip_drm_gem.c: Convert to use vm_map_pages()
  drm/xen/xen_drm_front_gem.c: Convert to use vm_map_pages()
  iommu/dma-iommu.c: Convert to use vm_map_pages()
  videobuf2/videobuf2-dma-sg.c: Convert to use vm_map_pages()
  xen/gntdev.c: Convert to use vm_map_pages()
  xen/privcmd-buf.c: Convert to use vm_map_pages_zero()

 arch/arm/mm/dma-mapping.c  | 22 ++
 drivers/firewire/core-iso.c| 15 +---
 drivers/gpu/drm/rockchip/rockchip_drm_gem.c| 17 +
 drivers/gpu/drm/xen/xen_drm_front_gem.c| 18 ++---
 drivers/iommu/dma-iommu.c  | 12 +---
 drivers/media/common/videobuf2/videobuf2-core.c|  7 ++
 .../media/common/videobuf2/videobuf2-dma-contig.c  |  6 --
 drivers/media/common/videobuf2/videobuf2-dma-sg.c  | 22 ++
 drivers/xen/gntdev.c   | 11 ++-
 drivers/xen/privcmd-buf.c  |  8 +--
 include/linux/mm.h |  4 ++
 mm/memory.c| 81 ++
 mm/nommu.c | 14 
 13 files changed, 134 insertions(+), 103 deletions(-)

-- 
1.9.1



[PATCH v3 6/9] iommu/dma-iommu.c: Convert to use vm_map_pages()

2019-02-13 Thread Souptick Joarder
Convert to use vm_map_pages() to map a range of kernel
memory to a user vma.

Signed-off-by: Souptick Joarder 
---
 drivers/iommu/dma-iommu.c | 12 +---
 1 file changed, 1 insertion(+), 11 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d19f3d6..bacebff 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -620,17 +620,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
 
 int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
 {
-   unsigned long uaddr = vma->vm_start;
-   unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-   int ret = -ENXIO;
-
-   for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
-   ret = vm_insert_page(vma, uaddr, pages[i]);
-   if (ret)
-   break;
-   uaddr += PAGE_SIZE;
-   }
-   return ret;
+   return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-- 
1.9.1



[PATCH v3 1/9] mm: Introduce new vm_map_pages() and vm_map_pages_zero() API

2019-02-13 Thread Souptick Joarder
Previously, drivers had their own way of mapping a range of
kernel pages/memory into a user vma, done by invoking
vm_insert_page() within a loop.

As this pattern is common across different drivers, it can
be generalized by creating new functions and using them across
the drivers.

vm_map_pages() is the API which could be used to map
kernel memory/pages in drivers which have considered vm_pgoff.

vm_map_pages_zero() is the API which could be used to map a
range of kernel memory/pages in drivers which have not considered
vm_pgoff. vm_pgoff is passed as 0 by default for those drivers.

We _could_ then, at a later point, "fix" these drivers which are using
vm_map_pages_zero() to behave according to the normal vm_pgoff
offsetting simply by removing the _zero suffix on the function
name; if that causes regressions, it gives us an easy way to revert.

Tested on Rockchip hardware and display is working, including talking
to Lima via prime.

Signed-off-by: Souptick Joarder 
Suggested-by: Russell King 
Suggested-by: Matthew Wilcox 
Reviewed-by: Mike Rapoport 
Tested-by: Heiko Stuebner 
---
 include/linux/mm.h |  4 +++
 mm/memory.c| 81 ++
 mm/nommu.c | 14 ++
 3 files changed, 99 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 80bb640..e0aaa73 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2565,6 +2565,10 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num);
+int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num);
 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
diff --git a/mm/memory.c b/mm/memory.c
index e11ca9d..cad3e27 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1520,6 +1520,87 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+/*
+ * __vm_map_pages - maps range of kernel pages into user vma
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ * @offset: user's requested vm_pgoff
+ *
+ * This allows drivers to map range of kernel pages into a user vma.
+ *
+ * Return: 0 on success and error code otherwise.
+ */
+static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num, unsigned long offset)
+{
+   unsigned long count = vma_pages(vma);
+   unsigned long uaddr = vma->vm_start;
+   int ret, i;
+
+   /* Fail if the user requested offset is beyond the end of the object */
+   if (offset > num)
+   return -ENXIO;
+
+   /* Fail if the user requested size exceeds available object size */
+   if (count > num - offset)
+   return -ENXIO;
+
+   for (i = 0; i < count; i++) {
+   ret = vm_insert_page(vma, uaddr, pages[offset + i]);
+   if (ret < 0)
+   return ret;
+   uaddr += PAGE_SIZE;
+   }
+
+   return 0;
+}
+
+/**
+ * vm_map_pages - maps range of kernel pages starts with non zero offset
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ *
+ * Maps an object consisting of @num pages, catering for the user's
+ * requested vm_pgoff
+ *
+ * If we fail to insert any page into the vma, the function will return
+ * immediately leaving any previously inserted pages present.  Callers
+ * from the mmap handler may immediately return the error as their caller
+ * will destroy the vma, removing any successfully inserted pages. Other
+ * callers should make their own arrangements for calling unmap_region().
+ *
+ * Context: Process context. Called by mmap handlers.
+ * Return: 0 on success and error code otherwise.
+ */
+int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num)
+{
+   return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
+}
+EXPORT_SYMBOL(vm_map_pages);
+
+/**
+ * vm_map_pages_zero - map range of kernel pages starts with zero offset
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ *
+ * Similar to vm_map_pages(), except that it explicitly sets the offset
+ * to 0. This function is intended for the drivers that did not consider
+ * vm_pgoff.
+ *
+ * Context: Process context. Called by mmap handlers.
+ * Return: 0 on success and error code otherwise.

[PATCH v3 0/9] mm: Use vm_map_pages() and vm_map_pages_zero() API

2019-02-13 Thread Souptick Joarder
Previously, drivers had their own way of mapping a range of
kernel pages/memory into a user vma, done by invoking
vm_insert_page() within a loop.

As this pattern is common across different drivers, it can
be generalized by creating new functions and using them across
the drivers.

vm_map_pages() is the API which could be used to map
kernel memory/pages in drivers which have considered vm_pgoff.

vm_map_pages_zero() is the API which could be used to map a
range of kernel memory/pages in drivers which have not considered
vm_pgoff. vm_pgoff is passed as 0 by default for those drivers.

We _could_ then, at a later point, "fix" these drivers which are using
vm_map_pages_zero() to behave according to the normal vm_pgoff
offsetting simply by removing the _zero suffix on the function
name; if that causes regressions, it gives us an easy way to revert.

Tested on Rockchip hardware and display is working fine, including talking
to Lima via prime.

v1 -> v2:
Few Reviewed-by.

Updated the change log in [8/9]

In [7/9], vm_pgoff is treated in the V4L2 API as a 'cookie'
to select a buffer, not as an in-buffer offset by design,
and it always wants to mmap a whole buffer from its beginning.
Added additional changes after discussing with Marek and
vm_map_pages() could be used instead of vm_map_pages_zero().

v2 -> v3:
Corrected the documentation as per review comment.

As suggested in v2, renaming the interfaces to -
*vm_insert_range() -> vm_map_pages()* and
*vm_insert_range_buggy() -> vm_map_pages_zero()*.
As the interface is renamed, modified the code accordingly,
updated the change logs and modified the subject lines to use the
new interfaces. There is no other change apart from renaming and
using the new interface.

Patch[1/9] & [4/9], Tested on Rockchip hardware.

Souptick Joarder (9):
  mm: Introduce new vm_map_pages() and vm_map_pages_zero() API
  arm: mm: dma-mapping: Convert to use vm_map_pages()
  drivers/firewire/core-iso.c: Convert to use vm_map_pages_zero()
  drm/rockchip/rockchip_drm_gem.c: Convert to use vm_map_pages()
  drm/xen/xen_drm_front_gem.c: Convert to use vm_map_pages()
  iommu/dma-iommu.c: Convert to use vm_map_pages()
  videobuf2/videobuf2-dma-sg.c: Convert to use vm_map_pages()
  xen/gntdev.c: Convert to use vm_map_pages()
  xen/privcmd-buf.c: Convert to use vm_map_pages_zero()

 arch/arm/mm/dma-mapping.c  | 22 ++
 drivers/firewire/core-iso.c| 15 +---
 drivers/gpu/drm/rockchip/rockchip_drm_gem.c| 17 +
 drivers/gpu/drm/xen/xen_drm_front_gem.c| 18 ++---
 drivers/iommu/dma-iommu.c  | 12 +---
 drivers/media/common/videobuf2/videobuf2-core.c|  7 ++
 .../media/common/videobuf2/videobuf2-dma-contig.c  |  6 --
 drivers/media/common/videobuf2/videobuf2-dma-sg.c  | 22 ++
 drivers/xen/gntdev.c   | 16 ++---
 drivers/xen/privcmd-buf.c  |  8 +--
 include/linux/mm.h |  4 ++
 mm/memory.c| 81 ++
 mm/nommu.c | 14 
 13 files changed, 136 insertions(+), 106 deletions(-)

-- 
1.9.1



Re: [PATCHv2 1/9] mm: Introduce new vm_insert_range and vm_insert_range_buggy API

2019-02-11 Thread Souptick Joarder
On Fri, Feb 8, 2019 at 10:52 AM Souptick Joarder  wrote:
>
> On Thu, Feb 7, 2019 at 10:17 PM Matthew Wilcox  wrote:
> >
> > On Thu, Feb 07, 2019 at 09:19:47PM +0530, Souptick Joarder wrote:
> > > Just thought to take opinions on the documentation before placing it in v3.
> > > Does it look fine?
> > >
> > > +/**
> > > + * __vm_insert_range - insert range of kernel pages into user vma
> > > + * @vma: user vma to map to
> > > + * @pages: pointer to array of source kernel pages
> > > + * @num: number of pages in page array
> > > + * @offset: user's requested vm_pgoff
> > > + *
> > > + * This allow drivers to insert range of kernel pages into a user vma.
> > > + *
> > > + * Return: 0 on success and error code otherwise.
> > > + */
> > > +static int __vm_insert_range(struct vm_area_struct *vma, struct page **pages,
> > > +   unsigned long num, unsigned long offset)
> >
> > For static functions, I prefer to leave off the second '*', ie make it
> > formatted like a docbook comment, but not be processed like a docbook
> > comment.  That avoids cluttering the html with descriptions of internal
> > functions that people can't actually call.
> >
> > > +/**
> > > + * vm_insert_range - insert range of kernel pages starts with non zero offset
> > > + * @vma: user vma to map to
> > > + * @pages: pointer to array of source kernel pages
> > > + * @num: number of pages in page array
> > > + *
> > > + * Maps an object consisting of `num' `pages', catering for the user's
> >
> > Rather than using `num', you should use @num.
> >
> > > + * requested vm_pgoff
> > > + *
> > > + * If we fail to insert any page into the vma, the function will return
> > > + * immediately leaving any previously inserted pages present.  Callers
> > > + * from the mmap handler may immediately return the error as their caller
> > > + * will destroy the vma, removing any successfully inserted pages. Other
> > > + * callers should make their own arrangements for calling unmap_region().
> > > + *
> > > + * Context: Process context. Called by mmap handlers.
> > > + * Return: 0 on success and error code otherwise.
> > > + */
> > > +int vm_insert_range(struct vm_area_struct *vma, struct page **pages,
> > > +   unsigned long num)
> > >
> > >
> > > +/**
> > > + * vm_insert_range_buggy - insert range of kernel pages starts with zero 
> > > offset
> > > + * @vma: user vma to map to
> > > + * @pages: pointer to array of source kernel pages
> > > + * @num: number of pages in page array
> > > + *
> > > + * Similar to vm_insert_range(), except that it explicitly sets 
> > > @vm_pgoff to
> >
> > But vm_pgoff isn't a parameter, so it's misleading to format it as such.
> >
> > > + * 0. This function is intended for the drivers that did not consider
> > > + * @vm_pgoff.
> > > + *
> > > + * Context: Process context. Called by mmap handlers.
> > > + * Return: 0 on success and error code otherwise.
> > > + */
> > > +int vm_insert_range_buggy(struct vm_area_struct *vma, struct page 
> > > **pages,
> > > +   unsigned long num)
> >
> > I don't think we should call it 'buggy'.  'zero' would make more sense
> > as a suffix.
>
> suffix can be *zero or zero_offset*, whichever suits better.
>
> >
> > Given how this interface has evolved, I'm no longer sure than
> > 'vm_insert_range' makes sense as the name for it.  Is it perhaps
> > 'vm_map_object' or 'vm_map_pages'?
> >
>
> I prefer vm_map_pages. Considering that, both interface names can be changed:
> *vm_insert_range -> vm_map_pages* and *vm_insert_range_buggy ->
> vm_map_pages_{zero/zero_offset}*.
>
> As the only change is the interface name and the rest of the code remains
> the same, shall I post it in v3 (with an additional change log noting the
> interface rename)?
>
> or,
>
> will it be a new patch series (carrying forward all the Reviewed-by /
> Tested-by tags on vm_insert_range / vm_insert_range_buggy)?

Any suggestions on this minor query ?


Re: [PATCHv2 1/9] mm: Introduce new vm_insert_range and vm_insert_range_buggy API

2019-02-07 Thread Souptick Joarder
On Thu, Feb 7, 2019 at 10:17 PM Matthew Wilcox  wrote:
>
> On Thu, Feb 07, 2019 at 09:19:47PM +0530, Souptick Joarder wrote:
> > Just thought to get an opinion on the documentation before placing it in v3.
> > Does it look fine?
> >
> > +/**
> > + * __vm_insert_range - insert range of kernel pages into user vma
> > + * @vma: user vma to map to
> > + * @pages: pointer to array of source kernel pages
> > + * @num: number of pages in page array
> > + * @offset: user's requested vm_pgoff
> > + *
> > + * This allow drivers to insert range of kernel pages into a user vma.
> > + *
> > + * Return: 0 on success and error code otherwise.
> > + */
> > +static int __vm_insert_range(struct vm_area_struct *vma, struct page 
> > **pages,
> > +   unsigned long num, unsigned long offset)
>
> For static functions, I prefer to leave off the second '*', ie make it
> formatted like a docbook comment, but not be processed like a docbook
> comment.  That avoids cluttering the html with descriptions of internal
> functions that people can't actually call.
>
> > +/**
> > + * vm_insert_range - insert range of kernel pages starts with non zero 
> > offset
> > + * @vma: user vma to map to
> > + * @pages: pointer to array of source kernel pages
> > + * @num: number of pages in page array
> > + *
> > + * Maps an object consisting of `num' `pages', catering for the user's
>
> Rather than using `num', you should use @num.
>
> > + * requested vm_pgoff
> > + *
> > + * If we fail to insert any page into the vma, the function will return
> > + * immediately leaving any previously inserted pages present.  Callers
> > + * from the mmap handler may immediately return the error as their caller
> > + * will destroy the vma, removing any successfully inserted pages. Other
> > + * callers should make their own arrangements for calling unmap_region().
> > + *
> > + * Context: Process context. Called by mmap handlers.
> > + * Return: 0 on success and error code otherwise.
> > + */
> > +int vm_insert_range(struct vm_area_struct *vma, struct page **pages,
> > +   unsigned long num)
> >
> >
> > +/**
> > + * vm_insert_range_buggy - insert range of kernel pages starts with zero 
> > offset
> > + * @vma: user vma to map to
> > + * @pages: pointer to array of source kernel pages
> > + * @num: number of pages in page array
> > + *
> > + * Similar to vm_insert_range(), except that it explicitly sets @vm_pgoff 
> > to
>
> But vm_pgoff isn't a parameter, so it's misleading to format it as such.
>
> > + * 0. This function is intended for the drivers that did not consider
> > + * @vm_pgoff.
> > + *
> > + * Context: Process context. Called by mmap handlers.
> > + * Return: 0 on success and error code otherwise.
> > + */
> > +int vm_insert_range_buggy(struct vm_area_struct *vma, struct page **pages,
> > +   unsigned long num)
>
> I don't think we should call it 'buggy'.  'zero' would make more sense
> as a suffix.

suffix can be *zero or zero_offset*, whichever suits better.

>
> Given how this interface has evolved, I'm no longer sure than
> 'vm_insert_range' makes sense as the name for it.  Is it perhaps
> 'vm_map_object' or 'vm_map_pages'?
>

I prefer vm_map_pages. Considering that, both interface names can be changed:
*vm_insert_range -> vm_map_pages* and *vm_insert_range_buggy ->
vm_map_pages_{zero/zero_offset}*.

As the only change is the interface name and the rest of the code remains
the same, shall I post it in v3 (with an additional change log noting the
interface rename)?

or,

will it be a new patch series (carrying forward all the Reviewed-by /
Tested-by tags on vm_insert_range / vm_insert_range_buggy)?
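
Folding the doc-comment feedback from this thread together, the
kernel-doc for the offset-aware helper might end up looking roughly
like the sketch below (written against the renamed interface; this is
not the final merged text):

/**
 * vm_map_pages - insert range of kernel pages into user vma
 * @vma: user vma to map to
 * @pages: pointer to array of source kernel pages
 * @num: number of pages in page array
 *
 * Maps an object consisting of @num pages, catering for the user's
 * requested vm_pgoff.
 *
 * Context: Process context. Called by mmap handlers.
 * Return: 0 on success and error code otherwise.
 */

The static __vm_insert_range() would keep a plain comment (only one
leading '*') so the internal helper is not rendered as kernel-doc.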


Re: [PATCHv2 1/9] mm: Introduce new vm_insert_range and vm_insert_range_buggy API

2019-02-07 Thread Souptick Joarder
On Thu, Feb 7, 2019 at 9:27 PM Mike Rapoport  wrote:
>
> Hi Souptick,
>
> On Thu, Feb 07, 2019 at 09:19:47PM +0530, Souptick Joarder wrote:
> > Hi Mike,
> >
> > Just thought to get an opinion on the documentation before placing it in v3.
> > Does it look fine?
>
> Overall looks good to me. Several minor points below.

Thanks Mike. Noted.
Shall I consider it as *Reviewed-by:* with the below changes?

>
> > +/**
> > + * __vm_insert_range - insert range of kernel pages into user vma
> > + * @vma: user vma to map to
> > + * @pages: pointer to array of source kernel pages
> > + * @num: number of pages in page array
> > + * @offset: user's requested vm_pgoff
> > + *
> > + * This allow drivers to insert range of kernel pages into a user vma.
>
>   allows
> > + *
> > + * Return: 0 on success and error code otherwise.
> > + */
> > +static int __vm_insert_range(struct vm_area_struct *vma, struct page 
> > **pages,
> > +   unsigned long num, unsigned long offset)
> >
> >
> > +/**
> > + * vm_insert_range - insert range of kernel pages starts with non zero 
> > offset
> > + * @vma: user vma to map to
> > + * @pages: pointer to array of source kernel pages
> > + * @num: number of pages in page array
> > + *
> > + * Maps an object consisting of `num' `pages', catering for the user's
>@num pages
> > + * requested vm_pgoff
> > + *
> > + * If we fail to insert any page into the vma, the function will return
> > + * immediately leaving any previously inserted pages present.  Callers
> > + * from the mmap handler may immediately return the error as their caller
> > + * will destroy the vma, removing any successfully inserted pages. Other
> > + * callers should make their own arrangements for calling unmap_region().
> > + *
> > + * Context: Process context. Called by mmap handlers.
> > + * Return: 0 on success and error code otherwise.
> > + */
> > +int vm_insert_range(struct vm_area_struct *vma, struct page **pages,
> > +   unsigned long num)
> >
> >
> > +/**
> > + * vm_insert_range_buggy - insert range of kernel pages starts with zero 
> > offset
> > + * @vma: user vma to map to
> > + * @pages: pointer to array of source kernel pages
> > + * @num: number of pages in page array
> > + *
> > + * Similar to vm_insert_range(), except that it explicitly sets @vm_pgoff 
> > to
>
>   the offset
>
> > + * 0. This function is intended for the drivers that did not consider
> > + * @vm_pgoff.
> > + *
> > + * Context: Process context. Called by mmap handlers.
> > + * Return: 0 on success and error code otherwise.
> > + */
> > +int vm_insert_range_buggy(struct vm_area_struct *vma, struct page **pages,
> > +   unsigned long num)
> >
>
> --
> Sincerely yours,
> Mike.
>


Re: [PATCHv2 1/9] mm: Introduce new vm_insert_range and vm_insert_range_buggy API

2019-02-07 Thread Souptick Joarder
Hi Mike,

On Thu, Jan 31, 2019 at 2:09 PM Mike Rapoport  wrote:
>
> On Thu, Jan 31, 2019 at 08:38:12AM +0530, Souptick Joarder wrote:
> > Previously drivers had their own way of mapping a range of
> > kernel pages/memory into a user vma, and this was done by
> > invoking vm_insert_page() within a loop.
> >
> > As this pattern is common across different drivers, it can
> > be generalized by creating new functions and using them across
> > the drivers.
> >
> > vm_insert_range() is the API which could be used to map
> > kernel memory/pages in drivers which have considered vm_pgoff.
> >
> > vm_insert_range_buggy() is the API which could be used to map a
> > range of kernel memory/pages in drivers which have not considered
> > vm_pgoff. vm_pgoff is passed as 0 by default for those drivers.
> >
> > We _could_ then at a later date "fix" these drivers which are using
> > vm_insert_range_buggy() to behave according to the normal vm_pgoff
> > offsetting simply by removing the _buggy suffix on the function
> > name and if that causes regressions, it gives us an easy way to revert.
> >
> > Signed-off-by: Souptick Joarder 
> > Suggested-by: Russell King 
> > Suggested-by: Matthew Wilcox 
> > ---
> >  include/linux/mm.h |  4 +++
> >  mm/memory.c| 81 
> > ++
> >  mm/nommu.c | 14 ++
> >  3 files changed, 99 insertions(+)
> >
> > diff --git a/include/linux/mm.h b/include/linux/mm.h
> > index 80bb640..25752b0 100644
> > --- a/include/linux/mm.h
> > +++ b/include/linux/mm.h
> > @@ -2565,6 +2565,10 @@ unsigned long change_prot_numa(struct vm_area_struct 
> > *vma,
> >  int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
> >   unsigned long pfn, unsigned long size, pgprot_t);
> >  int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct 
> > page *);
> > +int vm_insert_range(struct vm_area_struct *vma, struct page **pages,
> > + unsigned long num);
> > +int vm_insert_range_buggy(struct vm_area_struct *vma, struct page **pages,
> > + unsigned long num);
> >  vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
> >   unsigned long pfn);
> >  vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long 
> > addr,
> > diff --git a/mm/memory.c b/mm/memory.c
> > index e11ca9d..0a4bf57 100644
> > --- a/mm/memory.c
> > +++ b/mm/memory.c
> > @@ -1520,6 +1520,87 @@ int vm_insert_page(struct vm_area_struct *vma, 
> > unsigned long addr,
> >  }
> >  EXPORT_SYMBOL(vm_insert_page);
> >
> > +/**
> > + * __vm_insert_range - insert range of kernel pages into user vma
> > + * @vma: user vma to map to
> > + * @pages: pointer to array of source kernel pages
> > + * @num: number of pages in page array
> > + * @offset: user's requested vm_pgoff
> > + *
> > + * This allows drivers to insert range of kernel pages they've allocated
> > + * into a user vma.
> > + *
> > + * If we fail to insert any page into the vma, the function will return
> > + * immediately leaving any previously inserted pages present.  Callers
> > + * from the mmap handler may immediately return the error as their caller
> > + * will destroy the vma, removing any successfully inserted pages. Other
> > + * callers should make their own arrangements for calling unmap_region().
> > + *
> > + * Context: Process context.
> > + * Return: 0 on success and error code otherwise.
> > + */
> > +static int __vm_insert_range(struct vm_area_struct *vma, struct page 
> > **pages,
> > + unsigned long num, unsigned long offset)
> > +{
> > + unsigned long count = vma_pages(vma);
> > + unsigned long uaddr = vma->vm_start;
> > + int ret, i;
> > +
> > + /* Fail if the user requested offset is beyond the end of the object 
> > */
> > + if (offset > num)
> > + return -ENXIO;
> > +
> > + /* Fail if the user requested size exceeds available object size */
> > + if (count > num - offset)
> > + return -ENXIO;
> > +
> > + for (i = 0; i < count; i++) {
> > + ret = vm_insert_page(vma, uaddr, pages[offset + i]);
> > + if (ret < 0)
> > + return ret;
> > + uaddr += PAGE_SIZE;
> > + }
> > +
> > + return 0;
> > +}
> > 

Re: [PATCHv2 6/9] iommu/dma-iommu.c: Convert to use vm_insert_range

2019-02-03 Thread Souptick Joarder
Hi Joerg,

On Thu, Jan 31, 2019 at 8:38 AM Souptick Joarder  wrote:
>
> Convert to use vm_insert_range() to map range of kernel
> memory to user vma.
>
> Signed-off-by: Souptick Joarder 

Can you please help review this patch?

> ---
>  drivers/iommu/dma-iommu.c | 12 +---
>  1 file changed, 1 insertion(+), 11 deletions(-)
>
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index d19f3d6..bdf14b87 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -620,17 +620,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t 
> size, gfp_t gfp,
>
>  int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct 
> *vma)
>  {
> -   unsigned long uaddr = vma->vm_start;
> -   unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
> -   int ret = -ENXIO;
> -
> -   for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
> -   ret = vm_insert_page(vma, uaddr, pages[i]);
> -   if (ret)
> -   break;
> -   uaddr += PAGE_SIZE;
> -   }
> -   return ret;
> +   return vm_insert_range(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
>  }
>
>  static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
> --
> 1.9.1
>


Re: [PATCHv2 1/9] mm: Introduce new vm_insert_range and vm_insert_range_buggy API

2019-02-01 Thread Souptick Joarder
On Thu, Jan 31, 2019 at 6:04 PM Heiko Stuebner  wrote:
>
> On Thursday, 31 January 2019 at 13:31:52 CET, Souptick Joarder wrote:
> > On Thu, Jan 31, 2019 at 5:37 PM Heiko Stuebner  wrote:
> > >
> > > On Thursday, 31 January 2019 at 04:08:12 CET, Souptick Joarder wrote:
> > > > Previously drivers had their own way of mapping a range of
> > > > kernel pages/memory into a user vma, and this was done by
> > > > invoking vm_insert_page() within a loop.
> > > >
> > > > As this pattern is common across different drivers, it can
> > > > be generalized by creating new functions and using them across
> > > > the drivers.
> > > >
> > > > vm_insert_range() is the API which could be used to map
> > > > kernel memory/pages in drivers which have considered vm_pgoff.
> > > >
> > > > vm_insert_range_buggy() is the API which could be used to map a
> > > > range of kernel memory/pages in drivers which have not considered
> > > > vm_pgoff. vm_pgoff is passed as 0 by default for those drivers.
> > > >
> > > > We _could_ then at a later date "fix" these drivers which are using
> > > > vm_insert_range_buggy() to behave according to the normal vm_pgoff
> > > > offsetting simply by removing the _buggy suffix on the function
> > > > name and if that causes regressions, it gives us an easy way to revert.
> > > >
> > > > Signed-off-by: Souptick Joarder 
> > > > Suggested-by: Russell King 
> > > > Suggested-by: Matthew Wilcox 
> > >
> > > hmm, I'm missing a changelog here between v1 and v2.
> > > Nevertheless I managed to test v1 on Rockchip hardware
> > > and display is still working, including talking to Lima via prime.
> > >
> > > So if there aren't any big changes for v2, on Rockchip
> > > Tested-by: Heiko Stuebner 
> >
> > Change log is available in [0/9].
> > Patches [1/9] & [4/9] have no changes between v1 and v2.
>
> I never seem to get your cover-letters, so didn't see that, sorry.

I added you to the recipient list for all cover-letters, but they didn't
reach your inbox :-)
Thanks for reviewing and validating the patch.

>
> But great that there weren't changes then :-)
>
> Heiko
>
>


Re: [PATCHv2 1/9] mm: Introduce new vm_insert_range and vm_insert_range_buggy API

2019-01-31 Thread Souptick Joarder
On Thu, Jan 31, 2019 at 5:37 PM Heiko Stuebner  wrote:
>
> On Thursday, 31 January 2019 at 04:08:12 CET, Souptick Joarder wrote:
> > Previously drivers had their own way of mapping a range of
> > kernel pages/memory into a user vma, and this was done by
> > invoking vm_insert_page() within a loop.
> >
> > As this pattern is common across different drivers, it can
> > be generalized by creating new functions and using them across
> > the drivers.
> >
> > vm_insert_range() is the API which could be used to map
> > kernel memory/pages in drivers which have considered vm_pgoff.
> >
> > vm_insert_range_buggy() is the API which could be used to map a
> > range of kernel memory/pages in drivers which have not considered
> > vm_pgoff. vm_pgoff is passed as 0 by default for those drivers.
> >
> > We _could_ then at a later date "fix" these drivers which are using
> > vm_insert_range_buggy() to behave according to the normal vm_pgoff
> > offsetting simply by removing the _buggy suffix on the function
> > name and if that causes regressions, it gives us an easy way to revert.
> >
> > Signed-off-by: Souptick Joarder 
> > Suggested-by: Russell King 
> > Suggested-by: Matthew Wilcox 
>
> hmm, I'm missing a changelog here between v1 and v2.
> Nevertheless I managed to test v1 on Rockchip hardware
> and display is still working, including talking to Lima via prime.
>
> So if there aren't any big changes for v2, on Rockchip
> Tested-by: Heiko Stuebner 

Change log is available in [0/9].
Patches [1/9] & [4/9] have no changes between v1 and v2.


Re: [PATCHv2 1/9] mm: Introduce new vm_insert_range and vm_insert_range_buggy API

2019-01-31 Thread Souptick Joarder
On Thu, Jan 31, 2019 at 2:09 PM Mike Rapoport  wrote:
>
> On Thu, Jan 31, 2019 at 08:38:12AM +0530, Souptick Joarder wrote:
> > Previously drivers had their own way of mapping a range of
> > kernel pages/memory into a user vma, and this was done by
> > invoking vm_insert_page() within a loop.
> >
> > As this pattern is common across different drivers, it can
> > be generalized by creating new functions and using them across
> > the drivers.
> >
> > vm_insert_range() is the API which could be used to map
> > kernel memory/pages in drivers which have considered vm_pgoff.
> >
> > vm_insert_range_buggy() is the API which could be used to map a
> > range of kernel memory/pages in drivers which have not considered
> > vm_pgoff. vm_pgoff is passed as 0 by default for those drivers.
> >
> > We _could_ then at a later date "fix" these drivers which are using
> > vm_insert_range_buggy() to behave according to the normal vm_pgoff
> > offsetting simply by removing the _buggy suffix on the function
> > name and if that causes regressions, it gives us an easy way to revert.
> >
> > Signed-off-by: Souptick Joarder 
> > Suggested-by: Russell King 
> > Suggested-by: Matthew Wilcox 
> > ---
> >  include/linux/mm.h |  4 +++
> >  mm/memory.c| 81 
> > ++
> >  mm/nommu.c | 14 ++
> >  3 files changed, 99 insertions(+)
> >
> > diff --git a/include/linux/mm.h b/include/linux/mm.h
> > index 80bb640..25752b0 100644
> > --- a/include/linux/mm.h
> > +++ b/include/linux/mm.h
> > @@ -2565,6 +2565,10 @@ unsigned long change_prot_numa(struct vm_area_struct 
> > *vma,
> >  int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
> >   unsigned long pfn, unsigned long size, pgprot_t);
> >  int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct 
> > page *);
> > +int vm_insert_range(struct vm_area_struct *vma, struct page **pages,
> > + unsigned long num);
> > +int vm_insert_range_buggy(struct vm_area_struct *vma, struct page **pages,
> > + unsigned long num);
> >  vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
> >   unsigned long pfn);
> >  vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long 
> > addr,
> > diff --git a/mm/memory.c b/mm/memory.c
> > index e11ca9d..0a4bf57 100644
> > --- a/mm/memory.c
> > +++ b/mm/memory.c
> > @@ -1520,6 +1520,87 @@ int vm_insert_page(struct vm_area_struct *vma, 
> > unsigned long addr,
> >  }
> >  EXPORT_SYMBOL(vm_insert_page);
> >
> > +/**
> > + * __vm_insert_range - insert range of kernel pages into user vma
> > + * @vma: user vma to map to
> > + * @pages: pointer to array of source kernel pages
> > + * @num: number of pages in page array
> > + * @offset: user's requested vm_pgoff
> > + *
> > + * This allows drivers to insert range of kernel pages they've allocated
> > + * into a user vma.
> > + *
> > + * If we fail to insert any page into the vma, the function will return
> > + * immediately leaving any previously inserted pages present.  Callers
> > + * from the mmap handler may immediately return the error as their caller
> > + * will destroy the vma, removing any successfully inserted pages. Other
> > + * callers should make their own arrangements for calling unmap_region().
> > + *
> > + * Context: Process context.
> > + * Return: 0 on success and error code otherwise.
> > + */
> > +static int __vm_insert_range(struct vm_area_struct *vma, struct page 
> > **pages,
> > + unsigned long num, unsigned long offset)
> > +{
> > + unsigned long count = vma_pages(vma);
> > + unsigned long uaddr = vma->vm_start;
> > + int ret, i;
> > +
> > + /* Fail if the user requested offset is beyond the end of the object 
> > */
> > + if (offset > num)
> > + return -ENXIO;
> > +
> > + /* Fail if the user requested size exceeds available object size */
> > + if (count > num - offset)
> > + return -ENXIO;
> > +
> > + for (i = 0; i < count; i++) {
> > + ret = vm_insert_page(vma, uaddr, pages[offset + i]);
> > + if (ret < 0)
> > + return ret;
> > + uaddr += PAGE_SIZE;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +

[PATCHv2 6/9] iommu/dma-iommu.c: Convert to use vm_insert_range

2019-01-30 Thread Souptick Joarder
Convert to use vm_insert_range() to map range of kernel
memory to user vma.

Signed-off-by: Souptick Joarder 
---
 drivers/iommu/dma-iommu.c | 12 +---
 1 file changed, 1 insertion(+), 11 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d19f3d6..bdf14b87 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -620,17 +620,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t 
size, gfp_t gfp,
 
 int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct 
*vma)
 {
-   unsigned long uaddr = vma->vm_start;
-   unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-   int ret = -ENXIO;
-
-   for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
-   ret = vm_insert_page(vma, uaddr, pages[i]);
-   if (ret)
-   break;
-   uaddr += PAGE_SIZE;
-   }
-   return ret;
+   return vm_insert_range(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-- 
1.9.1
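
To see why this conversion is equivalent (illustrative numbers only):
for a 16-page buffer where userspace mmap()s an 8-page window at
vm_pgoff = 4, the old loop started at i = 4 and stopped once uaddr
reached vma->vm_end, mapping pages[4]..pages[11]. The new call passes
num = 16; inside vm_insert_range(), offset = vm_pgoff = 4 and
count = vma_pages(vma) = 8, so the same pages[4]..pages[11] are
inserted, and in addition a request with offset > num, or with
count > num - offset, now fails cleanly with -ENXIO instead of
silently mapping fewer pages than the vma covers.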



[PATCHv2 1/9] mm: Introduce new vm_insert_range and vm_insert_range_buggy API

2019-01-30 Thread Souptick Joarder
Previously drivers had their own way of mapping a range of
kernel pages/memory into a user vma, and this was done by
invoking vm_insert_page() within a loop.

As this pattern is common across different drivers, it can
be generalized by creating new functions and using them across
the drivers.

vm_insert_range() is the API which could be used to map
kernel memory/pages in drivers which have considered vm_pgoff.

vm_insert_range_buggy() is the API which could be used to map a
range of kernel memory/pages in drivers which have not considered
vm_pgoff. vm_pgoff is passed as 0 by default for those drivers.

We _could_ then at a later date "fix" these drivers which are using
vm_insert_range_buggy() to behave according to the normal vm_pgoff
offsetting simply by removing the _buggy suffix on the function
name and if that causes regressions, it gives us an easy way to revert.

Signed-off-by: Souptick Joarder 
Suggested-by: Russell King 
Suggested-by: Matthew Wilcox 
---
 include/linux/mm.h |  4 +++
 mm/memory.c| 81 ++
 mm/nommu.c | 14 ++
 3 files changed, 99 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 80bb640..25752b0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2565,6 +2565,10 @@ unsigned long change_prot_numa(struct vm_area_struct 
*vma,
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+int vm_insert_range(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num);
+int vm_insert_range_buggy(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num);
 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
diff --git a/mm/memory.c b/mm/memory.c
index e11ca9d..0a4bf57 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1520,6 +1520,87 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned 
long addr,
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+/**
+ * __vm_insert_range - insert range of kernel pages into user vma
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ * @offset: user's requested vm_pgoff
+ *
+ * This allows drivers to insert range of kernel pages they've allocated
+ * into a user vma.
+ *
+ * If we fail to insert any page into the vma, the function will return
+ * immediately leaving any previously inserted pages present.  Callers
+ * from the mmap handler may immediately return the error as their caller
+ * will destroy the vma, removing any successfully inserted pages. Other
+ * callers should make their own arrangements for calling unmap_region().
+ *
+ * Context: Process context.
+ * Return: 0 on success and error code otherwise.
+ */
+static int __vm_insert_range(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num, unsigned long offset)
+{
+   unsigned long count = vma_pages(vma);
+   unsigned long uaddr = vma->vm_start;
+   int ret, i;
+
+   /* Fail if the user requested offset is beyond the end of the object */
+   if (offset > num)
+   return -ENXIO;
+
+   /* Fail if the user requested size exceeds available object size */
+   if (count > num - offset)
+   return -ENXIO;
+
+   for (i = 0; i < count; i++) {
+   ret = vm_insert_page(vma, uaddr, pages[offset + i]);
+   if (ret < 0)
+   return ret;
+   uaddr += PAGE_SIZE;
+   }
+
+   return 0;
+}
+
+/**
+ * vm_insert_range - insert range of kernel pages starts with non zero offset
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ *
+ * Maps an object consisting of `num' `pages', catering for the user's
+ * requested vm_pgoff
+ *
+ * Context: Process context. Called by mmap handlers.
+ * Return: 0 on success and error code otherwise.
+ */
+int vm_insert_range(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num)
+{
+   return __vm_insert_range(vma, pages, num, vma->vm_pgoff);
+}
+EXPORT_SYMBOL(vm_insert_range);
+
+/**
+ * vm_insert_range_buggy - insert range of kernel pages starts with zero offset
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ *
+ * Maps a set of pages, always starting at page[0]
+ *
+ * Context: Process context. Called by mmap handlers.
+ * Return: 0 on success and error code otherwise.
+ */
+int vm_insert_range_buggy(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num)
+{
+   return __vm_insert_range(vma, pages, num, 0);
+}
+EXPORT_SYMBOL(vm_insert_range_buggy);
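
To make the intended split concrete, here are hypothetical callers of
each variant (the object and its field names are invented for
illustration):

        /* Offset-aware: userspace may mmap() a window into the
         * num_pages-page object; vma->vm_pgoff selects where that
         * window starts inside the page array. */
        err = vm_insert_range(vma, obj->pages, obj->num_pages);

        /* Legacy behaviour: always map starting at pages[0] and
         * ignore whatever offset userspace passed to mmap(). */
        err = vm_insert_range_buggy(vma, obj->pages, obj->num_pages);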

[PATCHv2 0/9] Use vm_insert_range and vm_insert_range_buggy

2019-01-30 Thread Souptick Joarder
Previously drivers had their own way of mapping a range of
kernel pages/memory into a user vma, and this was done by
invoking vm_insert_page() within a loop.

As this pattern is common across different drivers, it can
be generalized by creating new functions and using them across
the drivers.

vm_insert_range() is the API which could be used to map
kernel memory/pages in drivers which have considered vm_pgoff.

vm_insert_range_buggy() is the API which could be used to map a
range of kernel memory/pages in drivers which have not considered
vm_pgoff. vm_pgoff is passed as 0 by default for those drivers.

We _could_ then at a later date "fix" these drivers which are using
vm_insert_range_buggy() to behave according to the normal vm_pgoff
offsetting simply by removing the _buggy suffix on the function
name and if that causes regressions, it gives us an easy way to revert.

v1 -> v2:
Collected a few Reviewed-by tags.

Updated the change log in [8/9]

In [7/9], vm_pgoff is treated in the V4L2 API as a 'cookie'
to select a buffer, not as an in-buffer offset, by design,
and it always wants to mmap a whole buffer from its beginning.
Added additional changes after discussing with Marek, so that
vm_insert_range() could be used instead of vm_insert_range_buggy().
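
A rough sketch of what that means in the videobuf2 core mmap path
(internal helper names are used loosely here; treat this as an
illustration of the idea, not the exact merged hunk). The offset
encoded in vm_pgoff only identifies which buffer/plane to map; once
the buffer has been found, the offset has served its purpose and is
cleared, so the per-buffer mmap maps the whole buffer from its
beginning:

        ret = __find_plane_by_offset(q, off, &buffer, &plane);
        if (ret)
                goto unlock;

        vb = q->bufs[buffer];

        /* vm_pgoff was only a 'cookie' selecting the buffer; it is
         * not an in-buffer offset, so clear it before handing off
         * to the per-buffer mmap op. */
        vma->vm_pgoff = 0;

        ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);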

Souptick Joarder (9):
  mm: Introduce new vm_insert_range and vm_insert_range_buggy API
  arch/arm/mm/dma-mapping.c: Convert to use vm_insert_range
  drivers/firewire/core-iso.c: Convert to use vm_insert_range_buggy
  drm/rockchip/rockchip_drm_gem.c: Convert to use vm_insert_range
  drm/xen/xen_drm_front_gem.c: Convert to use vm_insert_range
  iommu/dma-iommu.c: Convert to use vm_insert_range
  videobuf2/videobuf2-dma-sg.c: Convert to use vm_insert_range
  xen/gntdev.c: Convert to use vm_insert_range
  xen/privcmd-buf.c: Convert to use vm_insert_range_buggy

 arch/arm/mm/dma-mapping.c  | 22 ++
 drivers/firewire/core-iso.c| 15 +---
 drivers/gpu/drm/rockchip/rockchip_drm_gem.c| 17 +
 drivers/gpu/drm/xen/xen_drm_front_gem.c| 18 ++---
 drivers/iommu/dma-iommu.c  | 12 +---
 drivers/media/common/videobuf2/videobuf2-core.c|  7 ++
 .../media/common/videobuf2/videobuf2-dma-contig.c  |  6 --
 drivers/media/common/videobuf2/videobuf2-dma-sg.c  | 22 ++
 drivers/xen/gntdev.c   | 16 ++---
 drivers/xen/privcmd-buf.c  |  8 +--
 include/linux/mm.h |  4 ++
 mm/memory.c| 81 ++
 mm/nommu.c | 14 
 13 files changed, 136 insertions(+), 106 deletions(-)

-- 
1.9.1



Re: [PATCH 6/9] iommu/dma-iommu.c: Convert to use vm_insert_range

2019-01-27 Thread Souptick Joarder
On Fri, Jan 11, 2019 at 8:37 PM Souptick Joarder  wrote:
>
> Convert to use vm_insert_range() to map range of kernel
> memory to user vma.
>
> Signed-off-by: Souptick Joarder 

Any comments on this patch?
> ---
>  drivers/iommu/dma-iommu.c | 12 +---
>  1 file changed, 1 insertion(+), 11 deletions(-)
>
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index d1b0475..802de67 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -622,17 +622,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t 
> size, gfp_t gfp,
>
>  int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct 
> *vma)
>  {
> -   unsigned long uaddr = vma->vm_start;
> -   unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
> -   int ret = -ENXIO;
> -
> -   for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
> -   ret = vm_insert_page(vma, uaddr, pages[i]);
> -   if (ret)
> -   break;
> -   uaddr += PAGE_SIZE;
> -   }
> -   return ret;
> +   return vm_insert_range(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
>  }
>
>  static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
> --
> 1.9.1
>


Re: [PATCH 1/9] mm: Introduce new vm_insert_range and vm_insert_range_buggy API

2019-01-21 Thread Souptick Joarder
On Fri, Jan 11, 2019 at 8:33 PM Souptick Joarder  wrote:
>
> Previously drivers had their own way of mapping a range of
> kernel pages/memory into a user vma, and this was done by
> invoking vm_insert_page() within a loop.
>
> As this pattern is common across different drivers, it can
> be generalized by creating new functions and using them across
> the drivers.
>
> vm_insert_range() is the API which could be used to map
> kernel memory/pages in drivers which have considered vm_pgoff.
>
> vm_insert_range_buggy() is the API which could be used to map a
> range of kernel memory/pages in drivers which have not considered
> vm_pgoff. vm_pgoff is passed as 0 by default for those drivers.
>
> We _could_ then at a later date "fix" these drivers which are using
> vm_insert_range_buggy() to behave according to the normal vm_pgoff
> offsetting simply by removing the _buggy suffix on the function
> name and if that causes regressions, it gives us an easy way to revert.
>
> Signed-off-by: Souptick Joarder 
> Suggested-by: Russell King 
> Suggested-by: Matthew Wilcox 

Any comments on these APIs?

> ---
>  include/linux/mm.h |  4 +++
>  mm/memory.c| 81 
> ++
>  mm/nommu.c | 14 ++
>  3 files changed, 99 insertions(+)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 5411de9..9d1dff6 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -2514,6 +2514,10 @@ unsigned long change_prot_numa(struct vm_area_struct 
> *vma,
>  int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
> unsigned long pfn, unsigned long size, pgprot_t);
>  int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page 
> *);
> +int vm_insert_range(struct vm_area_struct *vma, struct page **pages,
> +   unsigned long num);
> +int vm_insert_range_buggy(struct vm_area_struct *vma, struct page **pages,
> +   unsigned long num);
>  vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
> unsigned long pfn);
>  vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long 
> addr,
> diff --git a/mm/memory.c b/mm/memory.c
> index 4ad2d29..00e66df 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -1520,6 +1520,87 @@ int vm_insert_page(struct vm_area_struct *vma, 
> unsigned long addr,
>  }
>  EXPORT_SYMBOL(vm_insert_page);
>
> +/**
> + * __vm_insert_range - insert range of kernel pages into user vma
> + * @vma: user vma to map to
> + * @pages: pointer to array of source kernel pages
> + * @num: number of pages in page array
> + * @offset: user's requested vm_pgoff
> + *
> + * This allows drivers to insert range of kernel pages they've allocated
> + * into a user vma.
> + *
> + * If we fail to insert any page into the vma, the function will return
> + * immediately leaving any previously inserted pages present.  Callers
> + * from the mmap handler may immediately return the error as their caller
> + * will destroy the vma, removing any successfully inserted pages. Other
> + * callers should make their own arrangements for calling unmap_region().
> + *
> + * Context: Process context.
> + * Return: 0 on success and error code otherwise.
> + */
> +static int __vm_insert_range(struct vm_area_struct *vma, struct page **pages,
> +   unsigned long num, unsigned long offset)
> +{
> +   unsigned long count = vma_pages(vma);
> +   unsigned long uaddr = vma->vm_start;
> +   int ret, i;
> +
> +   /* Fail if the user requested offset is beyond the end of the object 
> */
> +   if (offset > num)
> +   return -ENXIO;
> +
> +   /* Fail if the user requested size exceeds available object size */
> +   if (count > num - offset)
> +   return -ENXIO;
> +
> +   for (i = 0; i < count; i++) {
> +   ret = vm_insert_page(vma, uaddr, pages[offset + i]);
> +   if (ret < 0)
> +   return ret;
> +   uaddr += PAGE_SIZE;
> +   }
> +
> +   return 0;
> +}
> +
> +/**
> + * vm_insert_range - insert range of kernel pages starts with non zero offset
> + * @vma: user vma to map to
> + * @pages: pointer to array of source kernel pages
> + * @num: number of pages in page array
> + *
> + * Maps an object consisting of `num' `pages', catering for the user's
> + * requested vm_pgoff
> + *
> + * Context: Process context. Called by mmap handlers.
> + * Return: 0 on success and error code otherwise.
> + */
> > +int vm_insert_range(struct vm_area_struct *vma, struct page **pages,
> > +   unsigned long num)
> > +{
> > +   return __vm_insert_range(vma, pages, num, vma->vm_pgoff);
> > +}
> > +EXPORT_SYMBOL(vm_insert_range);

Re: [PATCH 0/9] Use vm_insert_range and vm_insert_range_buggy

2019-01-17 Thread Souptick Joarder
On Fri, Jan 11, 2019 at 8:31 PM Souptick Joarder  wrote:
>
> Previously drivers had their own way of mapping a range of
> kernel pages/memory into a user vma, and this was done by
> invoking vm_insert_page() within a loop.
>
> As this pattern is common across different drivers, it can
> be generalized by creating new functions and using them across
> the drivers.
>
> vm_insert_range() is the API which could be used to map
> kernel memory/pages in drivers which have considered vm_pgoff.
>
> vm_insert_range_buggy() is the API which could be used to map a
> range of kernel memory/pages in drivers which have not considered
> vm_pgoff. vm_pgoff is passed as 0 by default for those drivers.
>
> We _could_ then at a later date "fix" these drivers which are using
> vm_insert_range_buggy() to behave according to the normal vm_pgoff
> offsetting simply by removing the _buggy suffix on the function
> name and if that causes regressions, it gives us an easy way to revert.
>
> There is an existing bug in [7/9], where the user-passed length is not
> verified against object_count. For any value of length > object_count
> it will end up overrunning the page array, which could lead to a
> potential bug. This is fixed as part of this conversion.
>
> Souptick Joarder (9):
>   mm: Introduce new vm_insert_range and vm_insert_range_buggy API
>   arch/arm/mm/dma-mapping.c: Convert to use vm_insert_range
>   drivers/firewire/core-iso.c: Convert to use vm_insert_range_buggy
>   drm/rockchip/rockchip_drm_gem.c: Convert to use vm_insert_range
>   drm/xen/xen_drm_front_gem.c: Convert to use vm_insert_range
>   iommu/dma-iommu.c: Convert to use vm_insert_range
>   videobuf2/videobuf2-dma-sg.c: Convert to use vm_insert_range_buggy
>   xen/gntdev.c: Convert to use vm_insert_range
>   xen/privcmd-buf.c: Convert to use vm_insert_range_buggy

Any further comments on these patches?

>
>  arch/arm/mm/dma-mapping.c | 22 ++
>  drivers/firewire/core-iso.c   | 15 +
>  drivers/gpu/drm/rockchip/rockchip_drm_gem.c   | 17 +
>  drivers/gpu/drm/xen/xen_drm_front_gem.c   | 18 ++---
>  drivers/iommu/dma-iommu.c | 12 +---
>  drivers/media/common/videobuf2/videobuf2-dma-sg.c | 22 ++
>  drivers/xen/gntdev.c  | 16 ++---
>  drivers/xen/privcmd-buf.c |  8 +--
>  include/linux/mm.h|  4 ++
>  mm/memory.c   | 81 
> +++
>  mm/nommu.c| 14 
>  11 files changed, 129 insertions(+), 100 deletions(-)
>
> --
> 1.9.1
>


[PATCH 6/9] iommu/dma-iommu.c: Convert to use vm_insert_range

2019-01-11 Thread Souptick Joarder
Convert to use vm_insert_range() to map range of kernel
memory to user vma.

Signed-off-by: Souptick Joarder 
---
 drivers/iommu/dma-iommu.c | 12 +---
 1 file changed, 1 insertion(+), 11 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d1b0475..802de67 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -622,17 +622,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t 
size, gfp_t gfp,
 
 int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct 
*vma)
 {
-   unsigned long uaddr = vma->vm_start;
-   unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-   int ret = -ENXIO;
-
-   for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
-   ret = vm_insert_page(vma, uaddr, pages[i]);
-   if (ret)
-   break;
-   uaddr += PAGE_SIZE;
-   }
-   return ret;
+   return vm_insert_range(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-- 
1.9.1



[PATCH 1/9] mm: Introduce new vm_insert_range and vm_insert_range_buggy API

2019-01-11 Thread Souptick Joarder
Previously drivers had their own way of mapping a range of
kernel pages/memory into a user vma, and this was done by
invoking vm_insert_page() within a loop.

As this pattern is common across different drivers, it can
be generalized by creating new functions and using them across
the drivers.

vm_insert_range() is the API which could be used to map
kernel memory/pages in drivers which have considered vm_pgoff.

vm_insert_range_buggy() is the API which could be used to map a
range of kernel memory/pages in drivers which have not considered
vm_pgoff. vm_pgoff is passed as 0 by default for those drivers.

We _could_ then at a later date "fix" these drivers which are using
vm_insert_range_buggy() to behave according to the normal vm_pgoff
offsetting simply by removing the _buggy suffix on the function
name and if that causes regressions, it gives us an easy way to revert.

Signed-off-by: Souptick Joarder 
Suggested-by: Russell King 
Suggested-by: Matthew Wilcox 
---
 include/linux/mm.h |  4 +++
 mm/memory.c| 81 ++
 mm/nommu.c | 14 ++
 3 files changed, 99 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5411de9..9d1dff6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2514,6 +2514,10 @@ unsigned long change_prot_numa(struct vm_area_struct 
*vma,
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+int vm_insert_range(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num);
+int vm_insert_range_buggy(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num);
 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
diff --git a/mm/memory.c b/mm/memory.c
index 4ad2d29..00e66df 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1520,6 +1520,87 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned 
long addr,
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+/**
+ * __vm_insert_range - insert range of kernel pages into user vma
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ * @offset: user's requested vm_pgoff
+ *
+ * This allows drivers to insert range of kernel pages they've allocated
+ * into a user vma.
+ *
+ * If we fail to insert any page into the vma, the function will return
+ * immediately leaving any previously inserted pages present.  Callers
+ * from the mmap handler may immediately return the error as their caller
+ * will destroy the vma, removing any successfully inserted pages. Other
+ * callers should make their own arrangements for calling unmap_region().
+ *
+ * Context: Process context.
+ * Return: 0 on success and error code otherwise.
+ */
+static int __vm_insert_range(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num, unsigned long offset)
+{
+   unsigned long count = vma_pages(vma);
+   unsigned long uaddr = vma->vm_start;
+   int ret, i;
+
+   /* Fail if the user requested offset is beyond the end of the object */
+   if (offset > num)
+   return -ENXIO;
+
+   /* Fail if the user requested size exceeds available object size */
+   if (count > num - offset)
+   return -ENXIO;
+
+   for (i = 0; i < count; i++) {
+   ret = vm_insert_page(vma, uaddr, pages[offset + i]);
+   if (ret < 0)
+   return ret;
+   uaddr += PAGE_SIZE;
+   }
+
+   return 0;
+}
+
+/**
+ * vm_insert_range - insert range of kernel pages starts with non zero offset
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ *
+ * Maps an object consisting of `num' `pages', catering for the user's
+ * requested vm_pgoff
+ *
+ * Context: Process context. Called by mmap handlers.
+ * Return: 0 on success and error code otherwise.
+ */
+int vm_insert_range(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num)
+{
+   return __vm_insert_range(vma, pages, num, vma->vm_pgoff);
+}
+EXPORT_SYMBOL(vm_insert_range);
+
+/**
+ * vm_insert_range_buggy - insert range of kernel pages starts with zero offset
+ * @vma: user vma to map to
+ * @pages: pointer to array of source kernel pages
+ * @num: number of pages in page array
+ *
+ * Maps a set of pages, always starting at page[0]
+ *
+ * Context: Process context. Called by mmap handlers.
+ * Return: 0 on success and error code otherwise.
+ */
+int vm_insert_range_buggy(struct vm_area_struct *vma, struct page **pages,
+   unsigned long num)
+{
+   return __vm_insert_range(vma, pages, num, 0);
+}
+EXPORT_SYMBOL(vm_insert_range_buggy);

[PATCH 0/9] Use vm_insert_range and vm_insert_range_buggy

2019-01-11 Thread Souptick Joarder
Previously drivers had their own way of mapping a range of
kernel pages/memory into a user vma, and this was done by
invoking vm_insert_page() within a loop.

As this pattern is common across different drivers, it can
be generalized by creating new functions and using them across
the drivers.

vm_insert_range() is the API which could be used to map
kernel memory/pages in drivers which have considered vm_pgoff.

vm_insert_range_buggy() is the API which could be used to map a
range of kernel memory/pages in drivers which have not considered
vm_pgoff. vm_pgoff is passed as 0 by default for those drivers.

We _could_ then at a later date "fix" these drivers which are using
vm_insert_range_buggy() to behave according to the normal vm_pgoff
offsetting simply by removing the _buggy suffix on the function
name and if that causes regressions, it gives us an easy way to revert.

There is an existing bug in [7/9], where the user-passed length is not
verified against object_count. For any value of length > object_count
it will end up overrunning the page array, which could lead to a
potential bug. This is fixed as part of this conversion.
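
For illustration (numbers invented), the overrun in the pre-conversion
pattern looks like this, with the loop bound taken from the vma rather
than from the buffer:

        /* buf->pages holds 4 pages, but userspace mmap()ed 6 pages,
         * so vma_pages(vma) == 6 and iterations i = 4, 5 read past
         * the end of the array. */
        for (i = 0; i < vma_pages(vma); i++) {
                ret = vm_insert_page(vma, uaddr, buf->pages[i]);
                if (ret)
                        return ret;
                uaddr += PAGE_SIZE;
        }

The "count > num - offset" check in __vm_insert_range() turns the same
request into a clean -ENXIO.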

Souptick Joarder (9):
  mm: Introduce new vm_insert_range and vm_insert_range_buggy API
  arch/arm/mm/dma-mapping.c: Convert to use vm_insert_range
  drivers/firewire/core-iso.c: Convert to use vm_insert_range_buggy
  drm/rockchip/rockchip_drm_gem.c: Convert to use vm_insert_range
  drm/xen/xen_drm_front_gem.c: Convert to use vm_insert_range
  iommu/dma-iommu.c: Convert to use vm_insert_range
  videobuf2/videobuf2-dma-sg.c: Convert to use vm_insert_range_buggy
  xen/gntdev.c: Convert to use vm_insert_range
  xen/privcmd-buf.c: Convert to use vm_insert_range_buggy

 arch/arm/mm/dma-mapping.c | 22 ++
 drivers/firewire/core-iso.c   | 15 +
 drivers/gpu/drm/rockchip/rockchip_drm_gem.c   | 17 +
 drivers/gpu/drm/xen/xen_drm_front_gem.c   | 18 ++---
 drivers/iommu/dma-iommu.c | 12 +---
 drivers/media/common/videobuf2/videobuf2-dma-sg.c | 22 ++
 drivers/xen/gntdev.c  | 16 ++---
 drivers/xen/privcmd-buf.c |  8 +--
 include/linux/mm.h|  4 ++
 mm/memory.c   | 81 +++
 mm/nommu.c| 14 
 11 files changed, 129 insertions(+), 100 deletions(-)

-- 
1.9.1



Re: [PATCH v5 0/9] Use vm_insert_range

2018-12-26 Thread Souptick Joarder
On Mon, Dec 24, 2018 at 8:51 PM Russell King - ARM Linux
 wrote:
>
> Having discussed with Matthew offlist, I think we've come to the
> following conclusion - there's a number of drivers that buggily
> ignore vm_pgoff.
>
> So, what I proposed is:
>
> static int __vm_insert_range(struct vm_area_struct *vma, struct page **pages,
>  size_t num, unsigned long offset)
> {
> unsigned long count = vma_pages(vma);
> unsigned long uaddr = vma->vm_start;
> int ret, i;
>
> /* Fail if the user requested offset is beyond the end of the object 
> */
> if (offset > num)
> return -ENXIO;
>
> /* Fail if the user requested size exceeds available object size */
> if (count > num - offset)
> return -ENXIO;
>
> /* Never exceed the number of pages that the user requested */
> for (i = 0; i < count; i++) {
> ret = vm_insert_page(vma, uaddr, pages[offset + i]);
> if (ret < 0)
> return ret;
> uaddr += PAGE_SIZE;
> }
>
> return 0;
> }
>
> /*
>  * Maps an object consisting of `num' `pages', catering for the user's
>  * requested vm_pgoff
>  */
> int vm_insert_range(struct vm_area_struct *vma, struct page **pages, size_t num)
> {
> return __vm_insert_range(vma, pages, num, vma->vm_pgoff);
> }
>
> /*
>  * Maps a set of pages, always starting at page[0]
>  */
> int vm_insert_range_buggy(struct vm_area_struct *vma, struct page **pages,
> size_t num)
> {
> return __vm_insert_range(vma, pages, num, 0);
> }
>
> With this, drivers such as iommu/dma-iommu.c can be converted thusly:
>
>  int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct 
> *vma)
>  {
> -   unsigned long uaddr = vma->vm_start;
> -   unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
> -   int ret = -ENXIO;
> -
> -   for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
> -   ret = vm_insert_page(vma, uaddr, pages[i]);
> -   if (ret)
> -   break;
> -   uaddr += PAGE_SIZE;
> -   }
> -   return ret;
> +   return vm_insert_range(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
> }
>
> and drivers such as firewire/core-iso.c:
>
>  int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
>   struct vm_area_struct *vma)
>  {
> -   unsigned long uaddr;
> -   int i, err;
> -
> -   uaddr = vma->vm_start;
> -   for (i = 0; i < buffer->page_count; i++) {
> -   err = vm_insert_page(vma, uaddr, buffer->pages[i]);
> -   if (err)
> -   return err;
> -
> -   uaddr += PAGE_SIZE;
> -   }
> -
> -   return 0;
> +   return vm_insert_range_buggy(vma, buffer->pages, buffer->page_count);
> }
>
> and this gives us something to grep for to find these buggy drivers.
>
> Now, this may not look exactly equivalent, but if you look at
> fw_device_op_mmap(), buffer->page_count is basically vma_pages(vma)
> at this point, which means this should be equivalent.
>
> We _could_ then at a later date "fix" these drivers to behave according
> to the normal vm_pgoff offsetting simply by removing the _buggy suffix
> on the function name... and if that causes regressions, it gives us an
> easy way to revert (as long as vm_insert_range_buggy() remains
> available.)
>
> In the case of firewire/core-iso.c, it currently ignores the mmap offset
> entirely, so making the above suggested change would be tantamount to
> causing it to return -ENXIO for any non-zero mmap offset.
>
> IMHO, this approach is way simpler, and easier to get it correct at
> each call site, rather than the current approach which seems to be
> error-prone.

Thanks Russell.
I will drop this patch series and rework it as suggested.
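
As a sketch of that later "fix" step (illustrative, not an actual
posted patch): switching a converted caller such as the firewire one
from the _buggy variant to the offset-honouring one is a one-line
change,

-   return vm_insert_range_buggy(vma, buffer->pages, buffer->page_count);
+   return vm_insert_range(vma, buffer->pages, buffer->page_count);

after which, since buffer->page_count matches vma_pages(vma) here, any
mmap() with a non-zero offset trips the "count > num - offset" check
and returns -ENXIO, exactly the behaviour change described above for
non-zero offsets.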


[PATCH v5 0/9] Use vm_insert_range

2018-12-24 Thread Souptick Joarder
v1 -> v2:
Addressed review comments on mm/memory.c. Added EXPORT_SYMBOL
for vm_insert_range and corrected the documentation
for this API.

In drivers/gpu/drm/xen/xen_drm_front_gem.c, replaced err
with ret as suggested.

In drivers/iommu/dma-iommu.c, handled the scenario of a partial
mmap() of a large buffer by passing *pages + vma->vm_pgoff* to
vm_insert_range().

v2 -> v3:
Declaration of vm_insert_range() moved to include/linux/mm.h.

v3 -> v4:
Addressed review comments.

In mm/memory.c, added error checks.

In arch/arm/mm/dma-mapping.c, removed part of the error checking, as
the same is checked inside vm_insert_range().

In rockchip/rockchip_drm_gem.c, vma->vm_pgoff is respected, as it
might be passed as a non-zero value for a partial mapping of a
large buffer.

In iommu/dma-iommu.c, count is modified to (count - vma->vm_pgoff)
to handle the partial mapping scenario introduced in v2.

v4 -> v5:
Addressed review comments on [2/9] and [4/9].

In arch/arm/mm/dma-mapping.c, re-added the error checks which were
removed in v4, as without them we might end up overrunning the
page array.

In rockchip/rockchip_drm_gem.c, added an error check which was removed
in v1, as without it we might overrun the page array. Adjusted the
page_count parameter before passing it to vm_insert_range().

Souptick Joarder (9):
  mm: Introduce new vm_insert_range API
  arch/arm/mm/dma-mapping.c: Convert to use vm_insert_range
  drivers/firewire/core-iso.c: Convert to use vm_insert_range
  drm/rockchip/rockchip_drm_gem.c: Convert to use vm_insert_range
  drm/xen/xen_drm_front_gem.c: Convert to use vm_insert_range
  iommu/dma-iommu.c: Convert to use vm_insert_range
  videobuf2/videobuf2-dma-sg.c: Convert to use vm_insert_range
  xen/gntdev.c: Convert to use vm_insert_range
  xen/privcmd-buf.c: Convert to use vm_insert_range

 arch/arm/mm/dma-mapping.c | 18 --
 drivers/firewire/core-iso.c   | 15 ++---
 drivers/gpu/drm/rockchip/rockchip_drm_gem.c   | 14 ++--
 drivers/gpu/drm/xen/xen_drm_front_gem.c   | 20 ---
 drivers/iommu/dma-iommu.c | 13 ++-
 drivers/media/common/videobuf2/videobuf2-dma-sg.c | 23 -
 drivers/xen/gntdev.c  | 11 +++---
 drivers/xen/privcmd-buf.c |  8 ++---
 include/linux/mm.h|  2 ++
 mm/memory.c   | 41 +++
 mm/nommu.c|  7 
 11 files changed, 83 insertions(+), 89 deletions(-)

-- 
1.9.1



Re: [PATCH v4 6/9] iommu/dma-iommu.c: Convert to use vm_insert_range

2018-12-18 Thread Souptick Joarder
On Tue, Dec 18, 2018 at 1:50 AM Souptick Joarder  wrote:
>
> Convert to use vm_insert_range() to map range of kernel
> memory to user vma.
>
> Signed-off-by: Souptick Joarder 
> Reviewed-by: Matthew Wilcox 

Cc'd: Robin Murphy
> ---
>  drivers/iommu/dma-iommu.c | 13 +++--
>  1 file changed, 3 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index d1b0475..de7ffd8 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -622,17 +622,10 @@ struct page **iommu_dma_alloc(struct device *dev, 
> size_t size, gfp_t gfp,
>
>  int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct 
> *vma)
>  {
> -   unsigned long uaddr = vma->vm_start;
> -   unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
> -   int ret = -ENXIO;
> +   unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
>
> -   for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
> -   ret = vm_insert_page(vma, uaddr, pages[i]);
> -   if (ret)
> -   break;
> -   uaddr += PAGE_SIZE;
> -   }
> -   return ret;
> +   return vm_insert_range(vma, vma->vm_start, pages + vma->vm_pgoff,
> +   count - vma->vm_pgoff);
>  }
>
>  static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
> --
> 1.9.1
>


[PATCH v4 6/9] iommu/dma-iommu.c: Convert to use vm_insert_range

2018-12-17 Thread Souptick Joarder
Convert to use vm_insert_range() to map range of kernel
memory to user vma.

Signed-off-by: Souptick Joarder 
Reviewed-by: Matthew Wilcox 
---
 drivers/iommu/dma-iommu.c | 13 +++--
 1 file changed, 3 insertions(+), 10 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d1b0475..de7ffd8 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -622,17 +622,10 @@ struct page **iommu_dma_alloc(struct device *dev, size_t 
size, gfp_t gfp,
 
 int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct 
*vma)
 {
-   unsigned long uaddr = vma->vm_start;
-   unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-   int ret = -ENXIO;
+   unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
-   for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
-   ret = vm_insert_page(vma, uaddr, pages[i]);
-   if (ret)
-   break;
-   uaddr += PAGE_SIZE;
-   }
-   return ret;
+   return vm_insert_range(vma, vma->vm_start, pages + vma->vm_pgoff,
+   count - vma->vm_pgoff);
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-- 
1.9.1



[PATCH v4 1/9] mm: Introduce new vm_insert_range API

2018-12-17 Thread Souptick Joarder
Previously, drivers had their own ways of mapping a range of
kernel pages/memory into a user vma, and this was done by
invoking vm_insert_page() within a loop.

As this pattern is common across different drivers, it can
be generalized by creating a new function and using it across
the drivers.

vm_insert_range() is the new API which will be used to map a
range of kernel memory/pages to a user vma.

This API was tested by Heiko for the Rockchip drm driver, on rk3188,
rk3288, rk3328 and rk3399 with graphics.

Signed-off-by: Souptick Joarder 
Reviewed-by: Matthew Wilcox 
Reviewed-by: Mike Rapoport 
Reviewed-by: Mauro Carvalho Chehab 
Tested-by: Heiko Stuebner 
---
 include/linux/mm.h |  2 ++
 mm/memory.c| 41 +
 mm/nommu.c |  7 +++
 3 files changed, 50 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index fcf9cc9..2bc399f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2506,6 +2506,8 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
+   struct page **pages, unsigned long page_count);
 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
diff --git a/mm/memory.c b/mm/memory.c
index 15c417e..d44d4a8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1478,6 +1478,47 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 }
 
 /**
+ * vm_insert_range - insert range of kernel pages into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pages: pointer to array of source kernel pages
+ * @page_count: number of pages need to insert into user vma
+ *
+ * This allows drivers to insert range of kernel pages they've allocated
+ * into a user vma. This is a generic function which drivers can use
+ * rather than using their own way of mapping range of kernel pages into
+ * user vma.
+ *
+ * If we fail to insert any page into the vma, the function will return
+ * immediately leaving any previously-inserted pages present.  Callers
+ * from the mmap handler may immediately return the error as their caller
+ * will destroy the vma, removing any successfully-inserted pages. Other
+ * callers should make their own arrangements for calling unmap_region().
+ *
+ * Context: Process context. Called by mmap handlers.
+ * Return: 0 on success and error code otherwise
+ */
+int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
+   struct page **pages, unsigned long page_count)
+{
+   unsigned long uaddr = addr;
+   int ret = 0, i;
+
+   if (page_count > vma_pages(vma))
+   return -ENXIO;
+
+   for (i = 0; i < page_count; i++) {
+   ret = vm_insert_page(vma, uaddr, pages[i]);
+   if (ret < 0)
+   return ret;
+   uaddr += PAGE_SIZE;
+   }
+
+   return ret;
+}
+EXPORT_SYMBOL(vm_insert_range);
+
+/**
  * vm_insert_page - insert single page into user vma
  * @vma: user vma to map to
  * @addr: target user address of this page
diff --git a/mm/nommu.c b/mm/nommu.c
index 749276b..d6ef5c7 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -473,6 +473,13 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
+   struct page **pages, unsigned long page_count)
+{
+   return -EINVAL;
+}
+EXPORT_SYMBOL(vm_insert_range);
+
 /*
  *  sys_brk() for the most part doesn't need the global kernel
  *  lock, except when an application is doing something nasty
-- 
1.9.1
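
For illustration, a minimal sketch of how a driver's mmap handler could
use the new API; "struct my_buf", its fields and "my_drv_mmap" are
hypothetical names, not taken from the series:

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical buffer object filled in by the driver's alloc path. */
struct my_buf {
	struct page **pages;		/* array of allocated pages */
	unsigned long page_count;	/* length of the pages array */
};

static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_buf *buf = file->private_data;
	unsigned long count = vma_pages(vma);

	/* Refuse requests that would run past the allocated array. */
	if (vma->vm_pgoff + count > buf->page_count)
		return -ENXIO;

	/*
	 * On failure, simply return the error: the core mm destroys
	 * the vma, removing any pages already inserted.
	 */
	return vm_insert_range(vma, vma->vm_start,
			       buf->pages + vma->vm_pgoff, count);
}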



[PATCH v4 0/9] Use vm_insert_range

2018-12-17 Thread Souptick Joarder
Previously, drivers had their own ways of mapping a range of
kernel pages/memory into a user vma, and this was done by
invoking vm_insert_page() within a loop.

As this pattern is common across different drivers, it can
be generalized by creating a new function and using it across
the drivers.

vm_insert_range() is the new API which will be used to map a
range of kernel memory/pages to a user vma.

All the applicable places are converted to use the new vm_insert_range()
in this patch series.

v1 -> v2:
Addressed review comments on mm/memory.c: added EXPORT_SYMBOL
for vm_insert_range and corrected the documentation
for this API.

In drivers/gpu/drm/xen/xen_drm_front_gem.c, replaced err
with ret as suggested.

In drivers/iommu/dma-iommu.c, handled the scenario of a partial
mmap() of a large buffer by passing *pages + vma->vm_pgoff* to
vm_insert_range().

v2 -> v3:
Moved the declaration of vm_insert_range() to include/linux/mm.h.

v3 -> v4:
Addressed review comments.

In mm/memory.c, added an error check.

In arch/arm/mm/dma-mapping.c, removed part of the error check, as
a similar check is done inside vm_insert_range().

In rockchip/rockchip_drm_gem.c, vma->vm_pgoff is now respected, as
it might be passed as a non-zero value for a partial mapping of a
large buffer.

In iommu/dma-iommu.c, count is modified to (count - vma->vm_pgoff)
to handle the partial mapping scenario introduced in v2.

Souptick Joarder (9):
  mm: Introduce new vm_insert_range API
  arch/arm/mm/dma-mapping.c: Convert to use vm_insert_range
  drivers/firewire/core-iso.c: Convert to use vm_insert_range
  drm/rockchip/rockchip_drm_gem.c: Convert to use vm_insert_range
  drm/xen/xen_drm_front_gem.c: Convert to use vm_insert_range
  iommu/dma-iommu.c: Convert to use vm_insert_range
  videobuf2/videobuf2-dma-sg.c: Convert to use vm_insert_range
  xen/gntdev.c: Convert to use vm_insert_range
  xen/privcmd-buf.c: Convert to use vm_insert_range

 arch/arm/mm/dma-mapping.c | 21 
 drivers/firewire/core-iso.c   | 15 ++---
 drivers/gpu/drm/rockchip/rockchip_drm_gem.c   | 19 ++-
 drivers/gpu/drm/xen/xen_drm_front_gem.c   | 20 ---
 drivers/iommu/dma-iommu.c | 13 ++-
 drivers/media/common/videobuf2/videobuf2-dma-sg.c | 23 -
 drivers/xen/gntdev.c  | 11 +++---
 drivers/xen/privcmd-buf.c |  8 ++---
 include/linux/mm.h|  2 ++
 mm/memory.c   | 41 +++
 mm/nommu.c|  7 
 11 files changed, 83 insertions(+), 97 deletions(-)

-- 
1.9.1



Re: [PATCH v3 1/9] mm: Introduce new vm_insert_range API

2018-12-07 Thread Souptick Joarder
On Sat, Dec 8, 2018 at 2:40 AM Robin Murphy  wrote:
>
> On 2018-12-07 7:28 pm, Souptick Joarder wrote:
> > On Fri, Dec 7, 2018 at 10:41 PM Matthew Wilcox  wrote:
> >>
> >> On Fri, Dec 07, 2018 at 03:34:56PM +, Robin Murphy wrote:
> >>>> +int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
> >>>> +   struct page **pages, unsigned long page_count)
> >>>> +{
> >>>> +   unsigned long uaddr = addr;
> >>>> +   int ret = 0, i;
> >>>
> >>> Some of the sites being replaced were effectively ensuring that vma and
> >>> pages were mutually compatible as an initial condition - would it be worth
> >>> adding something here for robustness, e.g.:
> >>>
> >>> + if (page_count != vma_pages(vma))
> >>> + return -ENXIO;
> >>
> >> I think we want to allow this to be used to populate part of a VMA.
> >> So perhaps:
> >>
> >>  if (page_count > vma_pages(vma))
> >>  return -ENXIO;
> >
> > Ok, This can be added.
> >
> > I think Patch [2/9] is the only leftover place where this
> > check could be removed.
>
> Right, 9/9 could also have relied on my stricter check here, but since
> it's really testing whether it actually managed to allocate vma_pages()
> worth of pages earlier, Matthew's more lenient version won't help for
> that one.


> (Why privcmd_buf_mmap() doesn't clean up and return an error
> as soon as that allocation loop fails, without taking the mutex under
> which it still does a bunch more pointless work to only undo it again,
> is a mind-boggling mystery, but that's not our problem here...)

I think some clean up can be done here in a separate patch.
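
For reference, the two checks discussed above side by side (illustrative
fragments only, with page_count and vma as in vm_insert_range()):

	/* strict variant: the pages must exactly cover the VMA */
	if (page_count != vma_pages(vma))
		return -ENXIO;

	/* lenient variant (the one picked up in v4): allow populating
	 * only part of the VMA
	 */
	if (page_count > vma_pages(vma))
		return -ENXIO;

vma_pages(vma) is simply (vma->vm_end - vma->vm_start) >> PAGE_SHIFT,
i.e. the number of pages the VMA spans.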


Re: [PATCH v3 6/9] iommu/dma-iommu.c: Convert to use vm_insert_range

2018-12-07 Thread Souptick Joarder
On Fri, Dec 7, 2018 at 7:17 PM Robin Murphy  wrote:
>
> On 06/12/2018 18:43, Souptick Joarder wrote:
> > Convert to use vm_insert_range() to map range of kernel
> > memory to user vma.
> >
> > Signed-off-by: Souptick Joarder 
> > Reviewed-by: Matthew Wilcox 
> > ---
> >   drivers/iommu/dma-iommu.c | 13 +++--
> >   1 file changed, 3 insertions(+), 10 deletions(-)
> >
> > diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> > index d1b0475..a2c65e2 100644
> > --- a/drivers/iommu/dma-iommu.c
> > +++ b/drivers/iommu/dma-iommu.c
> > @@ -622,17 +622,10 @@ struct page **iommu_dma_alloc(struct device *dev, 
> > size_t size, gfp_t gfp,
> >
> >   int iommu_dma_mmap(struct page **pages, size_t size, struct 
> > vm_area_struct *vma)
> >   {
> > - unsigned long uaddr = vma->vm_start;
> > - unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
> > - int ret = -ENXIO;
> > + unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
> >
> > - for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
> > - ret = vm_insert_page(vma, uaddr, pages[i]);
> > - if (ret)
> > - break;
> > - uaddr += PAGE_SIZE;
> > - }
> > - return ret;
> > + return vm_insert_range(vma, vma->vm_start,
> > + pages + vma->vm_pgoff, count);
>
> You also need to adjust count to compensate for the pages skipped by
> vm_pgoff, otherwise you've got an out-of-bounds dereference triggered
> from userspace, which is pretty high up the "not good" scale (not to
> mention the entire call would then propagate -EFAULT back from
> vm_insert_page() and thus always appear to fail for nonzero offsets).

So this should be something similar to:

return vm_insert_range(vma, vma->vm_start,
pages + vma->vm_pgoff, count - vma->vm_pgoff);
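
To make the arithmetic concrete, a worked example with hypothetical
numbers: an 8-page buffer (count = 8) mmap()ed with vma->vm_pgoff = 2
into a 6-page VMA:

	/* pages + vma->vm_pgoff  -> &pages[2]
	 * count - vma->vm_pgoff  -> 8 - 2 = 6 pages inserted, pages[2..7]
	 *
	 * Passing the unadjusted count = 8 would instead walk pages[2..9]
	 * and dereference two entries past the end of the array.
	 */
	return vm_insert_range(vma, vma->vm_start,
			pages + vma->vm_pgoff, count - vma->vm_pgoff);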


Re: [PATCH v3 1/9] mm: Introduce new vm_insert_range API

2018-12-07 Thread Souptick Joarder
On Fri, Dec 7, 2018 at 10:41 PM Matthew Wilcox  wrote:
>
> On Fri, Dec 07, 2018 at 03:34:56PM +, Robin Murphy wrote:
> > > +int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
> > > +   struct page **pages, unsigned long page_count)
> > > +{
> > > +   unsigned long uaddr = addr;
> > > +   int ret = 0, i;
> >
> > Some of the sites being replaced were effectively ensuring that vma and
> > pages were mutually compatible as an initial condition - would it be worth
> > adding something here for robustness, e.g.:
> >
> > + if (page_count != vma_pages(vma))
> > + return -ENXIO;
>
> I think we want to allow this to be used to populate part of a VMA.
> So perhaps:
>
> if (page_count > vma_pages(vma))
> return -ENXIO;

Ok, This can be added.

I think Patch [2/9] is the only leftover place where this
check could be removed.


[PATCH v3 6/9] iommu/dma-iommu.c: Convert to use vm_insert_range

2018-12-06 Thread Souptick Joarder
Convert to use vm_insert_range() to map a range of kernel
memory to a user vma.

Signed-off-by: Souptick Joarder 
Reviewed-by: Matthew Wilcox 
---
 drivers/iommu/dma-iommu.c | 13 +++--
 1 file changed, 3 insertions(+), 10 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d1b0475..a2c65e2 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -622,17 +622,10 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
 
 int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
 {
-   unsigned long uaddr = vma->vm_start;
-   unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-   int ret = -ENXIO;
+   unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
-   for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
-   ret = vm_insert_page(vma, uaddr, pages[i]);
-   if (ret)
-   break;
-   uaddr += PAGE_SIZE;
-   }
-   return ret;
+   return vm_insert_range(vma, vma->vm_start,
+   pages + vma->vm_pgoff, count);
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-- 
1.9.1



[PATCH v3 1/9] mm: Introduce new vm_insert_range API

2018-12-06 Thread Souptick Joarder
Previously, drivers had their own ways of mapping a range of
kernel pages/memory into a user vma, and this was done by
invoking vm_insert_page() within a loop.

As this pattern is common across different drivers, it can
be generalized by creating a new function and using it across
the drivers.

vm_insert_range() is the new API which will be used to map a
range of kernel memory/pages to a user vma.

This API was tested by Heiko for the Rockchip drm driver, on rk3188,
rk3288, rk3328 and rk3399 with graphics.

Signed-off-by: Souptick Joarder 
Reviewed-by: Matthew Wilcox 
Reviewed-by: Mike Rapoport 
Tested-by: Heiko Stuebner 
---
 include/linux/mm.h |  2 ++
 mm/memory.c| 38 ++
 mm/nommu.c |  7 +++
 3 files changed, 47 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index fcf9cc9..2bc399f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2506,6 +2506,8 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
+   struct page **pages, unsigned long page_count);
 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
diff --git a/mm/memory.c b/mm/memory.c
index 15c417e..84ea46c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1478,6 +1478,44 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 }
 
 /**
+ * vm_insert_range - insert range of kernel pages into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pages: pointer to array of source kernel pages
+ * @page_count: number of pages need to insert into user vma
+ *
+ * This allows drivers to insert range of kernel pages they've allocated
+ * into a user vma. This is a generic function which drivers can use
+ * rather than using their own way of mapping range of kernel pages into
+ * user vma.
+ *
+ * If we fail to insert any page into the vma, the function will return
+ * immediately leaving any previously-inserted pages present.  Callers
+ * from the mmap handler may immediately return the error as their caller
+ * will destroy the vma, removing any successfully-inserted pages. Other
+ * callers should make their own arrangements for calling unmap_region().
+ *
+ * Context: Process context. Called by mmap handlers.
+ * Return: 0 on success and error code otherwise
+ */
+int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
+   struct page **pages, unsigned long page_count)
+{
+   unsigned long uaddr = addr;
+   int ret = 0, i;
+
+   for (i = 0; i < page_count; i++) {
+   ret = vm_insert_page(vma, uaddr, pages[i]);
+   if (ret < 0)
+   return ret;
+   uaddr += PAGE_SIZE;
+   }
+
+   return ret;
+}
+EXPORT_SYMBOL(vm_insert_range);
+
+/**
  * vm_insert_page - insert single page into user vma
  * @vma: user vma to map to
  * @addr: target user address of this page
diff --git a/mm/nommu.c b/mm/nommu.c
index 749276b..d6ef5c7 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -473,6 +473,13 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
+   struct page **pages, unsigned long page_count)
+{
+   return -EINVAL;
+}
+EXPORT_SYMBOL(vm_insert_range);
+
 /*
  *  sys_brk() for the most part doesn't need the global kernel
  *  lock, except when an application is doing something nasty
-- 
1.9.1



[PATCH v3 0/9] Use vm_insert_range

2018-12-06 Thread Souptick Joarder
Previously, drivers had their own ways of mapping a range of
kernel pages/memory into a user vma, and this was done by
invoking vm_insert_page() within a loop.

As this pattern is common across different drivers, it can
be generalized by creating a new function and using it across
the drivers.

vm_insert_range() is the new API which will be used to map a
range of kernel memory/pages to a user vma.

All the applicable places are converted to use the new vm_insert_range()
in this patch series.

v1 -> v2:
Addressed review comments on mm/memory.c: added EXPORT_SYMBOL
for vm_insert_range and corrected the documentation
for this API.

In drivers/gpu/drm/xen/xen_drm_front_gem.c, replaced err
with ret as suggested.

In drivers/iommu/dma-iommu.c, handled the scenario of a partial
mmap() of a large buffer by passing *pages + vma->vm_pgoff* to
vm_insert_range().

v2 -> v3:
Moved the declaration of vm_insert_range() to include/linux/mm.h.

Souptick Joarder (9):
  mm: Introduce new vm_insert_range API
  arch/arm/mm/dma-mapping.c: Convert to use vm_insert_range
  drivers/firewire/core-iso.c: Convert to use vm_insert_range
  drm/rockchip/rockchip_drm_gem.c: Convert to use vm_insert_range
  drm/xen/xen_drm_front_gem.c: Convert to use vm_insert_range
  iommu/dma-iommu.c: Convert to use vm_insert_range
  videobuf2/videobuf2-dma-sg.c: Convert to use vm_insert_range
  xen/gntdev.c: Convert to use vm_insert_range
  xen/privcmd-buf.c: Convert to use vm_insert_range

 arch/arm/mm/dma-mapping.c | 21 +
 drivers/firewire/core-iso.c   | 15 ++---
 drivers/gpu/drm/rockchip/rockchip_drm_gem.c   | 20 ++--
 drivers/gpu/drm/xen/xen_drm_front_gem.c   | 20 
 drivers/iommu/dma-iommu.c | 13 ++--
 drivers/media/common/videobuf2/videobuf2-dma-sg.c | 23 +-
 drivers/xen/gntdev.c  | 11 +++
 drivers/xen/privcmd-buf.c |  8 ++---
 include/linux/mm.h|  2 ++
 mm/memory.c   | 38 +++
 mm/nommu.c|  7 +
 11 files changed, 80 insertions(+), 98 deletions(-)

-- 
1.9.1



Re: [PATCH v2 1/9] mm: Introduce new vm_insert_range API

2018-12-03 Thread Souptick Joarder
On Mon, Dec 3, 2018 at 11:52 AM Mike Rapoport  wrote:
>
> On Mon, Dec 03, 2018 at 09:51:45AM +0530, Souptick Joarder wrote:
> > Hi Mike,
> >
> > On Sun, Dec 2, 2018 at 4:43 PM Mike Rapoport  wrote:
> > >
> > > On Sun, Dec 02, 2018 at 11:49:44AM +0530, Souptick Joarder wrote:
> > > > Previouly drivers have their own way of mapping range of
> > > > kernel pages/memory into user vma and this was done by
> > > > invoking vm_insert_page() within a loop.
> > > >
> > > > As this pattern is common across different drivers, it can
> > > > be generalized by creating a new function and use it across
> > > > the drivers.
> > > >
> > > > vm_insert_range is the new API which will be used to map a
> > > > range of kernel memory/pages to user vma.
> > > >
> > > > This API is tested by Heiko for Rockchip drm driver, on rk3188,
> > > > rk3288, rk3328 and rk3399 with graphics.
> > > >
> > > > Signed-off-by: Souptick Joarder 
> > > > Reviewed-by: Matthew Wilcox 
> > > > Tested-by: Heiko Stuebner 
> > > > ---
> > > >  include/linux/mm_types.h |  3 +++
> > > >  mm/memory.c  | 38 ++
> > > >  mm/nommu.c   |  7 +++
> > > >  3 files changed, 48 insertions(+)
> > > >
> > > > diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> > > > index 5ed8f62..15ae24f 100644
> > > > --- a/include/linux/mm_types.h
> > > > +++ b/include/linux/mm_types.h
> > > > @@ -523,6 +523,9 @@ extern void tlb_gather_mmu(struct mmu_gather *tlb, 
> > > > struct mm_struct *mm,
> > > >  extern void tlb_finish_mmu(struct mmu_gather *tlb,
> > > >   unsigned long start, unsigned long end);
> > > >
> > > > +int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
> > > > + struct page **pages, unsigned long page_count);
> > > > +
> > >
> > > This seem to belong to include/linux/mm.h, near vm_insert_page()
> >
> > Ok, I will change it. Apart from this change does it looks good ?
>
> With this change you can add
>
> Reviewed-by: Mike Rapoport 

Thanks Mike.

>
> > >
> > > >  static inline void init_tlb_flush_pending(struct mm_struct *mm)
> > > >  {
> > > >   atomic_set(>tlb_flush_pending, 0);
> > > > diff --git a/mm/memory.c b/mm/memory.c
> > > > index 15c417e..84ea46c 100644
> > > > --- a/mm/memory.c
> > > > +++ b/mm/memory.c
> > > > @@ -1478,6 +1478,44 @@ static int insert_page(struct vm_area_struct 
> > > > *vma, unsigned long addr,
> > > >  }
> > > >
> > > >  /**
> > > > + * vm_insert_range - insert range of kernel pages into user vma
> > > > + * @vma: user vma to map to
> > > > + * @addr: target user address of this page
> > > > + * @pages: pointer to array of source kernel pages
> > > > + * @page_count: number of pages need to insert into user vma
> > > > + *
> > > > + * This allows drivers to insert range of kernel pages they've 
> > > > allocated
> > > > + * into a user vma. This is a generic function which drivers can use
> > > > + * rather than using their own way of mapping range of kernel pages 
> > > > into
> > > > + * user vma.
> > > > + *
> > > > + * If we fail to insert any page into the vma, the function will return
> > > > + * immediately leaving any previously-inserted pages present.  Callers
> > > > + * from the mmap handler may immediately return the error as their 
> > > > caller
> > > > + * will destroy the vma, removing any successfully-inserted pages. 
> > > > Other
> > > > + * callers should make their own arrangements for calling 
> > > > unmap_region().
> > > > + *
> > > > + * Context: Process context. Called by mmap handlers.
> > > > + * Return: 0 on success and error code otherwise
> > > > + */
> > > > +int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
> > > > + struct page **pages, unsigned long page_count)
> > > > +{
> > > > + unsigned long uaddr = addr;
> > > > + int ret = 0, i;
> > > > +
> > > >

Re: [PATCH v2 1/9] mm: Introduce new vm_insert_range API

2018-12-02 Thread Souptick Joarder
Hi Mike,

On Sun, Dec 2, 2018 at 4:43 PM Mike Rapoport  wrote:
>
> On Sun, Dec 02, 2018 at 11:49:44AM +0530, Souptick Joarder wrote:
> > Previouly drivers have their own way of mapping range of
> > kernel pages/memory into user vma and this was done by
> > invoking vm_insert_page() within a loop.
> >
> > As this pattern is common across different drivers, it can
> > be generalized by creating a new function and use it across
> > the drivers.
> >
> > vm_insert_range is the new API which will be used to map a
> > range of kernel memory/pages to user vma.
> >
> > This API is tested by Heiko for Rockchip drm driver, on rk3188,
> > rk3288, rk3328 and rk3399 with graphics.
> >
> > Signed-off-by: Souptick Joarder 
> > Reviewed-by: Matthew Wilcox 
> > Tested-by: Heiko Stuebner 
> > ---
> >  include/linux/mm_types.h |  3 +++
> >  mm/memory.c  | 38 ++
> >  mm/nommu.c   |  7 +++
> >  3 files changed, 48 insertions(+)
> >
> > diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> > index 5ed8f62..15ae24f 100644
> > --- a/include/linux/mm_types.h
> > +++ b/include/linux/mm_types.h
> > @@ -523,6 +523,9 @@ extern void tlb_gather_mmu(struct mmu_gather *tlb, 
> > struct mm_struct *mm,
> >  extern void tlb_finish_mmu(struct mmu_gather *tlb,
> >   unsigned long start, unsigned long end);
> >
> > +int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
> > + struct page **pages, unsigned long page_count);
> > +
>
> This seem to belong to include/linux/mm.h, near vm_insert_page()

Ok, I will change it. Apart from this change does it looks good ?

>
> >  static inline void init_tlb_flush_pending(struct mm_struct *mm)
> >  {
> >   atomic_set(>tlb_flush_pending, 0);
> > diff --git a/mm/memory.c b/mm/memory.c
> > index 15c417e..84ea46c 100644
> > --- a/mm/memory.c
> > +++ b/mm/memory.c
> > @@ -1478,6 +1478,44 @@ static int insert_page(struct vm_area_struct *vma, 
> > unsigned long addr,
> >  }
> >
> >  /**
> > + * vm_insert_range - insert range of kernel pages into user vma
> > + * @vma: user vma to map to
> > + * @addr: target user address of this page
> > + * @pages: pointer to array of source kernel pages
> > + * @page_count: number of pages need to insert into user vma
> > + *
> > + * This allows drivers to insert range of kernel pages they've allocated
> > + * into a user vma. This is a generic function which drivers can use
> > + * rather than using their own way of mapping range of kernel pages into
> > + * user vma.
> > + *
> > + * If we fail to insert any page into the vma, the function will return
> > + * immediately leaving any previously-inserted pages present.  Callers
> > + * from the mmap handler may immediately return the error as their caller
> > + * will destroy the vma, removing any successfully-inserted pages. Other
> > + * callers should make their own arrangements for calling unmap_region().
> > + *
> > + * Context: Process context. Called by mmap handlers.
> > + * Return: 0 on success and error code otherwise
> > + */
> > +int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
> > + struct page **pages, unsigned long page_count)
> > +{
> > + unsigned long uaddr = addr;
> > + int ret = 0, i;
> > +
> > + for (i = 0; i < page_count; i++) {
> > + ret = vm_insert_page(vma, uaddr, pages[i]);
> > + if (ret < 0)
> > + return ret;
> > + uaddr += PAGE_SIZE;
> > + }
> > +
> > + return ret;
> > +}
> > +EXPORT_SYMBOL(vm_insert_range);
> > +
> > +/**
> >   * vm_insert_page - insert single page into user vma
> >   * @vma: user vma to map to
> >   * @addr: target user address of this page
> > diff --git a/mm/nommu.c b/mm/nommu.c
> > index 749276b..d6ef5c7 100644
> > --- a/mm/nommu.c
> > +++ b/mm/nommu.c
> > @@ -473,6 +473,13 @@ int vm_insert_page(struct vm_area_struct *vma, 
> > unsigned long addr,
> >  }
> >  EXPORT_SYMBOL(vm_insert_page);
> >
> > +int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
> > + struct page **pages, unsigned long page_count)
> > +{
> > + return -EINVAL;
> > +}
> > +EXPORT_SYMBOL(vm_insert_range);
> > +
> >  /*
> >   *  sys_brk() for the most part doesn't need the global kernel
> >   *  lock, except when an application is doing something nasty
> > --
> > 1.9.1
> >
>
> --
> Sincerely yours,
> Mike.
>


[PATCH v2 6/9] iommu/dma-iommu.c: Convert to use vm_insert_range

2018-12-01 Thread Souptick Joarder
Convert to use vm_insert_range() to map a range of kernel
memory to a user vma.

Signed-off-by: Souptick Joarder 
Reviewed-by: Matthew Wilcox 
---
 drivers/iommu/dma-iommu.c | 13 +++--
 1 file changed, 3 insertions(+), 10 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d1b0475..a2c65e2 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -622,17 +622,10 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
 
 int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
 {
-   unsigned long uaddr = vma->vm_start;
-   unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-   int ret = -ENXIO;
+   unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
-   for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
-   ret = vm_insert_page(vma, uaddr, pages[i]);
-   if (ret)
-   break;
-   uaddr += PAGE_SIZE;
-   }
-   return ret;
+   return vm_insert_range(vma, vma->vm_start,
+   pages + vma->vm_pgoff, count);
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-- 
1.9.1



[PATCH v2 1/9] mm: Introduce new vm_insert_range API

2018-12-01 Thread Souptick Joarder
Previously, drivers had their own ways of mapping a range of
kernel pages/memory into a user vma, and this was done by
invoking vm_insert_page() within a loop.

As this pattern is common across different drivers, it can
be generalized by creating a new function and using it across
the drivers.

vm_insert_range() is the new API which will be used to map a
range of kernel memory/pages to a user vma.

This API was tested by Heiko for the Rockchip drm driver, on rk3188,
rk3288, rk3328 and rk3399 with graphics.

Signed-off-by: Souptick Joarder 
Reviewed-by: Matthew Wilcox 
Tested-by: Heiko Stuebner 
---
 include/linux/mm_types.h |  3 +++
 mm/memory.c  | 38 ++
 mm/nommu.c   |  7 +++
 3 files changed, 48 insertions(+)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5ed8f62..15ae24f 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -523,6 +523,9 @@ extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 extern void tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end);
 
+int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
+   struct page **pages, unsigned long page_count);
+
 static inline void init_tlb_flush_pending(struct mm_struct *mm)
 {
	atomic_set(&mm->tlb_flush_pending, 0);
diff --git a/mm/memory.c b/mm/memory.c
index 15c417e..84ea46c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1478,6 +1478,44 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 }
 
 /**
+ * vm_insert_range - insert range of kernel pages into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pages: pointer to array of source kernel pages
+ * @page_count: number of pages need to insert into user vma
+ *
+ * This allows drivers to insert range of kernel pages they've allocated
+ * into a user vma. This is a generic function which drivers can use
+ * rather than using their own way of mapping range of kernel pages into
+ * user vma.
+ *
+ * If we fail to insert any page into the vma, the function will return
+ * immediately leaving any previously-inserted pages present.  Callers
+ * from the mmap handler may immediately return the error as their caller
+ * will destroy the vma, removing any successfully-inserted pages. Other
+ * callers should make their own arrangements for calling unmap_region().
+ *
+ * Context: Process context. Called by mmap handlers.
+ * Return: 0 on success and error code otherwise
+ */
+int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
+   struct page **pages, unsigned long page_count)
+{
+   unsigned long uaddr = addr;
+   int ret = 0, i;
+
+   for (i = 0; i < page_count; i++) {
+   ret = vm_insert_page(vma, uaddr, pages[i]);
+   if (ret < 0)
+   return ret;
+   uaddr += PAGE_SIZE;
+   }
+
+   return ret;
+}
+EXPORT_SYMBOL(vm_insert_range);
+
+/**
  * vm_insert_page - insert single page into user vma
  * @vma: user vma to map to
  * @addr: target user address of this page
diff --git a/mm/nommu.c b/mm/nommu.c
index 749276b..d6ef5c7 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -473,6 +473,13 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
+   struct page **pages, unsigned long page_count)
+{
+   return -EINVAL;
+}
+EXPORT_SYMBOL(vm_insert_range);
+
 /*
  *  sys_brk() for the most part doesn't need the global kernel
  *  lock, except when an application is doing something nasty
-- 
1.9.1



[PATCH v2 0/9] Use vm_insert_range

2018-12-01 Thread Souptick Joarder
Previously, drivers had their own ways of mapping a range of
kernel pages/memory into a user vma, and this was done by
invoking vm_insert_page() within a loop.

As this pattern is common across different drivers, it can
be generalized by creating a new function and using it across
the drivers.

vm_insert_range() is the new API which will be used to map a
range of kernel memory/pages to a user vma.

All the applicable places are converted to use the new vm_insert_range()
in this patch series.

v1 -> v2:
Addressed review comments on mm/memory.c: added EXPORT_SYMBOL
for vm_insert_range and corrected the documentation
for this API.

In drivers/gpu/drm/xen/xen_drm_front_gem.c, replaced err
with ret as suggested.

In drivers/iommu/dma-iommu.c, handled the scenario of a partial
mmap() of a large buffer by passing *pages + vma->vm_pgoff* to
vm_insert_range().

Souptick Joarder (9):
  mm: Introduce new vm_insert_range API
  arch/arm/mm/dma-mapping.c: Convert to use vm_insert_range
  drivers/firewire/core-iso.c: Convert to use vm_insert_range
  drm/rockchip/rockchip_drm_gem.c: Convert to use vm_insert_range
  drm/xen/xen_drm_front_gem.c: Convert to use vm_insert_range
  iommu/dma-iommu.c: Convert to use vm_insert_range
  videobuf2/videobuf2-dma-sg.c: Convert to use vm_insert_range
  xen/gntdev.c: Convert to use vm_insert_range
  xen/privcmd-buf.c: Convert to use vm_insert_range

 arch/arm/mm/dma-mapping.c | 21 +
 drivers/firewire/core-iso.c   | 15 ++---
 drivers/gpu/drm/rockchip/rockchip_drm_gem.c   | 20 ++--
 drivers/gpu/drm/xen/xen_drm_front_gem.c   | 20 
 drivers/iommu/dma-iommu.c | 13 ++--
 drivers/media/common/videobuf2/videobuf2-dma-sg.c | 23 +-
 drivers/xen/gntdev.c  | 11 +++
 drivers/xen/privcmd-buf.c |  8 ++---
 include/linux/mm_types.h  |  3 ++
 mm/memory.c   | 38 +++
 mm/nommu.c|  7 +
 11 files changed, 81 insertions(+), 98 deletions(-)

-- 
1.9.1



Re: [PATCH 6/9] iommu/dma-iommu.c: Convert to use vm_insert_range

2018-11-25 Thread Souptick Joarder
On Sat, Nov 24, 2018 at 3:04 AM Matthew Wilcox  wrote:
>
> On Fri, Nov 23, 2018 at 05:23:06PM +, Robin Murphy wrote:
> > On 15/11/2018 15:49, Souptick Joarder wrote:
> > > Convert to use vm_insert_range() to map range of kernel
> > > memory to user vma.
> > >
> > > Signed-off-by: Souptick Joarder 
> > > Reviewed-by: Matthew Wilcox 
> > > ---
> > >   drivers/iommu/dma-iommu.c | 12 ++--
> > >   1 file changed, 2 insertions(+), 10 deletions(-)
> > >
> > > diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> > > index d1b0475..69c66b1 100644
> > > --- a/drivers/iommu/dma-iommu.c
> > > +++ b/drivers/iommu/dma-iommu.c
> > > @@ -622,17 +622,9 @@ struct page **iommu_dma_alloc(struct device *dev, 
> > > size_t size, gfp_t gfp,
> > >   int iommu_dma_mmap(struct page **pages, size_t size, struct 
> > > vm_area_struct *vma)
> > >   {
> > > -   unsigned long uaddr = vma->vm_start;
> > > -   unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
> > > -   int ret = -ENXIO;
> > > +   unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
> > > -   for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
> > > -   ret = vm_insert_page(vma, uaddr, pages[i]);
> > > -   if (ret)
> > > -   break;
> > > -   uaddr += PAGE_SIZE;
> > > -   }
> > > -   return ret;
> > > +   return vm_insert_range(vma, vma->vm_start, pages, count);
> >
> > AFIACS, vm_insert_range() doesn't respect vma->vm_pgoff, so doesn't this
> > break partial mmap()s of a large buffer? (which I believe can be a thing)
>
> Whoops.  That should have been:
>
> return vm_insert_range(vma, vma->vm_start, pages + vma->vm_pgoff, count);

I am unable to trace back where vma->vm_pgoff is set for this driver, if it
is set at all. If the default value is 0, then I think the existing code is
correct.

>
> I suppose.
>

> Although arguably we should respect vm_pgoff inside vm_insert_region()
> and then callers automatically get support for vm_pgoff without having
> to think about it ...

I assume vm_insert_region() means vm_insert_range(). If we respect vm_pgoff
inside vm_insert_range(), any uninitialized or erroneous value of vm_pgoff
coming from a driver will introduce a bug inside core mm which might be
difficult to trace back.
But when vm_pgoff is set and passed from the caller (the driver), such a
bug might be easier to figure out.

> although we should then also pass in the length
> of the pages array to avoid pages being mapped in which aren't part of
> the allocated array.

Mostly, partial mapping is done by starting from an index and mapping
until the end of the pages array. Calculating the length of the pages
array would add a small overhead for each driver.

Please correct me if I am wrong.
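
For context, vma->vm_pgoff comes from the page-aligned offset argument
of mmap(): the kernel stores offset >> PAGE_SHIFT in the vma before the
driver's mmap handler runs. A hypothetical userspace call mapping the
second half of a 16-page buffer, assuming fd is an open descriptor for
the device node and a 4 KiB page size:

	#include <sys/mman.h>

	/* The byte offset 8 * 4096 becomes vma->vm_pgoff == 8 in the
	 * kernel; the handler then sees an 8-page VMA starting there.
	 */
	void *p = mmap(NULL, 8 * 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 8 * 4096);
	if (p == MAP_FAILED)
		return -1;	/* handle the error */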


Re: [PATCH 6/9] iommu/dma-iommu.c: Convert to use vm_insert_range

2018-11-23 Thread Souptick Joarder
On Sat, Nov 24, 2018 at 3:04 AM Matthew Wilcox  wrote:
>
> On Fri, Nov 23, 2018 at 05:23:06PM +, Robin Murphy wrote:
> > On 15/11/2018 15:49, Souptick Joarder wrote:
> > > Convert to use vm_insert_range() to map range of kernel
> > > memory to user vma.
> > >
> > > Signed-off-by: Souptick Joarder 
> > > Reviewed-by: Matthew Wilcox 
> > > ---
> > >   drivers/iommu/dma-iommu.c | 12 ++--
> > >   1 file changed, 2 insertions(+), 10 deletions(-)
> > >
> > > diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> > > index d1b0475..69c66b1 100644
> > > --- a/drivers/iommu/dma-iommu.c
> > > +++ b/drivers/iommu/dma-iommu.c
> > > @@ -622,17 +622,9 @@ struct page **iommu_dma_alloc(struct device *dev, 
> > > size_t size, gfp_t gfp,
> > >   int iommu_dma_mmap(struct page **pages, size_t size, struct 
> > > vm_area_struct *vma)
> > >   {
> > > -   unsigned long uaddr = vma->vm_start;
> > > -   unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
> > > -   int ret = -ENXIO;
> > > +   unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
> > > -   for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
> > > -   ret = vm_insert_page(vma, uaddr, pages[i]);
> > > -   if (ret)
> > > -   break;
> > > -   uaddr += PAGE_SIZE;
> > > -   }
> > > -   return ret;
> > > +   return vm_insert_range(vma, vma->vm_start, pages, count);
> >
> > AFIACS, vm_insert_range() doesn't respect vma->vm_pgoff, so doesn't this
> > break partial mmap()s of a large buffer? (which I believe can be a thing)
>
> Whoops.  That should have been:
>
> return vm_insert_range(vma, vma->vm_start, pages + vma->vm_pgoff, count);
>
> I suppose.

Matthew, does patch "drm/rockchip/rockchip_drm_gem.c: Convert to use
vm_insert_range" also need to address the same issue?

>
> Although arguably we should respect vm_pgoff inside vm_insert_region()
> and then callers automatically get support for vm_pgoff without having
> to think about it ... although we should then also pass in the length
> of the pages array to avoid pages being mapped in which aren't part of
> the allocated array.
>
> Hm.  More thought required.


Re: [PATCH 0/9] Use vm_insert_range

2018-11-21 Thread Souptick Joarder
On Thu, Nov 22, 2018 at 1:08 AM Boris Ostrovsky
 wrote:
>
> On 11/21/18 1:24 AM, Souptick Joarder wrote:
> > On Thu, Nov 15, 2018 at 9:09 PM Souptick Joarder  
> > wrote:
> >> Previouly drivers have their own way of mapping range of
> >> kernel pages/memory into user vma and this was done by
> >> invoking vm_insert_page() within a loop.
> >>
> >> As this pattern is common across different drivers, it can
> >> be generalized by creating a new function and use it across
> >> the drivers.
> >>
> >> vm_insert_range is the new API which will be used to map a
> >> range of kernel memory/pages to user vma.
> >>
> >> All the applicable places are converted to use new vm_insert_range
> >> in this patch series.
> >>
> >> Souptick Joarder (9):
> >>   mm: Introduce new vm_insert_range API
> >>   arch/arm/mm/dma-mapping.c: Convert to use vm_insert_range
> >>   drivers/firewire/core-iso.c: Convert to use vm_insert_range
> >>   drm/rockchip/rockchip_drm_gem.c: Convert to use vm_insert_range
> >>   drm/xen/xen_drm_front_gem.c: Convert to use vm_insert_range
> >>   iommu/dma-iommu.c: Convert to use vm_insert_range
> >>   videobuf2/videobuf2-dma-sg.c: Convert to use vm_insert_range
> >>   xen/gntdev.c: Convert to use vm_insert_range
> >>   xen/privcmd-buf.c: Convert to use vm_insert_range
> > Any further comment on driver changes ?
>
> Xen drivers (the last two patches) look fine to me.

Thanks, can I consider this as a Reviewed-by?

>
> -boris
>
>
> >>  arch/arm/mm/dma-mapping.c | 21 ++---
> >>  drivers/firewire/core-iso.c   | 15 ++--
> >>  drivers/gpu/drm/rockchip/rockchip_drm_gem.c   | 20 ++--
> >>  drivers/gpu/drm/xen/xen_drm_front_gem.c   | 20 +---
> >>  drivers/iommu/dma-iommu.c | 12 ++
> >>  drivers/media/common/videobuf2/videobuf2-dma-sg.c | 23 ++-
> >>  drivers/xen/gntdev.c  | 11 -
> >>  drivers/xen/privcmd-buf.c |  8 ++-
> >>  include/linux/mm_types.h  |  3 +++
> >>  mm/memory.c   | 28 
> >> +++
> >>  mm/nommu.c|  7 ++
> >>  11 files changed, 70 insertions(+), 98 deletions(-)
> >>
> >> --
> >> 1.9.1
> >>
>


Re: [PATCH 0/9] Use vm_insert_range

2018-11-20 Thread Souptick Joarder
On Thu, Nov 15, 2018 at 9:09 PM Souptick Joarder  wrote:
>
> Previouly drivers have their own way of mapping range of
> kernel pages/memory into user vma and this was done by
> invoking vm_insert_page() within a loop.
>
> As this pattern is common across different drivers, it can
> be generalized by creating a new function and use it across
> the drivers.
>
> vm_insert_range is the new API which will be used to map a
> range of kernel memory/pages to user vma.
>
> All the applicable places are converted to use new vm_insert_range
> in this patch series.
>
> Souptick Joarder (9):
>   mm: Introduce new vm_insert_range API
>   arch/arm/mm/dma-mapping.c: Convert to use vm_insert_range
>   drivers/firewire/core-iso.c: Convert to use vm_insert_range
>   drm/rockchip/rockchip_drm_gem.c: Convert to use vm_insert_range
>   drm/xen/xen_drm_front_gem.c: Convert to use vm_insert_range
>   iommu/dma-iommu.c: Convert to use vm_insert_range
>   videobuf2/videobuf2-dma-sg.c: Convert to use vm_insert_range
>   xen/gntdev.c: Convert to use vm_insert_range
>   xen/privcmd-buf.c: Convert to use vm_insert_range

Any further comments on the driver changes?
>
>  arch/arm/mm/dma-mapping.c | 21 ++---
>  drivers/firewire/core-iso.c   | 15 ++--
>  drivers/gpu/drm/rockchip/rockchip_drm_gem.c   | 20 ++--
>  drivers/gpu/drm/xen/xen_drm_front_gem.c   | 20 +---
>  drivers/iommu/dma-iommu.c | 12 ++
>  drivers/media/common/videobuf2/videobuf2-dma-sg.c | 23 ++-
>  drivers/xen/gntdev.c  | 11 -
>  drivers/xen/privcmd-buf.c |  8 ++-
>  include/linux/mm_types.h  |  3 +++
>  mm/memory.c   | 28 
> +++
>  mm/nommu.c|  7 ++
>  11 files changed, 70 insertions(+), 98 deletions(-)
>
> --
> 1.9.1
>


Re: [PATCH 1/9] mm: Introduce new vm_insert_range API

2018-11-19 Thread Souptick Joarder
On Mon, Nov 19, 2018 at 9:56 PM Mike Rapoport  wrote:
>
> On Mon, Nov 19, 2018 at 08:43:09PM +0530, Souptick Joarder wrote:
> > Hi Mike,
> >
> > On Sat, Nov 17, 2018 at 8:07 PM Matthew Wilcox  wrote:
> > >
> > > On Sat, Nov 17, 2018 at 12:26:38PM +0530, Souptick Joarder wrote:
> > > > On Fri, Nov 16, 2018 at 11:59 PM Mike Rapoport  
> > > > wrote:
> > > > > > + * vm_insert_range - insert range of kernel pages into user vma
> > > > > > + * @vma: user vma to map to
> > > > > > + * @addr: target user address of this page
> > > > > > + * @pages: pointer to array of source kernel pages
> > > > > > + * @page_count: no. of pages need to insert into user vma
> > > > > > + *
> > > > > > + * This allows drivers to insert range of kernel pages they've 
> > > > > > allocated
> > > > > > + * into a user vma. This is a generic function which drivers can 
> > > > > > use
> > > > > > + * rather than using their own way of mapping range of kernel 
> > > > > > pages into
> > > > > > + * user vma.
> > > > >
> > > > > Please add the return value and context descriptions.
> > > > >
> > > >
> > > > Sure I will wait for some time to get additional review comments and
> > > > add all of those requested changes in v2.
> > >
> > > You could send your proposed wording now which might remove the need
> > > for a v3 if we end up arguing about the wording.
> >
> > Does this description looks good ?
> >
> > /**
> >  * vm_insert_range - insert range of kernel pages into user vma
> >  * @vma: user vma to map to
> >  * @addr: target user address of this page
> >  * @pages: pointer to array of source kernel pages
> >  * @page_count: number of pages need to insert into user vma
> >  *
> >  * This allows drivers to insert range of kernel pages they've allocated
> >  * into a user vma. This is a generic function which drivers can use
> >  * rather than using their own way of mapping range of kernel pages into
> >  * user vma.
> >  *
> >  * Context - Process context. Called by mmap handlers.
>
> Context:
>
> >  * Return - int error value
>
> Return:
>
> >  * 0- OK
> >  * -EINVAL  - Invalid argument
> >  * -ENOMEM  - No memory
> >  * -EFAULT  - Bad address
> >  * -EBUSY   - Device or resource busy
>
> I don't think that elaborate description of error values is needed, just "0
> on success and error code otherwise" would be sufficient.

/**
 * vm_insert_range - insert range of kernel pages into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @pages: pointer to array of source kernel pages
 * @page_count: number of pages need to insert into user vma
 *
 * This allows drivers to insert range of kernel pages they've allocated
 * into a user vma. This is a generic function which drivers can use
 * rather than using their own way of mapping range of kernel pages into
 * user vma.
 *
 * Context: Process context. Called by mmap handlers.
 * Return: 0 on success and error code otherwise
 */


Re: [PATCH 1/9] mm: Introduce new vm_insert_range API

2018-11-19 Thread Souptick Joarder
Hi Mike,

On Sat, Nov 17, 2018 at 8:07 PM Matthew Wilcox  wrote:
>
> On Sat, Nov 17, 2018 at 12:26:38PM +0530, Souptick Joarder wrote:
> > On Fri, Nov 16, 2018 at 11:59 PM Mike Rapoport  wrote:
> > > > + * vm_insert_range - insert range of kernel pages into user vma
> > > > + * @vma: user vma to map to
> > > > + * @addr: target user address of this page
> > > > + * @pages: pointer to array of source kernel pages
> > > > + * @page_count: no. of pages need to insert into user vma
> > > > + *
> > > > + * This allows drivers to insert range of kernel pages they've 
> > > > allocated
> > > > + * into a user vma. This is a generic function which drivers can use
> > > > + * rather than using their own way of mapping range of kernel pages 
> > > > into
> > > > + * user vma.
> > >
> > > Please add the return value and context descriptions.
> > >
> >
> > Sure I will wait for some time to get additional review comments and
> > add all of those requested changes in v2.
>
> You could send your proposed wording now which might remove the need
> for a v3 if we end up arguing about the wording.

Does this description look good?

/**
 * vm_insert_range - insert range of kernel pages into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @pages: pointer to array of source kernel pages
 * @page_count: number of pages need to insert into user vma
 *
 * This allows drivers to insert range of kernel pages they've allocated
 * into a user vma. This is a generic function which drivers can use
 * rather than using their own way of mapping range of kernel pages into
 * user vma.
 *
 * Context - Process context. Called by mmap handlers.
 * Return - int error value
 * 0- OK
 * -EINVAL  - Invalid argument
 * -ENOMEM  - No memory
 * -EFAULT  - Bad address
 * -EBUSY   - Device or resource busy
 */


Re: [PATCH 1/9] mm: Introduce new vm_insert_range API

2018-11-16 Thread Souptick Joarder
On Fri, Nov 16, 2018 at 11:59 PM Mike Rapoport  wrote:
>
> On Thu, Nov 15, 2018 at 09:15:30PM +0530, Souptick Joarder wrote:
> > Previouly drivers have their own way of mapping range of
> > kernel pages/memory into user vma and this was done by
> > invoking vm_insert_page() within a loop.
> >
> > As this pattern is common across different drivers, it can
> > be generalized by creating a new function and use it across
> > the drivers.
> >
> > vm_insert_range is the new API which will be used to map a
> > range of kernel memory/pages to user vma.
> >
> > Signed-off-by: Souptick Joarder 
> > Reviewed-by: Matthew Wilcox 
> > ---
> >  include/linux/mm_types.h |  3 +++
> >  mm/memory.c  | 28 
> >  mm/nommu.c   |  7 +++
> >  3 files changed, 38 insertions(+)
> >
> > diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> > index 5ed8f62..15ae24f 100644
> > --- a/include/linux/mm_types.h
> > +++ b/include/linux/mm_types.h
> > @@ -523,6 +523,9 @@ extern void tlb_gather_mmu(struct mmu_gather *tlb, 
> > struct mm_struct *mm,
> >  extern void tlb_finish_mmu(struct mmu_gather *tlb,
> >   unsigned long start, unsigned long end);
> >
> > +int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
> > + struct page **pages, unsigned long page_count);
> > +
> >  static inline void init_tlb_flush_pending(struct mm_struct *mm)
> >  {
> >   atomic_set(>tlb_flush_pending, 0);
> > diff --git a/mm/memory.c b/mm/memory.c
> > index 15c417e..da904ed 100644
> > --- a/mm/memory.c
> > +++ b/mm/memory.c
> > @@ -1478,6 +1478,34 @@ static int insert_page(struct vm_area_struct *vma, 
> > unsigned long addr,
> >  }
> >
> >  /**
> > + * vm_insert_range - insert range of kernel pages into user vma
> > + * @vma: user vma to map to
> > + * @addr: target user address of this page
> > + * @pages: pointer to array of source kernel pages
> > + * @page_count: no. of pages need to insert into user vma
> > + *
> > + * This allows drivers to insert range of kernel pages they've allocated
> > + * into a user vma. This is a generic function which drivers can use
> > + * rather than using their own way of mapping range of kernel pages into
> > + * user vma.
>
> Please add the return value and context descriptions.
>
> > + */
> > +int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
> > + struct page **pages, unsigned long page_count)
> > +{
> > + unsigned long uaddr = addr;
> > + int ret = 0, i;
> > +
> > + for (i = 0; i < page_count; i++) {
> > + ret = vm_insert_page(vma, uaddr, pages[i]);
> > + if (ret < 0)
> > + return ret;
> > + uaddr += PAGE_SIZE;
> > + }
> > +
> > + return ret;
> > +}
> > +
> > +/**
> >   * vm_insert_page - insert single page into user vma
> >   * @vma: user vma to map to
> >   * @addr: target user address of this page
> > diff --git a/mm/nommu.c b/mm/nommu.c
> > index 749276b..d6ef5c7 100644
> > --- a/mm/nommu.c
> > +++ b/mm/nommu.c
> > @@ -473,6 +473,13 @@ int vm_insert_page(struct vm_area_struct *vma, 
> > unsigned long addr,
> >  }
> >  EXPORT_SYMBOL(vm_insert_page);
> >
> > +int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
> > + struct page **pages, unsigned long page_count)
> > +{
> > + return -EINVAL;
> > +}
> > +EXPORT_SYMBOL(vm_insert_range);
> > +
> >  /*
> >   *  sys_brk() for the most part doesn't need the global kernel
> >   *  lock, except when an application is doing something nasty
> > --
> > 1.9.1
> >
>
> --
> Sincerely yours,
> Mike.
>

Sure, I will wait for some time to get additional review comments and
add all of the requested changes in v2.

Any further feedback on the driver changes in this patch series?


Re: [PATCH 1/9] mm: Introduce new vm_insert_range API

2018-11-16 Thread Souptick Joarder
On Fri, Nov 16, 2018 at 12:11 PM Matthew Wilcox  wrote:
>
> On Fri, Nov 16, 2018 at 11:00:30AM +0530, Souptick Joarder wrote:
> > On Thu, Nov 15, 2018 at 11:44 PM Randy Dunlap  wrote:
> > > On 11/15/18 7:45 AM, Souptick Joarder wrote:
> > > What is the opposite of vm_insert_range() or even of vm_insert_page()?
> > > or is there no need for that?
> >
> > There is no opposite function of vm_insert_range() / vm_insert_page().
> > My understanding is, in case of any error, mmap handlers will return the
> > err to user process and user space will decide the next action. So next
> > time when mmap handler is getting invoked it will map from the beginning.
> > Correct me if I am wrong.
>
> The opposite function, I suppose, is unmap_region().
>
> > > s/no./number/
> >
> > I didn't get it ??
>
> This is a 'sed' expression.  's' is the 'substitute' command; the /
> is a separator, 'no.' is what you wrote, and 'number' is what Randy
> is recommending instead.

Ok. Will change it in v2.
>
> > > > + for (i = 0; i < page_count; i++) {
> > > > + ret = vm_insert_page(vma, uaddr, pages[i]);
> > > > + if (ret < 0)
> > > > + return ret;
> > >
> > > For a non-trivial value of page_count:
> > > Is it a problem if vm_insert_page() succeeds for several pages
> > > and then fails?
> >
> > No, it will be considered as total failure and mmap handler will return
> > the err to user space.
>
> I think what Randy means is "What happens to the inserted pages?" and
> the answer is that mmap_region() jumps to the 'unmap_and_free_vma'
> label, which is an accurate name.

Sorry, I misunderstood the question.
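
For completeness, a simplified sketch (not the verbatim kernel code) of
the relevant path in mm/mmap.c:mmap_region(), which is what makes
"return the error" sufficient for mmap handlers:

	error = call_mmap(file, vma);	/* invokes the driver's ->mmap() */
	if (error)
		goto unmap_and_free_vma;	/* tears the mapping down,
						 * including any pages the
						 * handler already inserted
						 */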


Re: [PATCH 1/9] mm: Introduce new vm_insert_range API

2018-11-15 Thread Souptick Joarder
On Thu, Nov 15, 2018 at 11:44 PM Randy Dunlap  wrote:
>
> On 11/15/18 7:45 AM, Souptick Joarder wrote:
> > Previouly drivers have their own way of mapping range of
> > kernel pages/memory into user vma and this was done by
> > invoking vm_insert_page() within a loop.
> >
> > As this pattern is common across different drivers, it can
> > be generalized by creating a new function and use it across
> > the drivers.
> >
> > vm_insert_range is the new API which will be used to map a
> > range of kernel memory/pages to user vma.
> >
> > Signed-off-by: Souptick Joarder 
> > Reviewed-by: Matthew Wilcox 
> > ---
> >  include/linux/mm_types.h |  3 +++
> >  mm/memory.c  | 28 
> >  mm/nommu.c   |  7 +++
> >  3 files changed, 38 insertions(+)
>
> Hi,
>
> What is the opposite of vm_insert_range() or even of vm_insert_page()?
> or is there no need for that?

There is no opposite function of vm_insert_range() / vm_insert_page().
My understanding is that, in case of any error, the mmap handler will return
the error to the user process and user space will decide the next action. So
the next time the mmap handler is invoked, it will map from the beginning.
Correct me if I am wrong.
>
>
> > diff --git a/mm/memory.c b/mm/memory.c
> > index 15c417e..da904ed 100644
> > --- a/mm/memory.c
> > +++ b/mm/memory.c
> > @@ -1478,6 +1478,34 @@ static int insert_page(struct vm_area_struct *vma, 
> > unsigned long addr,
> >  }
> >
> >  /**
> > + * vm_insert_range - insert range of kernel pages into user vma
> > + * @vma: user vma to map to
> > + * @addr: target user address of this page
> > + * @pages: pointer to array of source kernel pages
> > + * @page_count: no. of pages need to insert into user vma
>
> s/no./number/

I didn't get it ??
>
> > + *
> > + * This allows drivers to insert range of kernel pages they've allocated
> > + * into a user vma. This is a generic function which drivers can use
> > + * rather than using their own way of mapping range of kernel pages into
> > + * user vma.
> > + */
> > +int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
> > + struct page **pages, unsigned long page_count)
> > +{
> > + unsigned long uaddr = addr;
> > + int ret = 0, i;
> > +
> > + for (i = 0; i < page_count; i++) {
> > + ret = vm_insert_page(vma, uaddr, pages[i]);
> > + if (ret < 0)
> > + return ret;
>
> For a non-trivial value of page_count:
> Is it a problem if vm_insert_page() succeeds for several pages
> and then fails?

No, it will be considered a total failure and the mmap handler will return
the error to user space.
>
> > + uaddr += PAGE_SIZE;
> > + }
> > +
> > + return ret;
> > +}
> > +
> > +/**
> >   * vm_insert_page - insert single page into user vma
> >   * @vma: user vma to map to
> >   * @addr: target user address of this page
>
>
> thanks.
> --
> ~Randy


[PATCH 6/9] iommu/dma-iommu.c: Convert to use vm_insert_range

2018-11-15 Thread Souptick Joarder
Convert to use vm_insert_range() to map a range of kernel
memory to a user vma.

Signed-off-by: Souptick Joarder 
Reviewed-by: Matthew Wilcox 
---
 drivers/iommu/dma-iommu.c | 12 ++--
 1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d1b0475..69c66b1 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -622,17 +622,9 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
 
 int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
 {
-   unsigned long uaddr = vma->vm_start;
-   unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-   int ret = -ENXIO;
+   unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
-   for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
-   ret = vm_insert_page(vma, uaddr, pages[i]);
-   if (ret)
-   break;
-   uaddr += PAGE_SIZE;
-   }
-   return ret;
+   return vm_insert_range(vma, vma->vm_start, pages, count);
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-- 
1.9.1



[PATCH 1/9] mm: Introduce new vm_insert_range API

2018-11-15 Thread Souptick Joarder
Previously, drivers had their own way of mapping a range of
kernel pages/memory into a user vma, typically by invoking
vm_insert_page() within a loop.

As this pattern is common across different drivers, it can
be generalized by creating a new function and using it across
the drivers.

vm_insert_range is the new API which will be used to map a
range of kernel memory/pages to a user vma.

Signed-off-by: Souptick Joarder 
Reviewed-by: Matthew Wilcox 
---
 include/linux/mm_types.h |  3 +++
 mm/memory.c  | 28 
 mm/nommu.c   |  7 +++
 3 files changed, 38 insertions(+)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5ed8f62..15ae24f 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -523,6 +523,9 @@ extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 extern void tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end);
 
+int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
+   struct page **pages, unsigned long page_count);
+
 static inline void init_tlb_flush_pending(struct mm_struct *mm)
 {
	atomic_set(&mm->tlb_flush_pending, 0);
diff --git a/mm/memory.c b/mm/memory.c
index 15c417e..da904ed 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1478,6 +1478,34 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 }
 
 /**
+ * vm_insert_range - insert range of kernel pages into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pages: pointer to array of source kernel pages
+ * @page_count: no. of pages need to insert into user vma
+ *
+ * This allows drivers to insert a range of kernel pages they've allocated
+ * into a user vma. This is a generic function which drivers can use
+ * rather than using their own way of mapping a range of kernel pages into
+ * a user vma.
+ */
+int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
+   struct page **pages, unsigned long page_count)
+{
+   unsigned long uaddr = addr;
+   int ret = 0, i;
+
+   for (i = 0; i < page_count; i++) {
+   ret = vm_insert_page(vma, uaddr, pages[i]);
+   if (ret < 0)
+   return ret;
+   uaddr += PAGE_SIZE;
+   }
+
+   return ret;
+}
+
+/**
  * vm_insert_page - insert single page into user vma
  * @vma: user vma to map to
  * @addr: target user address of this page
diff --git a/mm/nommu.c b/mm/nommu.c
index 749276b..d6ef5c7 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -473,6 +473,13 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+int vm_insert_range(struct vm_area_struct *vma, unsigned long addr,
+   struct page **pages, unsigned long page_count)
+{
+   return -EINVAL;
+}
+EXPORT_SYMBOL(vm_insert_range);
+
 /*
  *  sys_brk() for the most part doesn't need the global kernel
  *  lock, except when an application is doing something nasty
-- 
1.9.1



[PATCH 0/9] Use vm_insert_range

2018-11-15 Thread Souptick Joarder
Previously, drivers had their own way of mapping a range of
kernel pages/memory into a user vma, typically by invoking
vm_insert_page() within a loop.

As this pattern is common across different drivers, it can
be generalized by creating a new function and using it across
the drivers.

vm_insert_range is the new API which will be used to map a
range of kernel memory/pages to a user vma.

All the applicable places are converted to use the new
vm_insert_range in this patch series.

Souptick Joarder (9):
  mm: Introduce new vm_insert_range API
  arch/arm/mm/dma-mapping.c: Convert to use vm_insert_range
  drivers/firewire/core-iso.c: Convert to use vm_insert_range
  drm/rockchip/rockchip_drm_gem.c: Convert to use vm_insert_range
  drm/xen/xen_drm_front_gem.c: Convert to use vm_insert_range
  iommu/dma-iommu.c: Convert to use vm_insert_range
  videobuf2/videobuf2-dma-sg.c: Convert to use vm_insert_range
  xen/gntdev.c: Convert to use vm_insert_range
  xen/privcmd-buf.c: Convert to use vm_insert_range

 arch/arm/mm/dma-mapping.c | 21 ++---
 drivers/firewire/core-iso.c   | 15 ++--
 drivers/gpu/drm/rockchip/rockchip_drm_gem.c   | 20 ++--
 drivers/gpu/drm/xen/xen_drm_front_gem.c   | 20 +---
 drivers/iommu/dma-iommu.c | 12 ++
 drivers/media/common/videobuf2/videobuf2-dma-sg.c | 23 ++-
 drivers/xen/gntdev.c  | 11 -
 drivers/xen/privcmd-buf.c |  8 ++-
 include/linux/mm_types.h  |  3 +++
 mm/memory.c   | 28 +++
 mm/nommu.c|  7 ++
 11 files changed, 70 insertions(+), 98 deletions(-)

-- 
1.9.1



[PATCH v2] mm: convert return type of handle_mm_fault() caller to vm_fault_t

2018-06-17 Thread Souptick Joarder
Use the new return type vm_fault_t for fault handlers. For
now, this just documents that the function returns
a VM_FAULT value rather than an errno. Once all instances
are converted, vm_fault_t will become a distinct type.

Ref-> commit 1c8f422059ae ("mm: change return type to vm_fault_t")

In this patch all the callers of handle_mm_fault()
are changed to return the vm_fault_t type.
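
For context, a sketch of the two stages (the transitional typedef is
what the tree has today; the final __bitwise form below is the stated
plan, not something this patch introduces):

	/* Stage 1 (now): a plain alias, documentation only. */
	typedef int vm_fault_t;

	/*
	 * Stage 2 (after all callers are converted): a distinct type, so
	 * sparse warns wherever a vm_fault_t is mixed with an errno-style
	 * int.
	 */
	typedef unsigned int __bitwise vm_fault_t;
	#define VM_FAULT_OOM	((__force vm_fault_t)0x000001)
	#define VM_FAULT_SIGBUS	((__force vm_fault_t)0x000002)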

Signed-off-by: Souptick Joarder 
---
v2: Fixed kbuild error

 arch/alpha/mm/fault.c |  3 ++-
 arch/arc/mm/fault.c   |  4 +++-
 arch/arm/mm/fault.c   |  7 ---
 arch/arm64/mm/fault.c |  6 +++---
 arch/hexagon/mm/vm_fault.c|  2 +-
 arch/ia64/mm/fault.c  |  2 +-
 arch/m68k/mm/fault.c  |  4 ++--
 arch/microblaze/mm/fault.c|  2 +-
 arch/mips/mm/fault.c  |  2 +-
 arch/nds32/mm/fault.c |  2 +-
 arch/nios2/mm/fault.c |  2 +-
 arch/openrisc/mm/fault.c  |  2 +-
 arch/parisc/mm/fault.c|  2 +-
 arch/powerpc/include/asm/copro.h  |  4 +++-
 arch/powerpc/mm/copro_fault.c |  2 +-
 arch/powerpc/mm/fault.c   |  7 ---
 arch/powerpc/platforms/cell/spufs/fault.c |  2 +-
 arch/riscv/mm/fault.c |  3 ++-
 arch/s390/mm/fault.c  | 13 -
 arch/sh/mm/fault.c|  4 ++--
 arch/sparc/mm/fault_32.c  |  3 ++-
 arch/sparc/mm/fault_64.c  |  3 ++-
 arch/um/kernel/trap.c |  2 +-
 arch/unicore32/mm/fault.c |  9 +
 arch/x86/mm/fault.c   |  5 +++--
 arch/xtensa/mm/fault.c|  2 +-
 drivers/iommu/amd_iommu_v2.c  |  2 +-
 drivers/iommu/intel-svm.c |  4 +++-
 drivers/misc/cxl/fault.c  |  2 +-
 drivers/misc/ocxl/link.c  |  3 ++-
 mm/hmm.c  |  8 
 mm/ksm.c  |  2 +-
 32 files changed, 69 insertions(+), 51 deletions(-)

diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index cd3c572..2a979ee 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -87,7 +87,8 @@
struct vm_area_struct * vma;
struct mm_struct *mm = current->mm;
const struct exception_table_entry *fixup;
-   int fault, si_code = SEGV_MAPERR;
+   int si_code = SEGV_MAPERR;
+   vm_fault_t fault;
siginfo_t info;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index a0b7bd6..3a18d33 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -15,6 +15,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
@@ -66,7 +67,8 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
siginfo_t info;
-   int fault, ret;
+   int ret;
+   vm_fault_t fault;
int write = regs->ecr_cause & ECR_C_PROTV_STORE;  /* ST/EX */
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index b75eada..758abcb 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -219,12 +219,12 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
return vma->vm_flags & mask ? false : true;
 }
 
-static int __kprobes
+static vm_fault_t __kprobes
 __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
unsigned int flags, struct task_struct *tsk)
 {
struct vm_area_struct *vma;
-   int fault;
+   vm_fault_t fault;
 
vma = find_vma(mm, addr);
fault = VM_FAULT_BADMAP;
@@ -259,7 +259,8 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 {
struct task_struct *tsk;
struct mm_struct *mm;
-   int fault, sig, code;
+   int sig, code;
+   vm_fault_t fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
if (notify_page_fault(regs, fsr))
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 2af3dd8..8da263b 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -371,12 +371,12 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
 #define VM_FAULT_BADMAP		0x01
 #define VM_FAULT_BADACCESS	0x02
 
-static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
+static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
   unsigned int mm_flags, unsigned long vm_flags,
   struct task_struct *tsk)
 {
	struct vm_area_struct *vma;

[PATCH] mm: convert return type of handle_mm_fault() caller to vm_fault_t

2018-06-14 Thread Souptick Joarder
Use the new return type vm_fault_t for fault handlers. For
now, this just documents that the function returns
a VM_FAULT value rather than an errno. Once all instances
are converted, vm_fault_t will become a distinct type.

Ref-> commit 1c8f422059ae ("mm: change return type to vm_fault_t")

In this patch all the callers of handle_mm_fault()
are changed to return the vm_fault_t type.

Signed-off-by: Souptick Joarder 
---
 arch/alpha/mm/fault.c |  3 ++-
 arch/arc/mm/fault.c   |  4 +++-
 arch/arm/mm/fault.c   |  7 ---
 arch/arm64/mm/fault.c |  6 +++---
 arch/hexagon/mm/vm_fault.c|  2 +-
 arch/ia64/mm/fault.c  |  2 +-
 arch/m68k/mm/fault.c  |  4 ++--
 arch/microblaze/mm/fault.c|  2 +-
 arch/mips/mm/fault.c  |  2 +-
 arch/nds32/mm/fault.c |  2 +-
 arch/nios2/mm/fault.c |  2 +-
 arch/openrisc/mm/fault.c  |  2 +-
 arch/parisc/mm/fault.c|  2 +-
 arch/powerpc/mm/copro_fault.c |  2 +-
 arch/powerpc/mm/fault.c   |  7 ---
 arch/riscv/mm/fault.c |  3 ++-
 arch/s390/mm/fault.c  | 13 -
 arch/sh/mm/fault.c|  4 ++--
 arch/sparc/mm/fault_32.c  |  3 ++-
 arch/sparc/mm/fault_64.c  |  3 ++-
 arch/um/kernel/trap.c |  2 +-
 arch/unicore32/mm/fault.c |  9 +
 arch/x86/mm/fault.c   |  5 +++--
 arch/xtensa/mm/fault.c|  2 +-
 drivers/iommu/amd_iommu_v2.c  |  2 +-
 drivers/iommu/intel-svm.c |  4 +++-
 mm/hmm.c  |  8 
 mm/ksm.c  |  2 +-
 28 files changed, 62 insertions(+), 47 deletions(-)

diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index cd3c572..2a979ee 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -87,7 +87,8 @@
struct vm_area_struct * vma;
struct mm_struct *mm = current->mm;
const struct exception_table_entry *fixup;
-   int fault, si_code = SEGV_MAPERR;
+   int si_code = SEGV_MAPERR;
+   vm_fault_t fault;
siginfo_t info;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index a0b7bd6..3a18d33 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -15,6 +15,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
@@ -66,7 +67,8 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
siginfo_t info;
-   int fault, ret;
+   int ret;
+   vm_fault_t fault;
int write = regs->ecr_cause & ECR_C_PROTV_STORE;  /* ST/EX */
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index b75eada..758abcb 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -219,12 +219,12 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
return vma->vm_flags & mask ? false : true;
 }
 
-static int __kprobes
+static vm_fault_t __kprobes
 __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
unsigned int flags, struct task_struct *tsk)
 {
struct vm_area_struct *vma;
-   int fault;
+   vm_fault_t fault;
 
vma = find_vma(mm, addr);
fault = VM_FAULT_BADMAP;
@@ -259,7 +259,8 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 {
struct task_struct *tsk;
struct mm_struct *mm;
-   int fault, sig, code;
+   int sig, code;
+   vm_fault_t fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
if (notify_page_fault(regs, fsr))
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 2af3dd8..8da263b 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -371,12 +371,12 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
 #define VM_FAULT_BADMAP		0x01
 #define VM_FAULT_BADACCESS	0x02
 
-static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
+static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
   unsigned int mm_flags, unsigned long vm_flags,
   struct task_struct *tsk)
 {
struct vm_area_struct *vma;
-   int fault;
+   vm_fault_t fault;
 
vma = find_vma(mm, addr);
fault = VM_FAULT_BADMAP;
@@ -419,7 +419,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
struct task_struct *tsk;
struct mm_struct *mm;
struct siginfo si;
-   int fault, major = 0;
+   vm_fault_t fault, major = 0;
unsigned long vm_flags = VM_READ | VM_WRITE;
unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_faul