On Thu, May 08, 2025 at 08:02:35PM -0700, Nicolin Chen wrote:
> +int _iommufd_alloc_mmap(struct iommufd_ctx *ictx, struct iommufd_object 
> *owner,
> +                     phys_addr_t base, size_t length, unsigned long *offset)
> +{
> +     struct iommufd_mmap *immap;
> +     unsigned long startp;
> +     int num_pfns, rc;
> +
> +     if (WARN_ON_ONCE(!offset))
> +             return -EINVAL;

We don't need checks like this, just let it oops

> +     if (!PAGE_ALIGNED(base))
> +             return -EINVAL;
> +     if (!length || !PAGE_ALIGNED(length))
> +             return -EINVAL;
> +     num_pfns = length >> PAGE_SHIFT;
> +
> +     immap = kzalloc(sizeof(*immap), GFP_KERNEL);
> +     if (!immap)
> +             return -ENOMEM;
> +     immap->owner = owner;
> +     immap->base_pfn = base >> PAGE_SHIFT;

'base' is a confusing name for the mmio address. Call it mmio_pfn or something

> +static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
> +{
> +     struct iommufd_ctx *ictx = filp->private_data;
> +     size_t length = vma->vm_end - vma->vm_start;
> +     struct iommufd_mmap *immap;
> +     int rc;
> +
> +     if (!PAGE_ALIGNED(length))
> +             return -EINVAL;
> +     if (!(vma->vm_flags & VM_SHARED))
> +             return -EINVAL;
> +     if (vma->vm_flags & VM_EXEC)
> +             return -EPERM;
> +
> +     /* vma->vm_pgoff carries an index to an mtree entry (immap) */
> +     immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff);
> +     if (!immap)
> +             return -ENXIO;
> +     if (length >> PAGE_SHIFT != immap->num_pfns)
> +             return -ENXIO;

This needs to validate that vm_pgoff is at the start of the immap or
num_pfns is the wrong thing to validate length against.

length >> PAGE_SHIFT truncates any non-zero low-order bits, so this
comparison does not validate length properly.

> +     vma->vm_pgoff = 0;
> +     vma->vm_private_data = immap;
> +     vma->vm_ops = &iommufd_vma_ops;
> +     vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
> +     vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);

remap_pfn_range already sets these vm_flags

> +     rc = remap_pfn_range(vma, vma->vm_start, immap->base_pfn, length,
> +                          vma->vm_page_prot);

This should be io_remap_pfn_range() if it is mmio

> +     if (!rc) /* vm_ops.open won't be called for mmap itself. */
> +             refcount_inc(&immap->owner->users);
> +     return rc;

Use success-oriented flow here: handle the failure case with an early
return so the success path (the refcount_inc and return) reads straight
through without the negated condition.

Jason

Reply via email to