On Tue, Mar 17, 2026 at 02:32:16PM -0700, Suren Baghdasaryan wrote:
> On Tue, Mar 17, 2026 at 2:26 PM Suren Baghdasaryan <[email protected]> wrote:
> >
> > On Mon, Mar 16, 2026 at 2:14 PM Lorenzo Stoakes (Oracle) <[email protected]> 
> > wrote:
> > >
> > > The f_op->mmap interface is deprecated, so update driver to use its
> > > successor, mmap_prepare.
> > >
> > > The driver previously used vm_iomap_memory(), so this change replaces it
> > > with its mmap_prepare equivalent, mmap_action_simple_ioremap().
> > >
> > > Functions that wrap mmap() are also converted to wrap mmap_prepare()
> > > instead.
> > >
> > > Also update the documentation accordingly.
> > >
> > > Signed-off-by: Lorenzo Stoakes (Oracle) <[email protected]>
> > > ---
> > >  Documentation/driver-api/vme.rst    |  2 +-
> > >  drivers/staging/vme_user/vme.c      | 20 +++++------
> > >  drivers/staging/vme_user/vme.h      |  2 +-
> > >  drivers/staging/vme_user/vme_user.c | 51 +++++++++++++++++------------
> > >  4 files changed, 42 insertions(+), 33 deletions(-)
> > >
> > > diff --git a/Documentation/driver-api/vme.rst 
> > > b/Documentation/driver-api/vme.rst
> > > index c0b475369de0..7111999abc14 100644
> > > --- a/Documentation/driver-api/vme.rst
> > > +++ b/Documentation/driver-api/vme.rst
> > > @@ -107,7 +107,7 @@ The function :c:func:`vme_master_read` can be used to 
> > > read from and
> > >
> > >  In addition to simple reads and writes, :c:func:`vme_master_rmw` is 
> > > provided to
> > >  do a read-modify-write transaction. Parts of a VME window can also be 
> > > mapped
> > > -into user space memory using :c:func:`vme_master_mmap`.
> > > +into user space memory using :c:func:`vme_master_mmap_prepare`.
> > >
> > >
> > >  Slave windows
> > > diff --git a/drivers/staging/vme_user/vme.c 
> > > b/drivers/staging/vme_user/vme.c
> > > index f10a00c05f12..7220aba7b919 100644
> > > --- a/drivers/staging/vme_user/vme.c
> > > +++ b/drivers/staging/vme_user/vme.c
> > > @@ -735,9 +735,9 @@ unsigned int vme_master_rmw(struct vme_resource 
> > > *resource, unsigned int mask,
> > >  EXPORT_SYMBOL(vme_master_rmw);
> > >
> > >  /**
> > > - * vme_master_mmap - Mmap region of VME master window.
> > > + * vme_master_mmap_prepare - Mmap region of VME master window.
> > >   * @resource: Pointer to VME master resource.
> > > - * @vma: Pointer to definition of user mapping.
> > > + * @desc: Pointer to descriptor of user mapping.
> > >   *
> > >   * Memory map a region of the VME master window into user space.
> > >   *
> > > @@ -745,12 +745,13 @@ EXPORT_SYMBOL(vme_master_rmw);
> > >   *         resource or -EFAULT if map exceeds window size. Other generic 
> > > mmap
> > >   *         errors may also be returned.
> > >   */
> > > -int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct 
> > > *vma)
> > > +int vme_master_mmap_prepare(struct vme_resource *resource,
> > > +                           struct vm_area_desc *desc)
> > >  {
> > > +       const unsigned long vma_size = vma_desc_size(desc);
> > >         struct vme_bridge *bridge = find_bridge(resource);
> > >         struct vme_master_resource *image;
> > >         phys_addr_t phys_addr;
> > > -       unsigned long vma_size;
> > >
> > >         if (resource->type != VME_MASTER) {
> > >                 dev_err(bridge->parent, "Not a master resource\n");
> > > @@ -758,19 +759,18 @@ int vme_master_mmap(struct vme_resource *resource, 
> > > struct vm_area_struct *vma)
> > >         }
> > >
> > >         image = list_entry(resource->entry, struct vme_master_resource, 
> > > list);
> > > -       phys_addr = image->bus_resource.start + (vma->vm_pgoff << 
> > > PAGE_SHIFT);
> > > -       vma_size = vma->vm_end - vma->vm_start;
> > > +       phys_addr = image->bus_resource.start + (desc->pgoff << 
> > > PAGE_SHIFT);
> > >
> > >         if (phys_addr + vma_size > image->bus_resource.end + 1) {
> > >                 dev_err(bridge->parent, "Map size cannot exceed the 
> > > window size\n");
> > >                 return -EFAULT;
> > >         }
> > >
> > > -       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
> > > -
> > > -       return vm_iomap_memory(vma, phys_addr, vma->vm_end - 
> > > vma->vm_start);
> > > +       desc->page_prot = pgprot_noncached(desc->page_prot);
> > > +       mmap_action_simple_ioremap(desc, phys_addr, vma_size);
> > > +       return 0;
> > >  }
> > > -EXPORT_SYMBOL(vme_master_mmap);
> > > +EXPORT_SYMBOL(vme_master_mmap_prepare);
> > >
> > >  /**
> > >   * vme_master_free - Free VME master window
> > > diff --git a/drivers/staging/vme_user/vme.h 
> > > b/drivers/staging/vme_user/vme.h
> > > index 797e9940fdd1..b6413605ea49 100644
> > > --- a/drivers/staging/vme_user/vme.h
> > > +++ b/drivers/staging/vme_user/vme.h
> > > @@ -151,7 +151,7 @@ ssize_t vme_master_read(struct vme_resource 
> > > *resource, void *buf, size_t count,
> > >  ssize_t vme_master_write(struct vme_resource *resource, void *buf, 
> > > size_t count, loff_t offset);
> > >  unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int 
> > > mask, unsigned int compare,
> > >                             unsigned int swap, loff_t offset);
> > > -int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct 
> > > *vma);
> > > +int vme_master_mmap_prepare(struct vme_resource *resource, struct 
> > > vm_area_desc *desc);
> > >  void vme_master_free(struct vme_resource *resource);
> > >
> > >  struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route);
> > > diff --git a/drivers/staging/vme_user/vme_user.c 
> > > b/drivers/staging/vme_user/vme_user.c
> > > index d95dd7d9190a..11e25c2f6b0a 100644
> > > --- a/drivers/staging/vme_user/vme_user.c
> > > +++ b/drivers/staging/vme_user/vme_user.c
> > > @@ -446,24 +446,14 @@ static void vme_user_vm_close(struct vm_area_struct 
> > > *vma)
> > >         kfree(vma_priv);
> > >  }
> > >
> > > -static const struct vm_operations_struct vme_user_vm_ops = {
> > > -       .open = vme_user_vm_open,
> > > -       .close = vme_user_vm_close,
> > > -};
> > > -
> > > -static int vme_user_master_mmap(unsigned int minor, struct 
> > > vm_area_struct *vma)
> > > +static int vme_user_vm_mapped(unsigned long start, unsigned long end, 
> > > pgoff_t pgoff,
> > > +                             const struct file *file, void 
> > > **vm_private_data)
> > >  {
> > > -       int err;
> > > +       const unsigned int minor = iminor(file_inode(file));
> > >         struct vme_user_vma_priv *vma_priv;
> > >
> > >         mutex_lock(&image[minor].mutex);
> > >
> > > -       err = vme_master_mmap(image[minor].resource, vma);
> > > -       if (err) {
> > > -               mutex_unlock(&image[minor].mutex);
> > > -               return err;
> > > -       }
> > > -
> >
> > Ok, this changes the set of the operations performed under 
> > image[minor].mutex.
> > Before we had:
> >
> > mutex_lock(&image[minor].mutex);
> > vme_master_mmap();
> > <some final adjustments>
> > mutex_unlock(&image[minor].mutex);
> >
> > Now we have:
> >
> > mutex_lock(&image[minor].mutex);
> > vme_master_mmap_prepare()
> > mutex_unlock(&image[minor].mutex);
> > vm_iomap_memory();
> > mutex_lock(&image[minor].mutex);
> > vme_user_vm_mapped(); // <some final adjustments>
> > mutex_unlock(&image[minor].mutex);
> >
> > I think as long as image[minor] does not change while we are not
> > holding the mutex we should be safe, and looking at the code it seems
> > to be the case. But I'm not familiar with this driver and might be
> > wrong. Worth double-checking.

The file is pinned for the duration, the mutex is associated with the file,
so there's no sane world in which that could be problematic.

Keeping in mind that we manipulate stuff on vme_user_vm_close() that
directly accesses image[minor] at an arbitrary time.

>
> A side note: if we had to hold the mutex across all those operations I
> think we would need to take the mutex in the vm_ops->mmap_prepare and
> add a vm_ops->map_failed hook or something along that line to drop the
> mutex in case mmap_action_complete() fails. Not sure if we will have
> such cases though...

No, I don't want to do this if it can be at all avoided. You should in
nearly any sane circumstance be able to defer things until the mapped hook
anyway.

Also, a merge can happen after an .mmap_prepare, so we'd have to have
some 'success' hook — and I'm just not going there; it'll end up open to
abuse again.

(We do have success and error filtering hooks right now, sadly, but they're
really for hugetlb and I plan to find a way to get rid of them).

The mmap_prepare is meant to essentially be as stateless as possible.

Anyway I don't think it's relevant here.

>
> >
> > >         vma_priv = kmalloc_obj(*vma_priv);
> > >         if (!vma_priv) {
> > >                 mutex_unlock(&image[minor].mutex);
> > > @@ -472,22 +462,41 @@ static int vme_user_master_mmap(unsigned int minor, 
> > > struct vm_area_struct *vma)
> > >
> > >         vma_priv->minor = minor;
> > >         refcount_set(&vma_priv->refcnt, 1);
> > > -       vma->vm_ops = &vme_user_vm_ops;
> > > -       vma->vm_private_data = vma_priv;
> > > -
> > > +       *vm_private_data = vma_priv;
> > >         image[minor].mmap_count++;
> > >
> > >         mutex_unlock(&image[minor].mutex);
> > > -
> > >         return 0;
> > >  }
> > >
> > > -static int vme_user_mmap(struct file *file, struct vm_area_struct *vma)
> > > +static const struct vm_operations_struct vme_user_vm_ops = {
> > > +       .mapped = vme_user_vm_mapped,
> > > +       .open = vme_user_vm_open,
> > > +       .close = vme_user_vm_close,
> > > +};
> > > +
> > > +static int vme_user_master_mmap_prepare(unsigned int minor,
> > > +                                       struct vm_area_desc *desc)
> > > +{
> > > +       int err;
> > > +
> > > +       mutex_lock(&image[minor].mutex);
> > > +
> > > +       err = vme_master_mmap_prepare(image[minor].resource, desc);
> > > +       if (!err)
> > > +               desc->vm_ops = &vme_user_vm_ops;
> > > +
> > > +       mutex_unlock(&image[minor].mutex);
> > > +       return err;
> > > +}
> > > +
> > > +static int vme_user_mmap_prepare(struct vm_area_desc *desc)
> > >  {
> > > -       unsigned int minor = iminor(file_inode(file));
> > > +       const struct file *file = desc->file;
> > > +       const unsigned int minor = iminor(file_inode(file));
> > >
> > >         if (type[minor] == MASTER_MINOR)
> > > -               return vme_user_master_mmap(minor, vma);
> > > +               return vme_user_master_mmap_prepare(minor, desc);
> > >
> > >         return -ENODEV;
> > >  }
> > > @@ -498,7 +507,7 @@ static const struct file_operations vme_user_fops = {
> > >         .llseek = vme_user_llseek,
> > >         .unlocked_ioctl = vme_user_unlocked_ioctl,
> > >         .compat_ioctl = compat_ptr_ioctl,
> > > -       .mmap = vme_user_mmap,
> > > +       .mmap_prepare = vme_user_mmap_prepare,
> > >  };
> > >
> > >  static int vme_user_match(struct vme_dev *vdev)
> > > --
> > > 2.53.0
> > >

Cheers, Lorenzo

Reply via email to