On Thu, Jan 30, 2025 at 04:57:39PM +1100, Alistair Popple wrote:
> On Wed, Jan 29, 2025 at 12:54:01PM +0100, David Hildenbrand wrote:

[...]

> > -int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
> > -                           unsigned long end, struct page **pages,
> > -                           void *owner)
> > +struct page *make_device_exclusive(struct mm_struct *mm, unsigned long 
> > addr,
> > +           void *owner, struct folio **foliop)
> >  {
> > -   long npages = (end - start) >> PAGE_SHIFT;
> > -   long i;
> > +   struct folio *folio;
> > +   struct page *page;
> > +   long npages;
> > +
> > +   mmap_assert_locked(mm);
> >  
> > -   npages = get_user_pages_remote(mm, start, npages,
> > +   /*
> > +    * Fault in the page writable and try to lock it; note that if the
> > +    * address would already be marked for exclusive use by the device,
> > +    * the GUP call would undo that first by triggering a fault.
> > +    */
> > +   npages = get_user_pages_remote(mm, addr, 1,
> >                                    FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD,
> > -                                  pages, NULL);
> > -   if (npages < 0)
> > -           return npages;
> > -
> > -   for (i = 0; i < npages; i++, start += PAGE_SIZE) {
> > -           struct folio *folio = page_folio(pages[i]);
> > -           if (PageTail(pages[i]) || !folio_trylock(folio)) {
> > -                   folio_put(folio);
> > -                   pages[i] = NULL;
> > -                   continue;
> > -           }
> > +                                  &page, NULL);
> > +   if (npages != 1)
> > +           return ERR_PTR(npages);
> > +   folio = page_folio(page);
> >  
> > -           if (!folio_make_device_exclusive(folio, mm, start, owner)) {
> > -                   folio_unlock(folio);
> > -                   folio_put(folio);
> > -                   pages[i] = NULL;
> > -           }
> > +   if (!folio_test_anon(folio) || folio_test_hugetlb(folio)) {
> > +           folio_put(folio);
> > +           return ERR_PTR(-EOPNOTSUPP);
> > +   }
> > +
> > +   if (!folio_trylock(folio)) {

Actually I think we can make this folio_lock(folio) now. The only reason for
the trylock was to avoid deadlocks when other threads loop over a range of
folios while holding multiple folio locks — but that looping caller is gone,
and taking a single folio lock here is the same thing the migration code
already does.

 - Alistair

> > +           folio_put(folio);
> > +           return ERR_PTR(-EBUSY);
> >     }
> >  
> > -   return npages;
> > +   if (!folio_make_device_exclusive(folio, mm, addr, owner)) {
> > +           folio_unlock(folio);
> > +           folio_put(folio);
> > +           return ERR_PTR(-EBUSY);
> > +   }
> > +   *foliop = folio;
> > +   return page;
> >  }
> > -EXPORT_SYMBOL_GPL(make_device_exclusive_range);
> > +EXPORT_SYMBOL_GPL(make_device_exclusive);
> >  #endif
> >  
> >  void __put_anon_vma(struct anon_vma *anon_vma)
> > -- 
> > 2.48.1
> > 
> 

Reply via email to