From: Barry Song <v-songbao...@oppo.com>

mm doesn't support non-blockable __GFP_NOFAIL allocations, because
__GFP_NOFAIL without direct reclaim may just result in a busy loop
within non-sleepable contexts.

static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                                                struct alloc_context *ac)
{
        ...
        /*
         * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
         * we always retry
         */
        if (gfp_mask & __GFP_NOFAIL) {
                /*
                 * All existing users of the __GFP_NOFAIL are blockable, so warn
                 * of any new users that actually require GFP_NOWAIT
                 */
                if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
                        goto fail;
                ...
        }
        ...
fail:
        warn_alloc(gfp_mask, ac->nodemask,
                        "page allocation failure: order:%u", order);
got_pg:
        return page;
}
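
In this driver, vduse_domain_remove_user_bounce_pages() hits exactly
that case: it allocates the replacement bounce pages while holding the
bounce_lock write lock, i.e. in atomic context. Simplified from the
code this patch removes:

        write_lock(&domain->bounce_lock);
        ...
        /* Copy user page to kernel page if it's in use */
        if (map->orig_phys != INVALID_PHYS_ADDR) {
                page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
                ...
        }
        ...
        write_unlock(&domain->bounce_lock);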

Let's move the memory allocation out of the atomic context and use a
normal sleepable context to get the pages.
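
The change follows the usual preallocate-then-recheck pattern; a
minimal sketch with placeholder names (lock, pages, still_needed), not
the exact driver code:

        /*
         * Sketch only: drop the lock, allocate in sleepable context,
         * then retake the lock and re-check the condition, because the
         * state may have changed while the lock was not held.
         */
        write_unlock(&lock);
        pages = kmalloc_array(count, sizeof(*pages),
                              GFP_KERNEL | __GFP_NOFAIL);
        write_lock(&lock);
        if (!still_needed) {
                /* Raced with another path; back out. */
                kfree(pages);
                goto out;
        }
        /* Safe to consume 'pages' under the lock from here on. */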

[RFC]: This has only been compile-tested; I'd prefer that the VDPA
maintainers handle it.

Cc: "Michael S. Tsirkin" <m...@redhat.com>
Cc: Jason Wang <jasow...@redhat.com>
Cc: Xuan Zhuo <xuanz...@linux.alibaba.com>
Cc: "Eugenio Pérez" <epere...@redhat.com>
Cc: Maxime Coquelin <maxime.coque...@redhat.com>
Signed-off-by: Barry Song <v-songbao...@oppo.com>
---
 drivers/vdpa/vdpa_user/iova_domain.c | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 791d38d6284c..eff700e5f7a2 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -287,28 +287,44 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
 {
        struct vduse_bounce_map *map;
        unsigned long i, count;
+       struct page **pages = NULL;
 
        write_lock(&domain->bounce_lock);
        if (!domain->user_bounce_pages)
                goto out;
-
        count = domain->bounce_size >> PAGE_SHIFT;
+       write_unlock(&domain->bounce_lock);
+
+       pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
+       for (i = 0; i < count; i++)
+               pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
+
+       write_lock(&domain->bounce_lock);
+       if (!domain->user_bounce_pages) {
+               for (i = 0; i < count; i++)
+                       put_page(pages[i]);
+               kfree(pages);
+               goto out;
+       }
+
        for (i = 0; i < count; i++) {
-               struct page *page = NULL;
+               struct page *page = pages[i];
 
                map = &domain->bounce_maps[i];
-               if (WARN_ON(!map->bounce_page))
+               if (WARN_ON(!map->bounce_page)) {
+                       put_page(page);
                        continue;
+               }
 
                /* Copy user page to kernel page if it's in use */
                if (map->orig_phys != INVALID_PHYS_ADDR) {
-                       page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
                        memcpy_from_page(page_address(page),
                                         map->bounce_page, 0, PAGE_SIZE);
                }
                put_page(map->bounce_page);
                map->bounce_page = page;
        }
+       kfree(pages);
        domain->user_bounce_pages = false;
 out:
        write_unlock(&domain->bounce_lock);
-- 
2.34.1

