When dma_direct_alloc_pages encounters a highmem page it currently just
gives up.  But what it really should do is fall back to allocating the
memory from the page allocator instead - without this, platforms with a
global highmem CMA pool fail all dma_alloc_pages allocations.
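
For reference, the resulting control flow in __dma_direct_alloc_pages
looks roughly like this (a simplified sketch, not the literal kernel
code; the alloc_pages_node() fallback sits under the again: label,
which is outside the hunk context shown below):

	page = dma_alloc_contiguous(dev, size, gfp);
	/* reject the CMA page if it is unusable for this caller */
	if (page && (!dma_coherent_ok(dev, page_to_phys(page), size) ||
		     (!allow_highmem && PageHighMem(page)))) {
		dma_free_contiguous(dev, page, size);
		page = NULL;
	}
	/* fall back to the plain page allocator */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp,
					get_order(size));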

Fixes: efa70f2fdc84 ("dma-mapping: add a new dma_alloc_pages API")
Reported-by: Mark O'Neill <m...@tumblingdice.co.uk>
Signed-off-by: Christoph Hellwig <h...@lst.de>
---
 kernel/dma/direct.c | 27 ++++++++++-----------------
 1 file changed, 10 insertions(+), 17 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 9743c6ccce1a9..3e7f4aab740e4 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -115,7 +115,7 @@ static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
 }
 
 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
-               gfp_t gfp)
+               gfp_t gfp, bool allow_highmem)
 {
        int node = dev_to_node(dev);
        struct page *page = NULL;
@@ -129,9 +129,12 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
        gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                                           &phys_limit);
        page = dma_alloc_contiguous(dev, size, gfp);
-       if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-               dma_free_contiguous(dev, page, size);
-               page = NULL;
+       if (page) {
+               if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
+                   (!allow_highmem && PageHighMem(page))) {
+                       dma_free_contiguous(dev, page, size);
+                       page = NULL;
+               }
        }
 again:
        if (!page)
@@ -189,7 +192,7 @@ static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
 {
        struct page *page;
 
-       page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+       page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
        if (!page)
                return NULL;
 
@@ -262,7 +265,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
                return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
        /* we always manually zero the memory once we are done */
-       page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+       page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
        if (!page)
                return NULL;
 
@@ -370,19 +373,9 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
        if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
                return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
-       page = __dma_direct_alloc_pages(dev, size, gfp);
+       page = __dma_direct_alloc_pages(dev, size, gfp, false);
        if (!page)
                return NULL;
-       if (PageHighMem(page)) {
-               /*
-                * Depending on the cma= arguments and per-arch setup
-                * dma_alloc_contiguous could return highmem pages.
-                * Without remapping there is no way to return them here,
-                * so log an error and fail.
-                */
-               dev_info(dev, "Rejecting highmem page from CMA.\n");
-               goto out_free_pages;
-       }
 
        ret = page_address(page);
        if (dma_set_decrypted(dev, ret, size))
-- 
2.30.2
