Commit-ID:  51c7eeba7975c1d2a02eefd00ece6de25176f5f3
Gitweb:     https://git.kernel.org/tip/51c7eeba7975c1d2a02eefd00ece6de25176f5f3
Author:     Christoph Hellwig <hch@lst.de>
AuthorDate: Mon, 19 Mar 2018 11:38:18 +0100
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 20 Mar 2018 10:01:57 +0100

x86/dma/amd_gart: Use dma_direct_{alloc,free}()

This gains support for CMA allocations for the force_iommu case, and
cleans up the code a bit.

Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Jon Mason <jdmason@kudzu.us>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Muli Ben-Yehuda <muli@mulix.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: iommu@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/20180319103826.12853-7-hch@lst.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 arch/x86/kernel/amd_gart_64.c | 36 ++++++++++++++----------------------
 1 file changed, 14 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index 79ac6caaaabb..f299d8a479bb 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -480,29 +480,21 @@ static void *
 gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
                    gfp_t flag, unsigned long attrs)
 {
-       dma_addr_t paddr;
-       unsigned long align_mask;
-       struct page *page;
-
-       if (force_iommu && dev->coherent_dma_mask > DMA_BIT_MASK(24)) {
-               flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
-               page = alloc_pages(flag | __GFP_ZERO, get_order(size));
-               if (!page)
-                       return NULL;
-
-               align_mask = (1UL << get_order(size)) - 1;
-               paddr = dma_map_area(dev, page_to_phys(page), size,
-                                    DMA_BIDIRECTIONAL, align_mask);
-
-               flush_gart();
-               if (paddr != bad_dma_addr) {
-                       *dma_addr = paddr;
-                       return page_address(page);
-               }
-               __free_pages(page, get_order(size));
-       } else
-               return dma_direct_alloc(dev, size, dma_addr, flag, attrs);
+       void *vaddr;
+
+       vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
+       if (!vaddr ||
+           !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
+               return vaddr;
 
+       *dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
+                       DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
+       flush_gart();
+       if (unlikely(*dma_addr == bad_dma_addr))
+               goto out_free;
+       return vaddr;
+out_free:
+       dma_direct_free(dev, size, vaddr, *dma_addr, attrs);
        return NULL;
 }
 

Reply via email to