We can allocate DMA memory on the node nearest to the device here too, in the same way x86_64 does.

Signed-off-by: Glauber Costa <[EMAIL PROTECTED]>
---
 arch/x86/kernel/pci-dma_32.c |   27 ++++++++++++++++++++++-----
 1 files changed, 22 insertions(+), 5 deletions(-)
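For context (not part of the patch): a rough sketch of how a driver ends up on this path. The function names example_alloc_ring/example_free_ring and the ring-buffer scenario are made up for illustration; the point is only that an ordinary dma_alloc_coherent() call now gets its backing pages from alloc_pages_node(dev_to_node(dev), ...) rather than from whatever node the CPU happens to be on.

/*
 * Hypothetical caller (illustration only, not in the patch).
 * With the change below, the coherent buffer is allocated from
 * the NUMA node closest to @dev when one is known.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *bus_addr)
{
	/*
	 * Plain GFP_KERNEL is fine here: dma_alloc_coherent() strips the
	 * zone specifiers and adds GFP_DMA itself when the device's
	 * coherent_dma_mask requires it.
	 */
	return dma_alloc_coherent(dev, size, bus_addr, GFP_KERNEL);
}

static void example_free_ring(struct device *dev, size_t size,
			      void *cpu_addr, dma_addr_t bus_addr)
{
	dma_free_coherent(dev, size, cpu_addr, bus_addr);
}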
diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
index 5ae3470..0d630ae 100644
--- a/arch/x86/kernel/pci-dma_32.c
+++ b/arch/x86/kernel/pci-dma_32.c
@@ -48,10 +48,23 @@ static int dma_release_coherent(struct device *dev, int order, void *vaddr)
 	return 0;
 }
 
+/* Allocate DMA memory on node near device */
+noinline struct page *
+dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
+{
+	int node;
+
+	node = dev_to_node(dev);
+
+	return alloc_pages_node(node, gfp, order);
+}
+
 void *dma_alloc_coherent(struct device *dev, size_t size,
 			   dma_addr_t *dma_handle, gfp_t gfp)
 {
 	void *ret = NULL;
+	struct page *page;
+	dma_addr_t bus;
 	int order = get_order(size);
 	/* ignore region specifiers */
 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
@@ -62,12 +75,16 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
 		gfp |= GFP_DMA;
 
-	ret = (void *)__get_free_pages(gfp, order);
+	page = dma_alloc_pages(dev, gfp, order);
+	if (page == NULL)
+		return NULL;
+
+	ret = page_address(page);
+	bus = page_to_phys(page);
+
+	memset(ret, 0, size);
+	*dma_handle = bus;
 
-	if (ret != NULL) {
-		memset(ret, 0, size);
-		*dma_handle = virt_to_phys(ret);
-	}
 	return ret;
 }
 EXPORT_SYMBOL(dma_alloc_coherent);
-- 
1.5.0.6