> +#ifdef CONFIG_RISCV_DMA_NONCOHERENT
> +struct riscv_dma_cache_sync {
> +     void (*cache_invalidate)(phys_addr_t paddr, size_t size);
> +     void (*cache_clean)(phys_addr_t paddr, size_t size);
> +     void (*cache_flush)(phys_addr_t paddr, size_t size);
> +};
> +
> +void riscv_dma_cache_sync_set(struct riscv_dma_cache_sync *ops);
> +#endif

As told a bunch of times before: doing indirect calls here is a
performance nightmare.  Use something that actually doesn't perform
horribly, like alternatives.  Or even delay implementing that until
we need it and do a plain direct call for now.

static void __dma_sync(phys_addr_t paddr, size_t size, enum dma_data_direction 
dir)
> +{
> +     if ((dir == DMA_FROM_DEVICE) && (dma_cache_sync->cache_invalidate))
> +             dma_cache_sync->cache_invalidate(paddr, size);
> +     else if ((dir == DMA_TO_DEVICE) && (dma_cache_sync->cache_clean))
> +             dma_cache_sync->cache_clean(paddr, size);
> +     else if ((dir == DMA_BIDIRECTIONAL) && dma_cache_sync->cache_flush)
> +             dma_cache_sync->cache_flush(paddr, size);
> +}

The selection of flush types is completely broken.  Take a look at the
comment in arch/arc/mm/dma.c above arch_sync_dma_for_device for a good
explanation.

> +void arch_dma_prep_coherent(struct page *page, size_t size)
> +{
> +     void *flush_addr = page_address(page);
> +
> +     memset(flush_addr, 0, size);

arch_dma_prep_coherent is not supposed to modify the content of
the data.
_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Reply via email to