On Tue, Jun 11, 2019 at 11:17:30AM -0400, Masayoshi Mizuma wrote:
> --- a/arch/arm64/mm/dma-mapping.c
> +++ b/arch/arm64/mm/dma-mapping.c
> @@ -91,10 +91,6 @@ static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
>  
>  static int __init arm64_dma_init(void)
>  {
> -     WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
> -                TAINT_CPU_OUT_OF_SPEC,
> -                "ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
> -                ARCH_DMA_MINALIGN, cache_line_size());
>       return dma_atomic_pool_init(GFP_DMA32, __pgprot(PROT_NORMAL_NC));
>  }
>  arch_initcall(arm64_dma_init);
> @@ -473,6 +469,11 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
>                       const struct iommu_ops *iommu, bool coherent)
>  {
>       dev->dma_coherent = coherent;
> +
> +     if (!coherent && (cache_line_size() > ARCH_DMA_MINALIGN))
> +             dev_WARN(dev, "ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
> +                             ARCH_DMA_MINALIGN, cache_line_size());

I'm ok in principle with this patch, with the minor issue that, since
commit 7b8c87b297a7 ("arm64: cacheinfo: Update cache_line_size detected
from DT or PPTT") queued for 5.3, cache_line_size() now gets its
information from DT or ACPI. The reason for that change is that the
information is used for performance tuning rather than DMA coherency.

You could use a direct cache_type_cwg() check here instead, unless Robin
(cc'ed) has a better idea.

-- 
Catalin

Reply via email to