Hi Christoph,

On 11.11.2021 07:50, Christoph Hellwig wrote:
> Add a helper to check if a potentially blocking operation should
> dip into the atomic pools.
>
> Signed-off-by: Christoph Hellwig <[email protected]>
> Reviewed-by: Robin Murphy <[email protected]>
> ---
>   kernel/dma/direct.c | 18 ++++++++++++------
>   1 file changed, 12 insertions(+), 6 deletions(-)
>
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index 924937c54e8ab..d0a317ed8f029 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -156,6 +156,15 @@ static struct page *__dma_direct_alloc_pages(struct 
> device *dev, size_t size,
>       return page;
>   }
>   
> +/*
> + * Check if a potentially blocking operations needs to dip into the atomic
> + * pools for the given device/gfp.
> + */
> +static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
> +{
> +     return gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
This should be:

return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);

otherwise all DMA allocations fail badly on ARM64, which is what happens on 
today's linux-next (plenty of "Failed to get suitable pool for XYZ" messages).

Do you want me to send a fixup patch or would you simply fix it in your tree?

> +}
> +
>   static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
>               dma_addr_t *dma_handle, gfp_t gfp)
>   {
> @@ -235,8 +244,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>                */
>               remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
>               if (remap) {
> -                     if (!gfpflags_allow_blocking(gfp) &&
> -                         !is_swiotlb_for_alloc(dev))
> +                     if (dma_direct_use_pool(dev, gfp))
>                               return dma_direct_alloc_from_pool(dev, size,
>                                               dma_handle, gfp);
>               } else {
> @@ -250,8 +258,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
>        * Decrypting memory may block, so allocate the memory from the atomic
>        * pools if we can't block.
>        */
> -     if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
> -         !is_swiotlb_for_alloc(dev))
> +     if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
>               return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
>   
>       /* we always manually zero the memory once we are done */
> @@ -360,8 +367,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, 
> size_t size,
>       struct page *page;
>       void *ret;
>   
> -     if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
> -         !is_swiotlb_for_alloc(dev))
> +     if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
>               return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
>   
>       page = __dma_direct_alloc_pages(dev, size, gfp);

Best regards
-- 
Marek Szyprowski, PhD
Samsung R&D Institute Poland

_______________________________________________
iommu mailing list
[email protected]
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Reply via email to