We must never let unencrypted memory go back into the general page pool. So if we fail to set it back to encrypted when freeing DMA memory, leak the memory instead and warn the user.
Signed-off-by: Christoph Hellwig <h...@lst.de> --- kernel/dma/direct.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 96f02248e7fa2..6673f7aebf787 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -264,9 +264,11 @@ void *dma_direct_alloc(struct device *dev, size_t size, return ret; out_encrypt_pages: - /* If memory cannot be re-encrypted, it must be leaked */ - if (dma_set_encrypted(dev, page_address(page), size)) + if (dma_set_encrypted(dev, page_address(page), size)) { + pr_warn_ratelimited( + "leaking DMA memory that can't be re-encrypted\n"); return NULL; + } out_free_pages: __dma_direct_free_pages(dev, page, size); return NULL; @@ -305,7 +307,11 @@ void dma_direct_free(struct device *dev, size_t size, dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size))) return; - dma_set_encrypted(dev, cpu_addr, 1 << page_order); + if (dma_set_encrypted(dev, cpu_addr, 1 << page_order)) { + pr_warn_ratelimited( + "leaking DMA memory that can't be re-encrypted\n"); + return; + } if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) vunmap(cpu_addr); @@ -363,7 +369,10 @@ void dma_direct_free_pages(struct device *dev, size_t size, dma_free_from_pool(dev, vaddr, size)) return; - dma_set_encrypted(dev, vaddr, 1 << page_order); + if (dma_set_encrypted(dev, vaddr, 1 << page_order)) { + pr_warn_ratelimited( + "leaking DMA memory that can't be re-encrypted\n"); + } __dma_direct_free_pages(dev, page, size); } -- 2.30.2 _______________________________________________ iommu mailing list iommu@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/iommu