From: Leon Romanovsky <[email protected]>
HMM mirroring can only work on coherent systems that do not take the SWIOTLB
bounce-buffering path. Until the introduction of DMA_ATTR_REQUIRE_COHERENT,
there was no reliable way to express that requirement, so various
approximations were used instead:
int hmm_dma_map_alloc(struct device *dev, struct hmm_dma_map *map,
size_t nr_entries, size_t dma_entry_size)
{
<...>
/*
* The HMM API violates our normal DMA buffer ownership rules and can't
* transfer buffer ownership. The dma_addressing_limited() check is a
* best approximation to ensure no swiotlb buffering happens.
*/
dma_need_sync = !dev->dma_skip_sync;
if (dma_need_sync || dma_addressing_limited(dev))
return -EOPNOTSUPP;
So let's mark mapped buffers with the DMA_ATTR_REQUIRE_COHERENT attribute
to prevent DMA debugging warnings for cache-overlapped entries.
Signed-off-by: Leon Romanovsky <[email protected]>
---
mm/hmm.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/hmm.c b/mm/hmm.c
index f6c4ddff4bd61..5955f2f0c83db 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -778,7 +778,7 @@ dma_addr_t hmm_dma_map_pfn(struct device *dev, struct
hmm_dma_map *map,
struct page *page = hmm_pfn_to_page(pfns[idx]);
phys_addr_t paddr = hmm_pfn_to_phys(pfns[idx]);
size_t offset = idx * map->dma_entry_size;
- unsigned long attrs = 0;
+ unsigned long attrs = DMA_ATTR_REQUIRE_COHERENT;
dma_addr_t dma_addr;
int ret;
@@ -871,7 +871,7 @@ bool hmm_dma_unmap_pfn(struct device *dev, struct
hmm_dma_map *map, size_t idx)
struct dma_iova_state *state = &map->state;
dma_addr_t *dma_addrs = map->dma_list;
unsigned long *pfns = map->pfn_list;
- unsigned long attrs = 0;
+ unsigned long attrs = DMA_ATTR_REQUIRE_COHERENT;
if ((pfns[idx] & valid_dma) != valid_dma)
return false;
--
2.53.0