When the device is a virtio device, use virtio's DMA interface.
Signed-off-by: Xuan Zhuo <[email protected]>
---
net/xdp/xsk_buff_pool.c | 59 +++++++++++++++++++++++++++++++----------
1 file changed, 45 insertions(+), 14 deletions(-)
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 78e325e195fa..e2785aca8396 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -3,6 +3,7 @@
#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>
+#include <linux/virtio.h>
#include "xsk_queue.h"
#include "xdp_umem.h"
@@ -334,8 +335,12 @@ static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
dma = &dma_map->dma_pages[i];
if (*dma) {
*dma &= ~XSK_NEXT_PG_CONTIG_MASK;
- dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
- DMA_BIDIRECTIONAL, attrs);
+ if (is_virtio_device(dma_map->dev))
+ virtio_dma_unmap(dma_map->dev, *dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ else
+ dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, attrs);
*dma = 0;
}
}
@@ -435,22 +440,40 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
return 0;
}
- pool->dma_sync_for_cpu = dma_sync_for_cpu;
- pool->dma_sync_for_device = dma_sync_for_device;
+ if (is_virtio_device(dev)) {
+ pool->dma_sync_for_cpu = virtio_dma_sync_signle_range_for_cpu;
+ pool->dma_sync_for_device = virtio_dma_sync_signle_range_for_device;
+
+ } else {
+ pool->dma_sync_for_cpu = dma_sync_for_cpu;
+ pool->dma_sync_for_device = dma_sync_for_device;
+ }
dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
if (!dma_map)
return -ENOMEM;
for (i = 0; i < dma_map->dma_pages_cnt; i++) {
- dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
- DMA_BIDIRECTIONAL, attrs);
- if (dma_mapping_error(dev, dma)) {
- __xp_dma_unmap(dma_map, attrs);
- return -ENOMEM;
+ if (is_virtio_device(dev)) {
+ dma = virtio_dma_map_page(dev, pages[i], 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if (virtio_dma_mapping_error(dev, dma))
+ goto err;
+
+ if (virtio_dma_need_sync(dev, dma))
+ dma_map->dma_need_sync = true;
+
+ } else {
+ dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, attrs);
+
+ if (dma_mapping_error(dev, dma))
+ goto err;
+
+ if (dma_need_sync(dev, dma))
+ dma_map->dma_need_sync = true;
}
- if (dma_need_sync(dev, dma))
- dma_map->dma_need_sync = true;
dma_map->dma_pages[i] = dma;
}
@@ -464,6 +487,9 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
}
return 0;
+err:
+ __xp_dma_unmap(dma_map, attrs);
+ return -ENOMEM;
}
EXPORT_SYMBOL(xp_dma_map);
@@ -546,9 +572,14 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
xskb->xdp.data_meta = xskb->xdp.data;
if (pool->dma_need_sync) {
- dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
- pool->frame_len,
- DMA_BIDIRECTIONAL);
+ if (is_virtio_device(pool->dev))
+ virtio_dma_sync_signle_range_for_device(pool->dev, xskb->dma, 0,
+ pool->frame_len,
+ DMA_BIDIRECTIONAL);
+ else
+ dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
+ pool->frame_len,
+ DMA_BIDIRECTIONAL);
}
return &xskb->xdp;
}
--
2.32.0.3.g01195cf9f
_______________________________________________
Virtualization mailing list
[email protected]
https://lists.linuxfoundation.org/mailman/listinfo/virtualization