Use callbacks to implement the DMA sync operations. This simplifies the
subsequent addition of virtio DMA sync support.

Signed-off-by: Xuan Zhuo <[email protected]>
---
 include/net/xsk_buff_pool.h |  6 ++++++
 net/xdp/xsk_buff_pool.c     | 24 ++++++++++++++++++++----
 2 files changed, 26 insertions(+), 4 deletions(-)
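
Note (illustrative only, not part of this patch): once the callbacks are
in place, a driver such as virtio that performs DMA through its own layer
can install its own sync hooks after xp_dma_map() has set the
dma_sync_single_range_for_*() defaults. The virtio_xsk_* names below are
hypothetical and only sketch the intended usage:

#include <linux/dma-mapping.h>
#include <net/xsk_buff_pool.h>

/* Hypothetical driver-private sync hooks; a real driver would forward
 * these to its own DMA sync primitives instead of leaving them empty.
 */
static void virtio_xsk_dma_sync_for_cpu(struct device *dev, dma_addr_t addr,
                                        unsigned long offset, size_t size,
                                        enum dma_data_direction dir)
{
        /* driver-specific "sync for CPU" goes here */
}

static void virtio_xsk_dma_sync_for_device(struct device *dev, dma_addr_t addr,
                                           unsigned long offset, size_t size,
                                           enum dma_data_direction dir)
{
        /* driver-specific "sync for device" goes here */
}

static void virtio_xsk_install_dma_sync(struct xsk_buff_pool *pool)
{
        /* xp_dma_map() installs the dma_sync_single_range_for_*() defaults;
         * overriding them makes xp_dma_sync_for_cpu_slow() and
         * xp_dma_sync_for_device_slow() call back into the driver.
         */
        pool->dma_sync_for_cpu = virtio_xsk_dma_sync_for_cpu;
        pool->dma_sync_for_device = virtio_xsk_dma_sync_for_device;
}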

diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 3e952e569418..53b681120354 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -75,6 +75,12 @@ struct xsk_buff_pool {
        u32 chunk_size;
        u32 chunk_shift;
        u32 frame_len;
+       void (*dma_sync_for_cpu)(struct device *dev, dma_addr_t addr,
+                                unsigned long offset, size_t size,
+                                enum dma_data_direction dir);
+       void (*dma_sync_for_device)(struct device *dev, dma_addr_t addr,
+                                   unsigned long offset, size_t size,
+                                   enum dma_data_direction dir);
        u8 cached_need_wakeup;
        bool uses_need_wakeup;
        bool dma_need_sync;
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index ed6c71826d31..78e325e195fa 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -403,6 +403,20 @@ static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_
        return 0;
 }
 
+static void dma_sync_for_cpu(struct device *dev, dma_addr_t addr,
+                            unsigned long offset, size_t size,
+                            enum dma_data_direction dir)
+{
+       dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+}
+
+static void dma_sync_for_device(struct device *dev, dma_addr_t addr,
+                               unsigned long offset, size_t size,
+                               enum dma_data_direction dir)
+{
+       dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+}
+
 int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
               unsigned long attrs, struct page **pages, u32 nr_pages)
 {
@@ -421,6 +435,9 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
                return 0;
        }
 
+       pool->dma_sync_for_cpu = dma_sync_for_cpu;
+       pool->dma_sync_for_device = dma_sync_for_device;
+
        dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
        if (!dma_map)
                return -ENOMEM;
@@ -667,15 +684,14 @@ EXPORT_SYMBOL(xp_raw_get_dma);
 
 void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
 {
-       dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
-                                     xskb->pool->frame_len, DMA_BIDIRECTIONAL);
+       xskb->pool->dma_sync_for_cpu(xskb->pool->dev, xskb->dma, 0,
+                                    xskb->pool->frame_len, DMA_BIDIRECTIONAL);
 }
 EXPORT_SYMBOL(xp_dma_sync_for_cpu_slow);
 
 void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
                                 size_t size)
 {
-       dma_sync_single_range_for_device(pool->dev, dma, 0,
-                                        size, DMA_BIDIRECTIONAL);
+       pool->dma_sync_for_device(pool->dev, dma, 0, size, DMA_BIDIRECTIONAL);
 }
 EXPORT_SYMBOL(xp_dma_sync_for_device_slow);
-- 
2.32.0.3.g01195cf9f
