Re: [PATCH v7 5/9] virtio_ring: Support DMA APIs

2016-02-03 Thread Andy Lutomirski
On Feb 3, 2016 5:52 AM, "Michael S. Tsirkin"  wrote:
>
> On Tue, Feb 02, 2016 at 09:46:36PM -0800, Andy Lutomirski wrote:
> > virtio_ring currently sends the device (usually a hypervisor)
> > physical addresses of its I/O buffers.  This is okay when DMA
> > addresses and physical addresses are the same thing, but this isn't
> > always the case.  For example, this never works on Xen guests, and
> > it is likely to fail if a physical "virtio" device ever ends up
> > behind an IOMMU or swiotlb.
> >
> > The immediate use case for me is to enable virtio on Xen guests.
> > For that to work, we need vring to support DMA address translation
> > as well as a corresponding change to virtio_pci or to another
> > driver.
> >
> > Signed-off-by: Andy Lutomirski 
> > ---
> >  drivers/virtio/Kconfig   |   2 +-
> >  drivers/virtio/virtio_ring.c | 200 
> > ---
> >  tools/virtio/linux/dma-mapping.h |  17 
> >  3 files changed, 183 insertions(+), 36 deletions(-)
> >  create mode 100644 tools/virtio/linux/dma-mapping.h
> >
> > diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
> > index cab9f3f63a38..77590320d44c 100644
> > --- a/drivers/virtio/Kconfig
> > +++ b/drivers/virtio/Kconfig
> > @@ -60,7 +60,7 @@ config VIRTIO_INPUT
> >
> >   config VIRTIO_MMIO
> >   tristate "Platform bus driver for memory mapped virtio devices"
> > - depends on HAS_IOMEM
> > + depends on HAS_IOMEM && HAS_DMA
> >   select VIRTIO
> >   ---help---
> >This drivers provides support for memory mapped virtio
>
> What's this chunk doing here btw? Should be part of the mmio patch?
>

IIRC it was deliberate.  Making virtio depend on HAS_DMA didn't work
right because kconfig doesn't propagate dependencies through select
intelligently.  This patch makes core virtio depend on HAS_DMA, so I
added the dependency here, too.

--Andy


Re: [PATCH v7 5/9] virtio_ring: Support DMA APIs

2016-02-03 Thread Michael S. Tsirkin
On Tue, Feb 02, 2016 at 09:46:36PM -0800, Andy Lutomirski wrote:
> virtio_ring currently sends the device (usually a hypervisor)
> physical addresses of its I/O buffers.  This is okay when DMA
> addresses and physical addresses are the same thing, but this isn't
> always the case.  For example, this never works on Xen guests, and
> it is likely to fail if a physical "virtio" device ever ends up
> behind an IOMMU or swiotlb.
> 
> The immediate use case for me is to enable virtio on Xen guests.
> For that to work, we need vring to support DMA address translation
> as well as a corresponding change to virtio_pci or to another
> driver.
> 
> Signed-off-by: Andy Lutomirski 
> ---
>  drivers/virtio/Kconfig   |   2 +-
>  drivers/virtio/virtio_ring.c | 200 
> ---
>  tools/virtio/linux/dma-mapping.h |  17 
>  3 files changed, 183 insertions(+), 36 deletions(-)
>  create mode 100644 tools/virtio/linux/dma-mapping.h
> 
> diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
> index cab9f3f63a38..77590320d44c 100644
> --- a/drivers/virtio/Kconfig
> +++ b/drivers/virtio/Kconfig
> @@ -60,7 +60,7 @@ config VIRTIO_INPUT
>  
>   config VIRTIO_MMIO
>   tristate "Platform bus driver for memory mapped virtio devices"
> - depends on HAS_IOMEM
> + depends on HAS_IOMEM && HAS_DMA
>   select VIRTIO
>   ---help---
>This drivers provides support for memory mapped virtio

What's this chunk doing here btw? Should be part of the mmio patch?

> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> index ab0be6c084f6..9abc008ff7ea 100644
> --- a/drivers/virtio/virtio_ring.c
> +++ b/drivers/virtio/virtio_ring.c
> @@ -24,6 +24,7 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  
>  #ifdef DEBUG
>  /* For development, we want to crash whenever the ring is screwed. */
> @@ -54,6 +55,11 @@
>  #define END_USE(vq)
>  #endif
>  
> +struct vring_desc_state {
> + void *data; /* Data for callback. */
> + struct vring_desc *indir_desc;  /* Indirect descriptor, if any. */
> +};
> +
>  struct vring_virtqueue {
>   struct virtqueue vq;
>  
> @@ -98,8 +104,8 @@ struct vring_virtqueue {
>   ktime_t last_add_time;
>  #endif
>  
> - /* Tokens for callbacks. */
> - void *data[];
> + /* Per-descriptor state. */
> + struct vring_desc_state desc_state[];
>  };
>  
>  #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
> @@ -128,6 +134,79 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
>   return false;
>  }
>  
> +/*
> + * The DMA ops on various arches are rather gnarly right now, and
> + * making all of the arch DMA ops work on the vring device itself
> + * is a mess.  For now, we use the parent device for DMA ops.
> + */
> +struct device *vring_dma_dev(const struct vring_virtqueue *vq)
> +{
> + return vq->vq.vdev->dev.parent;
> +}
> +
> +/* Map one sg entry. */
> +static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
> +struct scatterlist *sg,
> +enum dma_data_direction direction)
> +{
> + if (!vring_use_dma_api(vq->vq.vdev))
> + return (dma_addr_t)sg_phys(sg);
> +
> + /*
> +  * We can't use dma_map_sg, because we don't use scatterlists in
> +  * the way it expects (we don't guarantee that the scatterlist
> +  * will exist for the lifetime of the mapping).
> +  */
> + return dma_map_page(vring_dma_dev(vq),
> + sg_page(sg), sg->offset, sg->length,
> + direction);
> +}
> +
> +static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
> +void *cpu_addr, size_t size,
> +enum dma_data_direction direction)
> +{
> + if (!vring_use_dma_api(vq->vq.vdev))
> + return (dma_addr_t)virt_to_phys(cpu_addr);
> +
> + return dma_map_single(vring_dma_dev(vq),
> +   cpu_addr, size, direction);
> +}
> +
> +static void vring_unmap_one(const struct vring_virtqueue *vq,
> + struct vring_desc *desc)
> +{
> + u16 flags;
> +
> + if (!vring_use_dma_api(vq->vq.vdev))
> + return;
> +
> + flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
> +
> + if (flags & VRING_DESC_F_INDIRECT) {
> + dma_unmap_single(vring_dma_dev(vq),
> +  virtio64_to_cpu(vq->vq.vdev, desc->addr),
> +  virtio32_to_cpu(vq->vq.vdev, desc->len),
> +  (flags & VRING_DESC_F_WRITE) ?
> +  DMA_FROM_DEVICE : DMA_TO_DEVICE);
> + } else {
> + dma_unmap_page(vring_dma_dev(vq),
> +virtio64_to_cpu(vq->vq.vdev, desc->addr),
> +virtio32_to_cpu(vq->vq.vdev, desc->len),
> +(flags & VRING_DESC_F_WRITE) ?
> +DMA_FROM_DEVICE : DMA_TO_DEVICE);

Re: [PATCH v7 5/9] virtio_ring: Support DMA APIs

2016-02-03 Thread Michael S. Tsirkin
On Tue, Feb 02, 2016 at 09:46:36PM -0800, Andy Lutomirski wrote:
> virtio_ring currently sends the device (usually a hypervisor)
> physical addresses of its I/O buffers.  This is okay when DMA
> addresses and physical addresses are the same thing, but this isn't
> always the case.  For example, this never works on Xen guests, and
> it is likely to fail if a physical "virtio" device ever ends up
> behind an IOMMU or swiotlb.
> 
> The immediate use case for me is to enable virtio on Xen guests.
> For that to work, we need vring to support DMA address translation
> as well as a corresponding change to virtio_pci or to another
> driver.
> 
> Signed-off-by: Andy Lutomirski 
> ---
>  drivers/virtio/Kconfig   |   2 +-
>  drivers/virtio/virtio_ring.c | 200 
> ---
>  tools/virtio/linux/dma-mapping.h |  17 
>  3 files changed, 183 insertions(+), 36 deletions(-)
>  create mode 100644 tools/virtio/linux/dma-mapping.h
> 
> diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
> index cab9f3f63a38..77590320d44c 100644
> --- a/drivers/virtio/Kconfig
> +++ b/drivers/virtio/Kconfig
> @@ -60,7 +60,7 @@ config VIRTIO_INPUT
>  
>   config VIRTIO_MMIO
>   tristate "Platform bus driver for memory mapped virtio devices"
> - depends on HAS_IOMEM
> + depends on HAS_IOMEM && HAS_DMA
>   select VIRTIO
>   ---help---
>This drivers provides support for memory mapped virtio

What's this chunk doing here btw? Should be part of the mmio patch?

> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> index ab0be6c084f6..9abc008ff7ea 100644
> --- a/drivers/virtio/virtio_ring.c
> +++ b/drivers/virtio/virtio_ring.c
> @@ -24,6 +24,7 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  
>  #ifdef DEBUG
>  /* For development, we want to crash whenever the ring is screwed. */
> @@ -54,6 +55,11 @@
>  #define END_USE(vq)
>  #endif
>  
> +struct vring_desc_state {
> + void *data; /* Data for callback. */
> + struct vring_desc *indir_desc;  /* Indirect descriptor, if any. */
> +};
> +
>  struct vring_virtqueue {
>   struct virtqueue vq;
>  
> @@ -98,8 +104,8 @@ struct vring_virtqueue {
>   ktime_t last_add_time;
>  #endif
>  
> - /* Tokens for callbacks. */
> - void *data[];
> + /* Per-descriptor state. */
> + struct vring_desc_state desc_state[];
>  };
>  
>  #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
> @@ -128,6 +134,79 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
>   return false;
>  }
>  
> +/*
> + * The DMA ops on various arches are rather gnarly right now, and
> + * making all of the arch DMA ops work on the vring device itself
> + * is a mess.  For now, we use the parent device for DMA ops.
> + */
> +struct device *vring_dma_dev(const struct vring_virtqueue *vq)
> +{
> + return vq->vq.vdev->dev.parent;
> +}
> +
> +/* Map one sg entry. */
> +static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
> +struct scatterlist *sg,
> +enum dma_data_direction direction)
> +{
> + if (!vring_use_dma_api(vq->vq.vdev))
> + return (dma_addr_t)sg_phys(sg);
> +
> + /*
> +  * We can't use dma_map_sg, because we don't use scatterlists in
> +  * the way it expects (we don't guarantee that the scatterlist
> +  * will exist for the lifetime of the mapping).
> +  */
> + return dma_map_page(vring_dma_dev(vq),
> + sg_page(sg), sg->offset, sg->length,
> + direction);
> +}
> +
> +static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
> +void *cpu_addr, size_t size,
> +enum dma_data_direction direction)
> +{
> + if (!vring_use_dma_api(vq->vq.vdev))
> + return (dma_addr_t)virt_to_phys(cpu_addr);
> +
> + return dma_map_single(vring_dma_dev(vq),
> +   cpu_addr, size, direction);
> +}
> +
> +static void vring_unmap_one(const struct vring_virtqueue *vq,
> + struct vring_desc *desc)
> +{
> + u16 flags;
> +
> + if (!vring_use_dma_api(vq->vq.vdev))
> + return;
> +
> + flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
> +
> + if (flags & VRING_DESC_F_INDIRECT) {
> + dma_unmap_single(vring_dma_dev(vq),
> +  virtio64_to_cpu(vq->vq.vdev, desc->addr),
> +  virtio32_to_cpu(vq->vq.vdev, desc->len),
> +  (flags & VRING_DESC_F_WRITE) ?
> +  DMA_FROM_DEVICE : DMA_TO_DEVICE);
> + } else {
> + dma_unmap_page(vring_dma_dev(vq),
> +virtio64_to_cpu(vq->vq.vdev, desc->addr),
> +virtio32_to_cpu(vq->vq.vdev, desc->len),
> +  (flags & VRING_DESC_F_WRITE) ?
> +  DMA_FROM_DEVICE : DMA_TO_DEVICE);

Re: [PATCH v7 5/9] virtio_ring: Support DMA APIs

2016-02-03 Thread Andy Lutomirski
On Feb 3, 2016 5:52 AM, "Michael S. Tsirkin"  wrote:
>
> On Tue, Feb 02, 2016 at 09:46:36PM -0800, Andy Lutomirski wrote:
> > virtio_ring currently sends the device (usually a hypervisor)
> > physical addresses of its I/O buffers.  This is okay when DMA
> > addresses and physical addresses are the same thing, but this isn't
> > always the case.  For example, this never works on Xen guests, and
> > it is likely to fail if a physical "virtio" device ever ends up
> > behind an IOMMU or swiotlb.
> >
> > The immediate use case for me is to enable virtio on Xen guests.
> > For that to work, we need vring to support DMA address translation
> > as well as a corresponding change to virtio_pci or to another
> > driver.
> >
> > Signed-off-by: Andy Lutomirski 
> > ---
> >  drivers/virtio/Kconfig   |   2 +-
> >  drivers/virtio/virtio_ring.c | 200 
> > ---
> >  tools/virtio/linux/dma-mapping.h |  17 
> >  3 files changed, 183 insertions(+), 36 deletions(-)
> >  create mode 100644 tools/virtio/linux/dma-mapping.h
> >
> > diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
> > index cab9f3f63a38..77590320d44c 100644
> > --- a/drivers/virtio/Kconfig
> > +++ b/drivers/virtio/Kconfig
> > @@ -60,7 +60,7 @@ config VIRTIO_INPUT
> >
> >   config VIRTIO_MMIO
> >   tristate "Platform bus driver for memory mapped virtio devices"
> > - depends on HAS_IOMEM
> > + depends on HAS_IOMEM && HAS_DMA
> >   select VIRTIO
> >   ---help---
> >This drivers provides support for memory mapped virtio
>
> What's this chunk doing here btw? Should be part of the mmio patch?
>

IIRC it was deliberate.  Making virtio depend on HAS_DMA didn't work
right because kconfig doesn't propagate dependencies through select
intelligently.  This patch makes core virtio depend on HAS_DMA, so I
added the dependency here, too.

--Andy


[PATCH v7 5/9] virtio_ring: Support DMA APIs

2016-02-02 Thread Andy Lutomirski
virtio_ring currently sends the device (usually a hypervisor)
physical addresses of its I/O buffers.  This is okay when DMA
addresses and physical addresses are the same thing, but this isn't
always the case.  For example, this never works on Xen guests, and
it is likely to fail if a physical "virtio" device ever ends up
behind an IOMMU or swiotlb.

The immediate use case for me is to enable virtio on Xen guests.
For that to work, we need vring to support DMA address translation
as well as a corresponding change to virtio_pci or to another
driver.

Signed-off-by: Andy Lutomirski 
---
 drivers/virtio/Kconfig   |   2 +-
 drivers/virtio/virtio_ring.c | 200 ---
 tools/virtio/linux/dma-mapping.h |  17 
 3 files changed, 183 insertions(+), 36 deletions(-)
 create mode 100644 tools/virtio/linux/dma-mapping.h

diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index cab9f3f63a38..77590320d44c 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -60,7 +60,7 @@ config VIRTIO_INPUT
 
  config VIRTIO_MMIO
tristate "Platform bus driver for memory mapped virtio devices"
-   depends on HAS_IOMEM
+   depends on HAS_IOMEM && HAS_DMA
select VIRTIO
---help---
 This drivers provides support for memory mapped virtio
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ab0be6c084f6..9abc008ff7ea 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -24,6 +24,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #ifdef DEBUG
 /* For development, we want to crash whenever the ring is screwed. */
@@ -54,6 +55,11 @@
 #define END_USE(vq)
 #endif
 
+struct vring_desc_state {
+   void *data; /* Data for callback. */
+   struct vring_desc *indir_desc;  /* Indirect descriptor, if any. */
+};
+
 struct vring_virtqueue {
struct virtqueue vq;
 
@@ -98,8 +104,8 @@ struct vring_virtqueue {
ktime_t last_add_time;
 #endif
 
-   /* Tokens for callbacks. */
-   void *data[];
+   /* Per-descriptor state. */
+   struct vring_desc_state desc_state[];
 };
 
 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
@@ -128,6 +134,79 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
return false;
 }
 
+/*
+ * The DMA ops on various arches are rather gnarly right now, and
+ * making all of the arch DMA ops work on the vring device itself
+ * is a mess.  For now, we use the parent device for DMA ops.
+ */
+struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+{
+   return vq->vq.vdev->dev.parent;
+}
+
+/* Map one sg entry. */
+static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
+  struct scatterlist *sg,
+  enum dma_data_direction direction)
+{
+   if (!vring_use_dma_api(vq->vq.vdev))
+   return (dma_addr_t)sg_phys(sg);
+
+   /*
+* We can't use dma_map_sg, because we don't use scatterlists in
+* the way it expects (we don't guarantee that the scatterlist
+* will exist for the lifetime of the mapping).
+*/
+   return dma_map_page(vring_dma_dev(vq),
+   sg_page(sg), sg->offset, sg->length,
+   direction);
+}
+
+static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
+  void *cpu_addr, size_t size,
+  enum dma_data_direction direction)
+{
+   if (!vring_use_dma_api(vq->vq.vdev))
+   return (dma_addr_t)virt_to_phys(cpu_addr);
+
+   return dma_map_single(vring_dma_dev(vq),
+ cpu_addr, size, direction);
+}
+
+static void vring_unmap_one(const struct vring_virtqueue *vq,
+   struct vring_desc *desc)
+{
+   u16 flags;
+
+   if (!vring_use_dma_api(vq->vq.vdev))
+   return;
+
+   flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
+
+   if (flags & VRING_DESC_F_INDIRECT) {
+   dma_unmap_single(vring_dma_dev(vq),
+virtio64_to_cpu(vq->vq.vdev, desc->addr),
+virtio32_to_cpu(vq->vq.vdev, desc->len),
+(flags & VRING_DESC_F_WRITE) ?
+DMA_FROM_DEVICE : DMA_TO_DEVICE);
+   } else {
+   dma_unmap_page(vring_dma_dev(vq),
+  virtio64_to_cpu(vq->vq.vdev, desc->addr),
+  virtio32_to_cpu(vq->vq.vdev, desc->len),
+  (flags & VRING_DESC_F_WRITE) ?
+  DMA_FROM_DEVICE : DMA_TO_DEVICE);
+   }
+}
+
+static int vring_mapping_error(const struct vring_virtqueue *vq,
+  dma_addr_t addr)
+{
+   if (!vring_use_dma_api(vq->vq.vdev))
+   return 0;
+
+   

[PATCH v7 5/9] virtio_ring: Support DMA APIs

2016-02-02 Thread Andy Lutomirski
virtio_ring currently sends the device (usually a hypervisor)
physical addresses of its I/O buffers.  This is okay when DMA
addresses and physical addresses are the same thing, but this isn't
always the case.  For example, this never works on Xen guests, and
it is likely to fail if a physical "virtio" device ever ends up
behind an IOMMU or swiotlb.

The immediate use case for me is to enable virtio on Xen guests.
For that to work, we need vring to support DMA address translation
as well as a corresponding change to virtio_pci or to another
driver.

Signed-off-by: Andy Lutomirski 
---
 drivers/virtio/Kconfig   |   2 +-
 drivers/virtio/virtio_ring.c | 200 ---
 tools/virtio/linux/dma-mapping.h |  17 
 3 files changed, 183 insertions(+), 36 deletions(-)
 create mode 100644 tools/virtio/linux/dma-mapping.h

diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index cab9f3f63a38..77590320d44c 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -60,7 +60,7 @@ config VIRTIO_INPUT
 
  config VIRTIO_MMIO
tristate "Platform bus driver for memory mapped virtio devices"
-   depends on HAS_IOMEM
+   depends on HAS_IOMEM && HAS_DMA
select VIRTIO
---help---
 This drivers provides support for memory mapped virtio
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ab0be6c084f6..9abc008ff7ea 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -24,6 +24,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #ifdef DEBUG
 /* For development, we want to crash whenever the ring is screwed. */
@@ -54,6 +55,11 @@
 #define END_USE(vq)
 #endif
 
+struct vring_desc_state {
+   void *data; /* Data for callback. */
+   struct vring_desc *indir_desc;  /* Indirect descriptor, if any. */
+};
+
 struct vring_virtqueue {
struct virtqueue vq;
 
@@ -98,8 +104,8 @@ struct vring_virtqueue {
ktime_t last_add_time;
 #endif
 
-   /* Tokens for callbacks. */
-   void *data[];
+   /* Per-descriptor state. */
+   struct vring_desc_state desc_state[];
 };
 
 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
@@ -128,6 +134,79 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
return false;
 }
 
+/*
+ * The DMA ops on various arches are rather gnarly right now, and
+ * making all of the arch DMA ops work on the vring device itself
+ * is a mess.  For now, we use the parent device for DMA ops.
+ */
+struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+{
+   return vq->vq.vdev->dev.parent;
+}
+
+/* Map one sg entry. */
+static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
+  struct scatterlist *sg,
+  enum dma_data_direction direction)
+{
+   if (!vring_use_dma_api(vq->vq.vdev))
+   return (dma_addr_t)sg_phys(sg);
+
+   /*
+* We can't use dma_map_sg, because we don't use scatterlists in
+* the way it expects (we don't guarantee that the scatterlist
+* will exist for the lifetime of the mapping).
+*/
+   return dma_map_page(vring_dma_dev(vq),
+   sg_page(sg), sg->offset, sg->length,
+   direction);
+}
+
+static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
+  void *cpu_addr, size_t size,
+  enum dma_data_direction direction)
+{
+   if (!vring_use_dma_api(vq->vq.vdev))
+   return (dma_addr_t)virt_to_phys(cpu_addr);
+
+   return dma_map_single(vring_dma_dev(vq),
+ cpu_addr, size, direction);
+}
+
+static void vring_unmap_one(const struct vring_virtqueue *vq,
+   struct vring_desc *desc)
+{
+   u16 flags;
+
+   if (!vring_use_dma_api(vq->vq.vdev))
+   return;
+
+   flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
+
+   if (flags & VRING_DESC_F_INDIRECT) {
+   dma_unmap_single(vring_dma_dev(vq),
+virtio64_to_cpu(vq->vq.vdev, desc->addr),
+virtio32_to_cpu(vq->vq.vdev, desc->len),
+(flags & VRING_DESC_F_WRITE) ?
+DMA_FROM_DEVICE : DMA_TO_DEVICE);
+   } else {
+   dma_unmap_page(vring_dma_dev(vq),
+  virtio64_to_cpu(vq->vq.vdev, desc->addr),
+  virtio32_to_cpu(vq->vq.vdev, desc->len),
+  (flags & VRING_DESC_F_WRITE) ?
+  DMA_FROM_DEVICE : DMA_TO_DEVICE);
+   }
+}
+
+static int vring_mapping_error(const struct vring_virtqueue *vq,
+  dma_addr_t addr)
+{
+   if (!vring_use_dma_api(vq->vq.vdev))
+   return 0;