On Wed, Jan 11, 2023 at 02:46:20PM +0000, Eli Cohen wrote:
> > From: Michael S. Tsirkin <[email protected]>
> > Sent: Wednesday, 11 January 2023 15:53
> > To: Eli Cohen <[email protected]>
> > Cc: Jason Wang <[email protected]>; [email protected];
> > [email protected]; [email protected];
> > [email protected]
> > Subject: Re: [PATCH 1/5] virtio_ring: per virtqueue dma device
> > 
> > On Wed, Jan 11, 2023 at 01:32:20PM +0000, Eli Cohen wrote:
> > > > From: Jason Wang <[email protected]>
> > > > Sent: Wednesday, 11 January 2023 8:28
> > > > To: [email protected]; [email protected]
> > > > Cc: Eli Cohen <[email protected]>; [email protected];
> > > > [email protected]; [email protected];
> > > > [email protected]
> > > > Subject: [PATCH 1/5] virtio_ring: per virtqueue dma device
> > > >
> > > > This patch introduces a per virtqueue dma device. This will be used
> > > > for virtio devices whose virtqueue are backed by different underlayer
> > > > devices.
> > > >
> > > > One example is vDPA, where the control virtqueue could be
> > > > implemented through software mediation.
> > > >
> > > > Some of the work has actually been done before via helpers like
> > > > vring_dma_device(). The work left is:
> > > >
> > > > - Let vring_dma_device() return the per virtqueue dma device instead
> > > >   of the vdev's parent.
> > > > - Allow passing a dma_device when creating the virtqueue through a new
> > > >   helper, old vring creation helper will keep using vdev's parent.
> > > >
> > > > Signed-off-by: Jason Wang <[email protected]>
> > > > ---
> > > >  drivers/virtio/virtio_ring.c | 133 ++++++++++++++++++++++++-----------
> > > >  include/linux/virtio_ring.h  |  16 +++++
> > > >  2 files changed, 109 insertions(+), 40 deletions(-)
> > > >
> > > > diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> > > > index 723c4e29e1d3..41144b5246a8 100644
> > > > --- a/drivers/virtio/virtio_ring.c
> > > > +++ b/drivers/virtio/virtio_ring.c
> > > > @@ -202,6 +202,9 @@ struct vring_virtqueue {
> > > >         /* DMA, allocation, and size information */
> > > >         bool we_own_ring;
> > > >
> > > > +       /* Device used for doing DMA */
> > > > +       struct device *dma_dev;
> > > > +
> > > >  #ifdef DEBUG
> > > >         /* They're supposed to lock for us. */
> > > >         unsigned int in_use;
> > > > @@ -219,7 +222,8 @@ static struct virtqueue
> > > > *__vring_new_virtqueue(unsigned int index,
> > > >                                                bool context,
> > > >                                                bool (*notify)(struct 
> > > > virtqueue *),
> > > >                                                void (*callback)(struct 
> > > > virtqueue
> > > > *),
> > > > -                                              const char *name);
> > > > +                                              const char *name,
> > > > +                                              struct device *dma_dev);
> > > >  static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int 
> > > > num);
> > > >  static void vring_free(struct virtqueue *_vq);
> > > >
> > > > @@ -297,10 +301,11 @@ size_t virtio_max_dma_size(struct virtio_device
> > > > *vdev)
> > > >  EXPORT_SYMBOL_GPL(virtio_max_dma_size);
> > > >
> > > >  static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
> > > > -                             dma_addr_t *dma_handle, gfp_t flag)
> > > > +                              dma_addr_t *dma_handle, gfp_t flag,
> > > > +                              struct device *dma_dev)
> > > >  {
> > > >         if (vring_use_dma_api(vdev)) {
> > > > -               return dma_alloc_coherent(vdev->dev.parent, size,
> > > > +               return dma_alloc_coherent(dma_dev, size,
> > > >                                           dma_handle, flag);
> > > >         } else {
> > > >                 void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
> > > > @@ -330,10 +335,11 @@ static void *vring_alloc_queue(struct
> > virtio_device
> > > > *vdev, size_t size,
> > > >  }
> > > >
> > > >  static void vring_free_queue(struct virtio_device *vdev, size_t size,
> > > > -                            void *queue, dma_addr_t dma_handle)
> > > > +                            void *queue, dma_addr_t dma_handle,
> > > > +                            struct device *dma_dev)
> > > >  {
> > > >         if (vring_use_dma_api(vdev))
> > > > -               dma_free_coherent(vdev->dev.parent, size, queue,
> > > > dma_handle);
> > > > +               dma_free_coherent(dma_dev, size, queue, dma_handle);
> > > >         else
> > > >                 free_pages_exact(queue, PAGE_ALIGN(size));
> > > >  }
> > > > @@ -341,11 +347,11 @@ static void vring_free_queue(struct
> > virtio_device
> > > > *vdev, size_t size,
> > > >  /*
> > > >   * The DMA ops on various arches are rather gnarly right now, and
> > > >   * making all of the arch DMA ops work on the vring device itself
> > > > - * is a mess.  For now, we use the parent device for DMA ops.
> > > > + * is a mess.
> > > >   */
> > > >  static inline struct device *vring_dma_dev(const struct vring_virtqueue
> > *vq)
> > > >  {
> > > > -       return vq->vq.vdev->dev.parent;
> > > > +       return vq->dma_dev;
> > > >  }
> > >
> > > How about getting rid of this function and just use vq->dma_dev?
> > 
> > Will make the patch even bigger than it is.
> 
> I can't see how this can happen. You get rid of the function and you lose 
> about 10 lines overall. What am I missing?

This is an existing function; if you drop it, you need to refactor
more of the existing code. No?

> > If you do patch on top pls.
> 

_______________________________________________
Virtualization mailing list
[email protected]
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

Reply via email to