Re: [PATCH 5/6] media: uvcvideo: Use dma_alloc_noncontiguous API

2020-11-24, Ricardo Ribalda
Hi Robin

On Tue, Nov 24, 2020 at 5:29 PM Robin Murphy  wrote:
>
> On 2020-11-24 15:38, Ricardo Ribalda wrote:
> > On architectures where there is no coherent caching, such as ARM, use
> > the dma_alloc_noncontiguous API and manually handle the cache flushing
> > using dma_sync_single().
> >
> > With this patch, on the affected architectures, we measure up to a 20x
> > performance improvement in uvc_video_copy_data_work().
> >
> > Signed-off-by: Ricardo Ribalda 
> > ---
> >   drivers/media/usb/uvc/uvc_video.c | 74 ++-
> >   drivers/media/usb/uvc/uvcvideo.h  |  1 +
> >   2 files changed, 63 insertions(+), 12 deletions(-)
> >
> > diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
> > index a6a441d92b94..9e90b261428a 100644
> > --- a/drivers/media/usb/uvc/uvc_video.c
> > +++ b/drivers/media/usb/uvc/uvc_video.c
> > @@ -1490,6 +1490,11 @@ static void uvc_video_encode_bulk(struct uvc_urb *uvc_urb,
> >   urb->transfer_buffer_length = stream->urb_size - len;
> >   }
> >
> > +static inline struct device *stream_to_dmadev(struct uvc_streaming *stream)
> > +{
> > + return stream->dev->udev->bus->controller->parent;
> > +}
> > +
> >   static void uvc_video_complete(struct urb *urb)
> >   {
> >   struct uvc_urb *uvc_urb = urb->context;
> > @@ -1539,6 +1544,11 @@ static void uvc_video_complete(struct urb *urb)
> >* Process the URB headers, and optionally queue expensive memcpy tasks
> >* to be deferred to a work queue.
> >*/
> > + if (uvc_urb->pages)
> > + dma_sync_single_for_cpu(stream_to_dmadev(stream),
> > + urb->transfer_dma,
> > + urb->transfer_buffer_length,
> > + DMA_FROM_DEVICE);
>
> This doesn't work. Even in iommu-dma, the streaming API still expects to
> work on physically-contiguous memory that could have been passed to
> dma_map_single() in the first place. As-is, this will invalidate
> transfer_buffer_length bytes from the start of the *first* physical
> page, and thus destroy random other data if lines from subsequent
> unrelated pages are dirty in caches.
>
> The only feasible way to do a DMA sync on disjoint pages in a single
> call is with a scatterlist.

Thanks for pointing this out. I guess I was lucky on my hardware and
the areas were always contiguous.

Will rework and send back to the list.

Thanks again.
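
For the record, a rough sketch of the direction the rework could take
(a sketch only, not the eventual v2: it assumes the allocation path
also keeps the struct sg_table describing the pages, stored in a
hypothetical uvc_urb->sgt field, so the whole buffer can be synced in
one call):

/*
 * Hypothetical sketch: sync all of the noncontiguous pages in one
 * call through the sg_table kept from allocation time, rather than
 * dma_sync_single() over the first physical page only.
 */
static void uvc_urb_dma_sync_for_cpu(struct uvc_urb *uvc_urb)
{
	struct uvc_streaming *stream = uvc_urb->stream;

	dma_sync_sgtable_for_cpu(stream_to_dmadev(stream), uvc_urb->sgt,
				 DMA_FROM_DEVICE);
}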

>
> Robin.
>
> >   stream->decode(uvc_urb, buf, buf_meta);
> >
> >   /* If no async work is needed, resubmit the URB immediately. */
> > @@ -1566,8 +1576,15 @@ static void uvc_free_urb_buffers(struct uvc_streaming *stream)
> >   continue;
> >
> >   #ifndef CONFIG_DMA_NONCOHERENT
> > - usb_free_coherent(stream->dev->udev, stream->urb_size,
> > -   uvc_urb->buffer, uvc_urb->dma);
> > + if (uvc_urb->pages) {
> > + vunmap(uvc_urb->buffer);
> > + dma_free_noncontiguous(stream_to_dmadev(stream),
> > +stream->urb_size,
> > +uvc_urb->pages, uvc_urb->dma);
> > + } else {
> > + usb_free_coherent(stream->dev->udev, stream->urb_size,
> > +   uvc_urb->buffer, uvc_urb->dma);
> > + }
> >   #else
> >   kfree(uvc_urb->buffer);
> >   #endif
> > @@ -1577,6 +1594,47 @@ static void uvc_free_urb_buffers(struct uvc_streaming *stream)
> >   stream->urb_size = 0;
> >   }
> >
> > +#ifndef CONFIG_DMA_NONCOHERENT
> > +static bool uvc_alloc_urb_buffer(struct uvc_streaming *stream,
> > +  struct uvc_urb *uvc_urb, gfp_t gfp_flags)
> > +{
> > + struct device *dma_dev = stream_to_dmadev(stream);
> > +
> > + if (!dma_can_alloc_noncontiguous(dma_dev)) {
> > + uvc_urb->buffer = usb_alloc_coherent(stream->dev->udev,
> > +  stream->urb_size,
> > +  gfp_flags | __GFP_NOWARN,
> > +  &uvc_urb->dma);
> > + return uvc_urb->buffer != NULL;
> > + }
> > +
> > + uvc_urb->pages = dma_alloc_noncontiguous(dma_dev, stream->urb_size,
> > +  &uvc_urb->dma,
> > +  gfp_flags | __GFP_NOWARN, 0);
> > + if (!uvc_urb->pages)
> > + return false;
> > +
> > + uvc_urb->buffer = vmap(uvc_urb->pages,
> > +PAGE_ALIGN(stream->urb_size) >> PAGE_SHIFT,
> > +VM_DMA_COHERENT, PAGE_KERNEL);
> > + if (!uvc_urb->buffer) {
> > + dma_free_noncontiguous(dma_dev, stream->urb_size,
> > +uvc_urb->pages, 

Re: [PATCH 5/6] media: uvcvideo: Use dma_alloc_noncontiguous API

2020-11-24, Robin Murphy

On 2020-11-24 15:38, Ricardo Ribalda wrote:

On architectures where there is no coherent caching, such as ARM, use
the dma_alloc_noncontiguous API and manually handle the cache flushing
using dma_sync_single().

With this patch, on the affected architectures, we measure up to a 20x
performance improvement in uvc_video_copy_data_work().

Signed-off-by: Ricardo Ribalda 
---
  drivers/media/usb/uvc/uvc_video.c | 74 ++-
  drivers/media/usb/uvc/uvcvideo.h  |  1 +
  2 files changed, 63 insertions(+), 12 deletions(-)

diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index a6a441d92b94..9e90b261428a 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1490,6 +1490,11 @@ static void uvc_video_encode_bulk(struct uvc_urb *uvc_urb,
urb->transfer_buffer_length = stream->urb_size - len;
  }
  
+static inline struct device *stream_to_dmadev(struct uvc_streaming *stream)
+{
+   return stream->dev->udev->bus->controller->parent;
+}
+
  static void uvc_video_complete(struct urb *urb)
  {
struct uvc_urb *uvc_urb = urb->context;
@@ -1539,6 +1544,11 @@ static void uvc_video_complete(struct urb *urb)
 * Process the URB headers, and optionally queue expensive memcpy tasks
 * to be deferred to a work queue.
 */
+   if (uvc_urb->pages)
+   dma_sync_single_for_cpu(stream_to_dmadev(stream),
+   urb->transfer_dma,
+   urb->transfer_buffer_length,
+   DMA_FROM_DEVICE);


This doesn't work. Even in iommu-dma, the streaming API still expects to 
work on physically-contiguous memory that could have been passed to 
dma_map_single() in the first place. As-is, this will invalidate 
transfer_buffer_length bytes from the start of the *first* physical 
page, and thus destroy random other data if lines from subsequent 
unrelated pages are dirty in caches.


The only feasible way to do a DMA sync on disjoint pages in a single 
call is with a scatterlist.
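
A minimal sketch of that single-call form, assuming the buffer is
described by a struct sg_table (here "sgt") kept around from
allocation time:

	/* CPU is about to read: sync over every segment at once. */
	dma_sync_sg_for_cpu(dma_dev, sgt->sgl, sgt->orig_nents,
			    DMA_FROM_DEVICE);
	/* ...CPU accesses the buffer... */
	/* Hand ownership back before the device writes again. */
	dma_sync_sg_for_device(dma_dev, sgt->sgl, sgt->orig_nents,
			       DMA_FROM_DEVICE);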


Robin.


stream->decode(uvc_urb, buf, buf_meta);
  
  	/* If no async work is needed, resubmit the URB immediately. */

@@ -1566,8 +1576,15 @@ static void uvc_free_urb_buffers(struct uvc_streaming *stream)
continue;
  
  #ifndef CONFIG_DMA_NONCOHERENT

-   usb_free_coherent(stream->dev->udev, stream->urb_size,
- uvc_urb->buffer, uvc_urb->dma);
+   if (uvc_urb->pages) {
+   vunmap(uvc_urb->buffer);
+   dma_free_noncontiguous(stream_to_dmadev(stream),
+  stream->urb_size,
+  uvc_urb->pages, uvc_urb->dma);
+   } else {
+   usb_free_coherent(stream->dev->udev, stream->urb_size,
+ uvc_urb->buffer, uvc_urb->dma);
+   }
  #else
kfree(uvc_urb->buffer);
  #endif
@@ -1577,6 +1594,47 @@ static void uvc_free_urb_buffers(struct uvc_streaming *stream)
stream->urb_size = 0;
  }
  
+#ifndef CONFIG_DMA_NONCOHERENT

+static bool uvc_alloc_urb_buffer(struct uvc_streaming *stream,
+struct uvc_urb *uvc_urb, gfp_t gfp_flags)
+{
+   struct device *dma_dev = stream_to_dmadev(stream);
+
+   if (!dma_can_alloc_noncontiguous(dma_dev)) {
+   uvc_urb->buffer = usb_alloc_coherent(stream->dev->udev,
+stream->urb_size,
+gfp_flags | __GFP_NOWARN,
+&uvc_urb->dma);
+   return uvc_urb->buffer != NULL;
+   }
+
+   uvc_urb->pages = dma_alloc_noncontiguous(dma_dev, stream->urb_size,
+&uvc_urb->dma,
+gfp_flags | __GFP_NOWARN, 0);
+   if (!uvc_urb->pages)
+   return false;
+
+   uvc_urb->buffer = vmap(uvc_urb->pages,
+  PAGE_ALIGN(stream->urb_size) >> PAGE_SHIFT,
+  VM_DMA_COHERENT, PAGE_KERNEL);
+   if (!uvc_urb->buffer) {
+   dma_free_noncontiguous(dma_dev, stream->urb_size,
+  uvc_urb->pages, uvc_urb->dma);
+   return false;
+   }
+
+   return true;
+}
+#else
+static bool uvc_alloc_urb_buffer(struct uvc_streaming *stream,
+struct uvc_urb *uvc_urb, gfp_t gfp_flags)
+{
+   uvc_urb->buffer = kmalloc(stream->urb_size, gfp_flags | __GFP_NOWARN);
+
+   return uvc_urb->buffer != NULL;
+}
+#endif
+
  /*
   * Allocate transfer buffers. This function can be called with buffers
   * already allocated when resuming from suspend, in which case it will
@@ 

[PATCH 5/6] media: uvcvideo: Use dma_alloc_noncontiguous API

2020-11-24, Ricardo Ribalda
On architectures where there is no coherent caching, such as ARM, use
the dma_alloc_noncontiguous API and manually handle the cache flushing
using dma_sync_single().

With this patch, on the affected architectures, we measure up to a 20x
performance improvement in uvc_video_copy_data_work().

Signed-off-by: Ricardo Ribalda 
---
 drivers/media/usb/uvc/uvc_video.c | 74 ++-
 drivers/media/usb/uvc/uvcvideo.h  |  1 +
 2 files changed, 63 insertions(+), 12 deletions(-)
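
(Background for the dma_sync_single() approach above: noncoherent
streaming DMA follows a strict ownership handshake, sketched below for
a physically contiguous buffer; dev, dma_handle, buf, len and dst are
placeholders, not names from this driver.)

	/* Take ownership back from the device before the CPU reads. */
	dma_sync_single_for_cpu(dev, dma_handle, len, DMA_FROM_DEVICE);
	memcpy(dst, buf, len);	/* CPU may now touch the data safely. */
	/* Return ownership so the device can DMA into the buffer again. */
	dma_sync_single_for_device(dev, dma_handle, len, DMA_FROM_DEVICE);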

diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index a6a441d92b94..9e90b261428a 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1490,6 +1490,11 @@ static void uvc_video_encode_bulk(struct uvc_urb *uvc_urb,
urb->transfer_buffer_length = stream->urb_size - len;
 }
 
+static inline struct device *stream_to_dmadev(struct uvc_streaming *stream)
+{
+   return stream->dev->udev->bus->controller->parent;
+}
+
 static void uvc_video_complete(struct urb *urb)
 {
struct uvc_urb *uvc_urb = urb->context;
@@ -1539,6 +1544,11 @@ static void uvc_video_complete(struct urb *urb)
 * Process the URB headers, and optionally queue expensive memcpy tasks
 * to be deferred to a work queue.
 */
+   if (uvc_urb->pages)
+   dma_sync_single_for_cpu(stream_to_dmadev(stream),
+   urb->transfer_dma,
+   urb->transfer_buffer_length,
+   DMA_FROM_DEVICE);
stream->decode(uvc_urb, buf, buf_meta);
 
/* If no async work is needed, resubmit the URB immediately. */
@@ -1566,8 +1576,15 @@ static void uvc_free_urb_buffers(struct uvc_streaming *stream)
continue;
 
 #ifndef CONFIG_DMA_NONCOHERENT
-   usb_free_coherent(stream->dev->udev, stream->urb_size,
- uvc_urb->buffer, uvc_urb->dma);
+   if (uvc_urb->pages) {
+   vunmap(uvc_urb->buffer);
+   dma_free_noncontiguous(stream_to_dmadev(stream),
+  stream->urb_size,
+  uvc_urb->pages, uvc_urb->dma);
+   } else {
+   usb_free_coherent(stream->dev->udev, stream->urb_size,
+ uvc_urb->buffer, uvc_urb->dma);
+   }
 #else
kfree(uvc_urb->buffer);
 #endif
@@ -1577,6 +1594,47 @@ static void uvc_free_urb_buffers(struct uvc_streaming *stream)
stream->urb_size = 0;
 }
 
+#ifndef CONFIG_DMA_NONCOHERENT
+static bool uvc_alloc_urb_buffer(struct uvc_streaming *stream,
+struct uvc_urb *uvc_urb, gfp_t gfp_flags)
+{
+   struct device *dma_dev = stream_to_dmadev(stream);
+
+   if (!dma_can_alloc_noncontiguous(dma_dev)) {
+   uvc_urb->buffer = usb_alloc_coherent(stream->dev->udev,
+stream->urb_size,
+gfp_flags | __GFP_NOWARN,
+&uvc_urb->dma);
+   return uvc_urb->buffer != NULL;
+   }
+
+   uvc_urb->pages = dma_alloc_noncontiguous(dma_dev, stream->urb_size,
+&uvc_urb->dma,
+gfp_flags | __GFP_NOWARN, 0);
+   if (!uvc_urb->pages)
+   return false;
+
+   uvc_urb->buffer = vmap(uvc_urb->pages,
+  PAGE_ALIGN(stream->urb_size) >> PAGE_SHIFT,
+  VM_DMA_COHERENT, PAGE_KERNEL);
+   if (!uvc_urb->buffer) {
+   dma_free_noncontiguous(dma_dev, stream->urb_size,
+  uvc_urb->pages, uvc_urb->dma);
+   return false;
+   }
+
+   return true;
+}
+#else
+static bool uvc_alloc_urb_buffer(struct uvc_streaming *stream,
+struct uvc_urb *uvc_urb, gfp_t gfp_flags)
+{
+   uvc_urb->buffer = kmalloc(stream->urb_size, gfp_flags | __GFP_NOWARN);
+
+   return uvc_urb->buffer != NULL;
+}
+#endif
+
 /*
  * Allocate transfer buffers. This function can be called with buffers
  * already allocated when resuming from suspend, in which case it will
@@ -1607,19 +1665,11 @@ static int uvc_alloc_urb_buffers(struct uvc_streaming *stream,
 
	/* Retry allocations until one succeeds. */
for (; npackets > 1; npackets /= 2) {
+   stream->urb_size = psize * npackets;
for (i = 0; i < UVC_URBS; ++i) {
		struct uvc_urb *uvc_urb = &stream->uvc_urb[i];
 
-   stream->urb_size = psize * npackets;
-#ifndef CONFIG_DMA_NONCOHERENT
-   uvc_urb->buffer = usb_alloc_coherent(
-   stream->dev->udev,