On Sat, Dec 05, 2015 at 12:36:51PM -0500, Alan Stern wrote:
> I meant that this "if" statement should test only uurb_start.  If the 
> test succeeds, a nested "if" statement should test buffer_length and 
> return an error if it is too big.
> 
> That's as opposed to the way you have it now, where if buffer_length is 
> too big you return NULL.  The system would then try to treat the 
> coherent memory as a normal memory buffer, which may not be a good 
> idea.

OK. Why is it a problem to treat it as a normal memory buffer? (I understand
it would be slower, of course.)

>> I can't see
>> offhand another way to call it only once during the function, save for a bool
>> that says if we called it or not.
> Simply call it at the spot where you initialize as->usbm to NULL.  Then 
> it will always run exactly once.

OK, done.

> You don't really need to do it earlier.  An unnecessary calculation of
> num_sgs won't hurt.

Is it unnecessary? I thought the test was really to force num_sgs==0 for the
DMA case, not to save a little arithmetic.
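
For the record, my reading of your suggestion (keep the computation as it is
today and only force num_sgs to zero once as->usbm is known) would be
something like:

	num_sgs = DIV_ROUND_UP(uurb->buffer_length, USB_SG_SIZE);
	if (num_sgs == 1 || num_sgs > ps->dev->bus->sg_tablesize)
		num_sgs = 0;
	...
	/* later, after find_memory_area() has set as->usbm */
	if (as->usbm)
		num_sgs = 0;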

> Comments below.  I'll skip much of the patch because it looks pretty
> good.

Good, I guess we're finally converging on something acceptable to all parties :-)

>> +    ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory));
>> +    if (ret) {
>> +            kfree(usbm);
>> +            return ret;
>> +    }
> This part should come first.  If allocating usbm would put us over the
> memory limit, we want to know before we do the allocation.

Done.

>> +    usbm->vm_start = vma->vm_start;
>> +    usbm->vma_use_count = 1;
> I would move these two lines before the remap_pfn_range() call.  Then
> you can use &usbm->vma_use_count as the second argument to
> dec_usb_memory_use_count() and avoid the awkward "if (count)" test at 
> the start of that routine.

Done.

> So this would be:
> 
>               if (uurb_start >= iter->vm_start &&
>                               uurb_start < iter->vm_start + iter->size) {
>                       if (uurb->buffer_length > iter->vm_start + iter->size -
>                                       uurb_start) {
>                               usbm = ERR_PTR(-EINVAL);
>                       } else {
>                               usbm = iter;
>                               usbm->urb_use_count++;
>                       }
>                       break;
>               }
> 
> (That's with the file's convention for continuation lines.)

Done.

> Much of this function is still subject to change, so I won't look at it 
> now.

OK.

/* Steinar */
-- 
Homepage: https://www.sesse.net/
From 53f132d6fcd9117f50647e604487593adfe85990 Mon Sep 17 00:00:00 2001
From: "Steinar H. Gunderson" <se...@google.com>
Date: Thu, 26 Nov 2015 01:19:13 +0100
Subject: [PATCH] Add support for usbfs zerocopy.

Add a new interface for userspace to preallocate memory that can be
used with usbfs. This gives two primary benefits:

 - Zerocopy; data no longer needs to be copied between userspace and the
   kernel, but can instead be read directly by the driver from userspace's
   buffers. This works for both bulk and isochronous transfers; isochronous
   transfers also no longer need to memset() the buffer to zero to avoid
   leaking kernel data.

 - Once the buffers are allocated, USB transfers can no longer fail due to
   memory fragmentation; previously, long-running programs could run into
   problems finding a large enough contiguous memory chunk, especially on
   embedded systems or at high rates.

Memory is allocated by using mmap() against the usbfs file descriptor,
and similarly deallocated by munmap(). Once memory has been allocated,
passing pointers into it as the buffer of a bulk or isochronous transfer
automatically gives you zerocopy behavior. Note that this also means you cannot
modify outgoing data until the transfer is complete. The same holds for
data on the same cache lines as incoming data; DMA modifying them at the
same time could lead to your changes being overwritten.
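
A minimal sketch of the intended userspace flow (hypothetical device node
and endpoint, error handling omitted; the ioctls and struct usbdevfs_urb
fields are the existing ones from <linux/usbdevice_fs.h>):

  #include <fcntl.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <linux/usbdevice_fs.h>

  int main(void)
  {
      int fd = open("/dev/bus/usb/001/002", O_RDWR); /* hypothetical node */
      size_t len = 65536;

      /* The mapping is backed by usb_alloc_coherent() in the kernel. */
      void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

      struct usbdevfs_urb urb;
      memset(&urb, 0, sizeof(urb));
      urb.type = USBDEVFS_URB_TYPE_BULK;
      urb.endpoint = 0x81;       /* hypothetical bulk IN endpoint */
      urb.buffer = buf;          /* points into the mmap()ed region */
      urb.buffer_length = len;
      ioctl(fd, USBDEVFS_SUBMITURB, &urb); /* no bounce buffer, no copy */

      /* Reap with USBDEVFS_REAPURB as usual, then munmap(buf, len). */
      return 0;
  }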

Largely based on a patch by Markus Rechberger with some updates. The original
patch can be found at:

  http://sundtek.de/support/devio_mmap_v0.4.diff

Signed-off-by: Steinar H. Gunderson <se...@google.com>
Signed-off-by: Markus Rechberger <mrechber...@gmail.com>
---
 drivers/usb/core/devio.c | 202 ++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 192 insertions(+), 10 deletions(-)

diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 38ae877c..6b253e8 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -50,6 +50,7 @@
 #include <linux/user_namespace.h>
 #include <linux/scatterlist.h>
 #include <linux/uaccess.h>
+#include <linux/dma-mapping.h>
 #include <asm/byteorder.h>
 #include <linux/moduleparam.h>
 
@@ -69,6 +70,7 @@ struct usb_dev_state {
 	spinlock_t lock;            /* protects the async urb lists */
 	struct list_head async_pending;
 	struct list_head async_completed;
+	struct list_head memory_list;
 	wait_queue_head_t wait;     /* wake up if a request completed */
 	unsigned int discsignr;
 	struct pid *disc_pid;
@@ -79,6 +81,17 @@ struct usb_dev_state {
 	u32 disabled_bulk_eps;
 };
 
+struct usb_memory {
+	struct list_head memlist;
+	int vma_use_count;
+	int urb_use_count;
+	u32 size;
+	void *mem;
+	dma_addr_t dma_handle;
+	unsigned long vm_start;
+	struct usb_dev_state *ps;
+};
+
 struct async {
 	struct list_head asynclist;
 	struct usb_dev_state *ps;
@@ -89,6 +102,7 @@ struct async {
 	void __user *userbuffer;
 	void __user *userurb;
 	struct urb *urb;
+	struct usb_memory *usbm;
 	unsigned int mem_usage;
 	int status;
 	u32 secid;
@@ -157,6 +171,107 @@ static int connected(struct usb_dev_state *ps)
 			ps->dev->state != USB_STATE_NOTATTACHED);
 }
 
+static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count)
+{
+	struct usb_dev_state *ps = usbm->ps;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ps->lock, flags);
+	--*count;
+	if (usbm->urb_use_count == 0 && usbm->vma_use_count == 0) {
+		list_del_init(&usbm->memlist);
+		spin_unlock_irqrestore(&ps->lock, flags);
+
+		usb_free_coherent(ps->dev, usbm->size, usbm->mem,
+				usbm->dma_handle);
+		usbfs_decrease_memory_usage(
+			usbm->size + sizeof(struct usb_memory));
+		kfree(usbm);
+	} else {
+		spin_unlock_irqrestore(&ps->lock, flags);
+	}
+}
+
+static void usbdev_vm_open(struct vm_area_struct *vma)
+{
+	struct usb_memory *usbm = vma->vm_private_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&usbm->ps->lock, flags);
+	++usbm->vma_use_count;
+	spin_unlock_irqrestore(&usbm->ps->lock, flags);
+}
+
+static void usbdev_vm_close(struct vm_area_struct *vma)
+{
+	struct usb_memory *usbm = vma->vm_private_data;
+
+	dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
+}
+
+static const struct vm_operations_struct usbdev_vm_ops = {
+	.open = usbdev_vm_open,
+	.close = usbdev_vm_close
+};
+
+static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct usb_memory *usbm = NULL;
+	struct usb_dev_state *ps = file->private_data;
+	size_t size = vma->vm_end - vma->vm_start;
+	void *mem;
+	unsigned long flags;
+	dma_addr_t dma_handle;
+	int ret;
+
+	ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory));
+	if (ret) {
+		return ret;
+	}
+
+	usbm = kzalloc(sizeof(struct usb_memory), GFP_KERNEL);
+	if (!usbm) {
+		usbfs_decrease_memory_usage(
+				size + sizeof(struct usb_memory));
+		return -ENOMEM;
+	}
+
+	mem = usb_alloc_coherent(ps->dev, size, GFP_KERNEL, &dma_handle);
+	if (!mem) {
+		usbfs_decrease_memory_usage(
+				size + sizeof(struct usb_memory));
+		kfree(usbm);
+		return -ENOMEM;
+	}
+
+	memset(mem, 0, size);
+
+	usbm->mem = mem;
+	usbm->dma_handle = dma_handle;
+	usbm->size = size;
+	usbm->ps = ps;
+	usbm->vm_start = vma->vm_start;
+	usbm->vma_use_count = 1;
+
+	if (remap_pfn_range(vma, vma->vm_start,
+			virt_to_phys(usbm->mem) >> PAGE_SHIFT,
+			size, vma->vm_page_prot) < 0) {
+		dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
+		return -EAGAIN;
+	}
+
+	vma->vm_flags |= VM_IO;
+	vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);
+	vma->vm_ops = &usbdev_vm_ops;
+	vma->vm_private_data = usbm;
+
+	spin_lock_irqsave(&ps->lock, flags);
+	list_add_tail(&usbm->memlist, &ps->memory_list);
+	spin_unlock_irqrestore(&ps->lock, flags);
+
+	return 0;
+}
+
 static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig)
 {
 	loff_t ret;
@@ -297,8 +412,13 @@ static void free_async(struct async *as)
 		if (sg_page(&as->urb->sg[i]))
 			kfree(sg_virt(&as->urb->sg[i]));
 	}
+
 	kfree(as->urb->sg);
-	kfree(as->urb->transfer_buffer);
+	if (as->usbm == NULL)
+		kfree(as->urb->transfer_buffer);
+	else
+		dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count);
+
 	kfree(as->urb->setup_packet);
 	usb_free_urb(as->urb);
 	usbfs_decrease_memory_usage(as->mem_usage);
@@ -910,6 +1030,7 @@ static int usbdev_open(struct inode *inode, struct file *file)
 	INIT_LIST_HEAD(&ps->list);
 	INIT_LIST_HEAD(&ps->async_pending);
 	INIT_LIST_HEAD(&ps->async_completed);
+	INIT_LIST_HEAD(&ps->memory_list);
 	init_waitqueue_head(&ps->wait);
 	ps->discsignr = 0;
 	ps->disc_pid = get_pid(task_pid(current));
@@ -962,6 +1083,13 @@ static int usbdev_release(struct inode *inode, struct file *file)
 		free_async(as);
 		as = async_getcompleted(ps);
 	}
+
+	/* Any elements still left on this list are still in use and cannot
+	 * be deleted here, but will be freed once the counters go to 0
+	 * (see dec_usb_memory_use_count()).
+	 */
+	list_del(&ps->memory_list);
+
 	kfree(ps);
 	return 0;
 }
@@ -1283,6 +1411,31 @@ static int proc_setconfig(struct usb_dev_state *ps, void __user *arg)
 	return status;
 }
 
+static struct usb_memory *
+find_memory_area(struct usb_dev_state *ps, const struct usbdevfs_urb *uurb)
+{
+	struct usb_memory *usbm = NULL, *iter;
+	unsigned long flags;
+	unsigned long uurb_start = (unsigned long)uurb->buffer;
+
+	spin_lock_irqsave(&ps->lock, flags);
+	list_for_each_entry(iter, &ps->memory_list, memlist) {
+		if (uurb_start >= iter->vm_start &&
+				uurb_start < iter->vm_start + iter->size) {
+			if (uurb->buffer_length > iter->vm_start + iter->size -
+					uurb_start) {
+				usbm = ERR_PTR(-EINVAL);
+			} else {
+				usbm = iter;
+				usbm->urb_use_count++;
+			}
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&ps->lock, flags);
+	return usbm;
+}
+
 static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb,
 			struct usbdevfs_iso_packet_desc __user *iso_frame_desc,
 			void __user *arg)
@@ -1372,9 +1525,17 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 			uurb->type = USBDEVFS_URB_TYPE_INTERRUPT;
 			goto interrupt_urb;
 		}
-		num_sgs = DIV_ROUND_UP(uurb->buffer_length, USB_SG_SIZE);
-		if (num_sgs == 1 || num_sgs > ps->dev->bus->sg_tablesize)
-			num_sgs = 0;
+		/* do not use SG buffers when memory mapped segments
+		 * are in use
+		 */
+		if (!as->usbm) {
+			num_sgs = DIV_ROUND_UP(uurb->buffer_length,
+					USB_SG_SIZE);
+			if (num_sgs == 1 ||
+			    num_sgs > ps->dev->bus->sg_tablesize) {
+				num_sgs = 0;
+			}
+		}
 		if (ep->streams)
 			stream_id = uurb->stream_id;
 		break;
@@ -1445,10 +1606,15 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 	if (ret)
 		goto error;
 	as->mem_usage = u;
+	as->usbm = find_memory_area(ps, uurb);
+	if (IS_ERR(as->usbm)) {
+		ret = PTR_ERR(as->usbm);
+		goto error;
+	}
 
 	if (num_sgs) {
 		as->urb->sg = kmalloc(num_sgs * sizeof(struct scatterlist),
-				      GFP_KERNEL);
+				GFP_KERNEL);
 		if (!as->urb->sg) {
 			ret = -ENOMEM;
 			goto error;
@@ -1476,21 +1642,26 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 			totlen -= u;
 		}
 	} else if (uurb->buffer_length > 0) {
-		as->urb->transfer_buffer = kmalloc(uurb->buffer_length,
+		if (as->usbm) {
+			as->urb->transfer_buffer = as->usbm->mem;
+		} else {
+			as->urb->transfer_buffer = kmalloc(uurb->buffer_length,
 				GFP_KERNEL);
+		}
 		if (!as->urb->transfer_buffer) {
 			ret = -ENOMEM;
 			goto error;
 		}
 
-		if (!is_in) {
+		if (!is_in && as->usbm == NULL) {
 			if (copy_from_user(as->urb->transfer_buffer,
 					   uurb->buffer,
 					   uurb->buffer_length)) {
 				ret = -EFAULT;
 				goto error;
 			}
-		} else if (uurb->type == USBDEVFS_URB_TYPE_ISO) {
+		} else if (uurb->type == USBDEVFS_URB_TYPE_ISO &&
+				as->usbm == NULL) {
 			/*
 			 * Isochronous input data may end up being
 			 * discontiguous if some of the packets are short.
@@ -1545,10 +1716,18 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 	isopkt = NULL;
 	as->ps = ps;
 	as->userurb = arg;
-	if (is_in && uurb->buffer_length > 0)
+	if (is_in && uurb->buffer_length > 0 && as->usbm == NULL) {
 		as->userbuffer = uurb->buffer;
-	else
+	} else {
 		as->userbuffer = NULL;
+	}
+	if (as->usbm != NULL) {
+		unsigned long uurb_start = (unsigned long)uurb->buffer;
+
+		as->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+		as->urb->transfer_dma = as->usbm->dma_handle +
+				(uurb_start - as->usbm->vm_start);
+	}
 	as->signr = uurb->signr;
 	as->ifnum = ifnum;
 	as->pid = get_pid(task_pid(current));
@@ -1604,6 +1783,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 	return 0;
 
  error:
+	if (as && as->usbm)
+		dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count);
 	kfree(isopkt);
 	kfree(dr);
 	if (as)
@@ -2373,6 +2554,7 @@ const struct file_operations usbdev_file_operations = {
 #ifdef CONFIG_COMPAT
 	.compat_ioctl =   usbdev_compat_ioctl,
 #endif
+	.mmap =           usbdev_mmap,
 	.open =		  usbdev_open,
 	.release =	  usbdev_release,
 };
-- 
2.1.4
