Hi all,
In order to make iPhone USB tethering work with OpenBSD, I ended up
going a bit down the proverbial rabbit hole.
After initially playing around with building a userspace 'driver' on
top of a tap interface, I found that ugen(4) is entirely blocking. I
had started working around this with separate reader/writer threads,
but realized that ugen(4) could fairly readily be made capable of
submitting and tracking multiple asynchronous operations on a single
endpoint.
Thus this patch was born. At this point I'm mainly looking for comments
on the appetite for such a drastic change (the patch attempts to
maintain backwards compatibility for code that depends on the existing
behavior), as well as feedback on the interface and the code itself.
The major changes are:
- Asynchronous USB operations are supported through 3 new ioctls:
USB_ASYNC_SUBMIT - submit a new asynchronous operation
USB_ASYNC_CANCEL - cancel a pending operation (TBD)
USB_ASYNC_FINISH - retrieve the next completed operation
All three operations act on a new 'struct usb_async_op'.
- Control, bulk, and interrupt endpoints are all supported through the
  same API, with control operations requiring that the setup packet
  occupy the first bytes of the data buffer (see the usage sketch
  below).
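
To make the intended flow concrete, here is a minimal userspace sketch
(not part of the diff) that submits a single bulk IN read with
USB_ASYNC_SUBMIT, waits for it with poll(2) (assuming the reworked
ugenpoll() reports completed async operations as POLLIN, per the
patch), and reaps it with USB_ASYNC_FINISH. The device node, endpoint
number, and buffer size are made-up placeholders, it is built against
the patched usb.h, and error handling is pared down:

#include <sys/types.h>
#include <sys/ioctl.h>
#include <dev/usb/usb.h>

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	struct usb_async_op op;
	struct pollfd pfd;
	char buf[512];
	int fd;

	/* Hypothetical bulk endpoint node: ugen0, endpoint 1. */
	if ((fd = open("/dev/ugen0.01", O_RDWR)) == -1)
		return (1);

	memset(&op, 0, sizeof(op));
	op.uao_ep = 0x81;			/* endpoint 1, IN */
	op.uao_type = USB_OP_BULK_TRANSFER;
	op.uao_buf = buf;
	op.uao_buf_len = sizeof(buf);
	op.uao_flags = USB_OP_FLAG_ALLOW_SHORT;
	op.uao_timeout_ms = 1000;
	op.uao_priv = &op;			/* opaque tag for matching completions */

	if (ioctl(fd, USB_ASYNC_SUBMIT, &op) == -1)
		return (1);

	/* Wait for a completed operation on this endpoint, then retrieve it. */
	pfd.fd = fd;
	pfd.events = POLLIN;
	(void)poll(&pfd, 1, -1);

	if (ioctl(fd, USB_ASYNC_FINISH, &op) == -1)
		return (1);

	if (op.uao_status == USB_OP_STATUS_COMPLETED)
		printf("read %u bytes\n", op.uao_transfer_size);

	return (0);
}

For a control transfer on the control endpoint node, uao_type would be
USB_OP_CONTROL_TRANSFER and the first sizeof(usb_device_request_t)
bytes of uao_buf would carry the setup packet, followed by any data
stage.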
Currently broken things:
- The entire synchronous interface (poll-related changes)
- Cancelling pending transfers
- Isochronous transfers
- Kqueue (maybe? I haven't tested it thoroughly yet)
- Locking is somewhat heavy-handed in places
- There's a hard cap of 16 asynchronous operations in-flight per
endpoint at this time. This could be readily made configurable.
My current test cases are a trivial "smoke test" of the API, as well as
using libusb and the libimobiledevice/usbmux and ipheth-pair tools. Of
course, this requires a modified libusb, which can be found at:
https://github.com/pvachon/libusb (see the openbsd-6.0 branch),
along with some light modifications to usbmuxd:
https://github.com/pvachon/usbmuxd
Otherwise, upstream libimobiledevice, libusbmux, etc. can all be used
as-is.
Any thoughts are appreciated.
Index: sys/dev/usb/ugen.c
===================================================================
RCS file: /cvs/src/sys/dev/usb/ugen.c,v
retrieving revision 1.94
diff -u -p -r1.94 ugen.c
--- sys/dev/usb/ugen.c 24 May 2016 05:35:01 -0000 1.94
+++ sys/dev/usb/ugen.c 26 May 2016 17:16:55 -0000
@@ -10,6 +10,8 @@
* by Lennart Augustsson ([email protected]) at
* Carlstedt Research & Technology.
*
+ * Copyright (C) 2016 Phil Vachon <[email protected]>
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -50,61 +52,97 @@
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
+#define EPDEV(_ep) (&(_ep)->sc->sc_dev)
+#define SCDEV(_sc) (&(_sc)->sc_dev)
+
#ifdef UGEN_DEBUG
#define DPRINTF(x) do { if (ugendebug) printf x; } while (0)
#define DPRINTFN(n,x) do { if (ugendebug>(n)) printf x; } while (0)
+#define UPRINTF(_dev, msg, ...) do { (void)printf("%s: " msg "\n", (_dev)->dv_xname, ##__VA_ARGS__); } while (0)
int ugendebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
+#define UPRINTF(...)
#endif
#define UGEN_CHUNK 128 /* chunk size for read */
#define UGEN_IBSIZE 1020 /* buffer size */
#define UGEN_BBSIZE 1024
-#define UGEN_NISOFRAMES 500 /* 0.5 seconds worth */
-#define UGEN_NISOREQS 6 /* number of outstanding xfer requests */
-#define UGEN_NISORFRMS 4 /* number of frames (milliseconds) per req */
+#define UGEN_MAX_OPS 16 /* Maximum number of in-flight ops allowed */
+
+struct ugen_pending_op {
+ struct ugen_softc *upo_sc; /* The ugen(4) softc */
+ struct ugen_endpoint *upo_ep; /* The endpoint this op is pending on */
+ struct ugen_endpoint_pipe *upo_pipe; /* The pipe this op is pending on */
+ struct usbd_xfer *upo_xfer; /* A USB transfer object */
+ char *upo_dma_buf; /* The DMA buffer for this transfer */
+
+ u_int32_t upo_transfer_len; /* The length of this transfer */
+ u_int32_t upo_buffer_len; /* The size of the buffer */
+ u_int8_t upo_flags;
+
+ usbd_status upo_status; /* Status of this transfer */
+
+ struct usb_async_op upo_aop; /* The userspace state for this op. */
+ TAILQ_ENTRY(ugen_pending_op) upo_node;
+};
+
+/* TODO:
+ * - Add ability to cancel a pending operation
+ * - Prevent insertion of an asynchronous op if there are pending synchronous ops
+ * - Interrupt transfers probably need a smarter control interface
+ * - Isochronous support
+ */
+
+/*
+ * Pipe for a generic endpoint. There can be up to two of these per ugen_endpoint; one IN, one OUT
+ */
+struct ugen_endpoint_pipe {
+ usb_endpoint_descriptor_t *uep_edesc;
+ struct usbd_pipe *uep_pipe;
+};
+/*
+ * Generic USB Driver Endpoint. This can represent a pair of IN/OUT endpoints that have the same
+ * address, and thus are accessed using the same ugen device node.
+ */
struct ugen_endpoint {
struct ugen_softc *sc;
- usb_endpoint_descriptor_t *edesc;
+ TAILQ_HEAD(, ugen_pending_op) ep_op_pending; /* Async ops pending on this EP */
+ TAILQ_HEAD(, ugen_pending_op) ep_op_completed; /* List of completed ops for this EP */
+ TAILQ_HEAD(, ugen_pending_op) ep_op_free; /* Operations available for allocation */
+ struct selinfo rsel;
struct usbd_interface *iface;
+
int state;
-#define UGEN_ASLP 0x02 /* waiting for data */
#define UGEN_SHORT_OK 0x04 /* short xfers are OK */
- struct usbd_pipe *pipeh;
- struct clist q;
- struct selinfo rsel;
- u_char *ibuf; /* start of buffer (circular for isoc) */
- u_char *fill; /* location for input (isoc) */
- u_char *limit; /* end of circular buffer (isoc) */
- u_char *cur; /* current read location (isoc) */
+
u_int32_t timeout;
- struct isoreq {
- struct ugen_endpoint *sce;
- struct usbd_xfer *xfer;
- void *dmabuf;
- u_int16_t sizes[UGEN_NISORFRMS];
- } isoreqs[UGEN_NISOREQS];
+
+ unsigned int pending; /* Number of operations in-flight (read/write) */
+ unsigned int completed; /* Number of operations that are pending and completed */
+ unsigned int free; /* Number of operations that are available for use */
+ unsigned int total; /* Total operations allocated */
+
+ unsigned int next_token; /* The next transfer token to be handed out for async ops. */
+
+ struct mutex ep_mtx; /* Mutex for updating state of the endpoint */
+ struct ugen_endpoint_pipe ue_pipes[2]; /* Can have up to two pipes -- IN and OUT */
+#define OUT 0
+#define IN 1
};
struct ugen_softc {
struct device sc_dev; /* base device */
struct usbd_device *sc_udev;
-
char sc_is_open[USB_MAX_ENDPOINTS];
- struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS][2];
-#define OUT 0
-#define IN 1
-
+ struct ugen_endpoint sc_endpoints[USB_MAX_ENDPOINTS];
int sc_refcnt;
u_char sc_secondary;
};
-void ugenintr(struct usbd_xfer *xfer, void *addr, usbd_status status);
-void ugen_isoc_rintr(struct usbd_xfer *xfer, void *addr, usbd_status status);
int ugen_do_read(struct ugen_softc *, int, struct uio *, int);
int ugen_do_write(struct ugen_softc *, int, struct uio *, int);
int ugen_do_ioctl(struct ugen_softc *, int, u_long, caddr_t, int,
@@ -113,6 +151,8 @@ int ugen_do_close(struct ugen_softc *, i
int ugen_set_config(struct ugen_softc *sc, int configno);
int ugen_set_interface(struct ugen_softc *, int, int);
int ugen_get_alt_index(struct ugen_softc *sc, int ifaceidx);
+void ugen_op_complete_async(struct usbd_xfer *xfer, void *priv, usbd_status status);
+int ugen_op_free(struct ugen_endpoint *ep, struct ugen_pending_op *op);
#define UGENUNIT(n) ((minor(n) >> 4) & 0xf)
#define UGENENDPOINT(n) (minor(n) & 0xf)
@@ -186,11 +226,12 @@ ugen_set_config(struct ugen_softc *sc, i
struct usbd_interface *iface;
usb_endpoint_descriptor_t *ed;
struct ugen_endpoint *sce;
+ struct ugen_endpoint_pipe *sce_pipe;
int ifaceno, endptno, endpt;
int err, dir;
DPRINTFN(1,("ugen_set_config: %s to configno %d, sc=%p\n",
- sc->sc_dev.dv_xname, configno, sc));
+ sc->sc_dev.dv_xname, configno, sc));
/*
* We start at 1, not 0, because we don't care whether the
@@ -218,7 +259,7 @@ ugen_set_config(struct ugen_softc *sc, i
}
}
- memset(sc->sc_endpoints, 0, sizeof sc->sc_endpoints);
+ memset(sc->sc_endpoints, 0, sizeof(sc->sc_endpoints));
for (ifaceno = 0; ifaceno < cdesc->bNumInterface; ifaceno++) {
DPRINTFN(1,("ugen_set_config: ifaceno %d\n", ifaceno));
if (usbd_iface_claimed(sc->sc_udev, ifaceno)) {
@@ -234,19 +275,515 @@ ugen_set_config(struct ugen_softc *sc, i
ed = usbd_interface2endpoint_descriptor(iface,endptno);
endpt = ed->bEndpointAddress;
dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
- sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
+ sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)];
+ sce_pipe = &sce->ue_pipes[dir];
+
DPRINTFN(1,("ugen_set_config: endptno %d, endpt=0x%02x"
- "(%d,%d), sce=%p\n",
- endptno, endpt, UE_GET_ADDR(endpt),
- UE_GET_DIR(endpt), sce));
+ "(%d,%s), sce=%p\n",
+ endptno, endpt, UE_GET_ADDR(endpt),
+ UE_GET_DIR(endpt) == UE_DIR_IN ?
+ "IN" : "OUT", sce));
+
+ sce_pipe->uep_edesc = ed;
sce->sc = sc;
- sce->edesc = ed;
sce->iface = iface;
}
}
+
return (0);
}
+/* Prepare an operation for the endpoint in question */
+int
+ugen_op_alloc(struct ugen_softc *sc,
+ struct ugen_endpoint *ep,
+ struct ugen_endpoint_pipe *pipe,
+ struct ugen_pending_op **pop,
+ u_int32_t buf_len)
+{
+ int err = 0;
+
+ struct ugen_pending_op *op = NULL;
+
+ /* Try to reuse a buffer if we can */
+ mtx_enter(&ep->ep_mtx);
+ if (ep->free != 0) {
+ op = TAILQ_FIRST(&ep->ep_op_free);
+ TAILQ_REMOVE(&ep->ep_op_free, op, upo_node);
+ ep->free--;
+ } else {
+ if (ep->total >= UGEN_MAX_OPS) {
+ /* Abort -- we can't allocate any more operations */
+ err = EBUSY;
+ mtx_leave(&ep->ep_mtx);
+ goto done;
+ }
+ /* Otherwise, increment total to reserve a spot for ourselves */
+ ep->total++;
+ }
+ mtx_leave(&ep->ep_mtx);
+
+ if (NULL == op) {
+ op = malloc(sizeof(struct ugen_pending_op), M_USBDEV,
+ M_WAITOK | M_CANFAIL);
+ if (NULL == op) {
+ err = ENOMEM;
+ goto done;
+ }
+
+ op->upo_xfer = usbd_alloc_xfer(sc->sc_udev);
+ if (NULL == op->upo_xfer) {
+ err = ENOMEM;
+ goto done;
+ }
+ }
+
+ op->upo_sc = sc;
+ op->upo_ep = ep;
+ op->upo_pipe = pipe;
+ op->upo_flags = 0;
+
+ /* Allocate the DMA buffer for this transfer */
+ op->upo_dma_buf = usbd_alloc_buffer(op->upo_xfer, buf_len);
+ if (NULL == op->upo_dma_buf) {
+ err = ENOMEM;
+ goto done;
+ }
+
+ op->upo_buffer_len = buf_len;
+
+ *pop = op;
+
+done:
+ if (0 != err) {
+ if (NULL != op) {
+ (void)ugen_op_free(ep, op);
+ op = NULL;
+ }
+ }
+ return err;
+}
+
+int
+ugen_op_free(struct ugen_endpoint *ep, struct ugen_pending_op *op)
+{
+ int err = 0;
+
+ if (op->upo_status == USBD_IN_PROGRESS) {
+ err = EBUSY;
+ goto done;
+ }
+
+ /* Free the underlying DMA-able buffer */
+ usbd_free_buffer(op->upo_xfer);
+
+ mtx_enter(&ep->ep_mtx);
+ TAILQ_INSERT_HEAD(&ep->ep_op_free, op, upo_node);
+ ep->free++;
+ mtx_leave(&ep->ep_mtx);
+
+done:
+ return err;
+}
+
+int
+ugen_op_cancel(struct ugen_pending_op *op)
+{
+ int err = 0;
+
+ /* TODO: Expose the Pipe's abort method ? */
+
+ return err;
+}
+
+/* Completion callback for the ugen asynchronous driver calls */
+void
+ugen_op_complete_async(struct usbd_xfer *xfer, void *priv, usbd_status status)
+{
+ struct ugen_pending_op *op = priv;
+ struct ugen_endpoint *ep = NULL;
+
+ ep = op->upo_ep;
+
+ op->upo_status = status;
+
+ if (status != USBD_NORMAL_COMPLETION) {
+ op->upo_transfer_len = 0;
+ if (status == USBD_STALLED) {
+ /* Clear the stall and try again */
+ usbd_clear_endpoint_stall_async(op->upo_pipe->uep_pipe);
+ return;
+ }
+ } else {
+ usbd_get_xfer_status(xfer, NULL, NULL,
+ &op->upo_transfer_len, NULL);
+ }
+
+ /* Move the operation to the completion queue */
+ mtx_enter(&ep->ep_mtx);
+ TAILQ_REMOVE(&ep->ep_op_pending, op, upo_node);
+ TAILQ_INSERT_TAIL(&ep->ep_op_completed, op, upo_node);
+ ep->pending--;
+ ep->completed++;
+ mtx_leave(&ep->ep_mtx);
+
+ UPRINTF(EPDEV(ep), "Ep %u Completed %d byte transfer. Status: %s",
+ UE_GET_ADDR(op->upo_aop.uao_ep), op->upo_transfer_len,
+ usbd_errstr(status));
+
+ selwakeup(&ep->rsel);
+}
+
+/* Submit an asynchronous operation that has been populated */
+int
+ugen_op_submit_async(struct ugen_endpoint *ep, struct ugen_pending_op *op)
+{
+ int err = 0;
+
+ usbd_status uerr;
+
+ op->upo_status = uerr = usbd_transfer(op->upo_xfer);
+ if (uerr != USBD_IN_PROGRESS) {
+ UPRINTF(EPDEV(ep), "Failed to initiate transfer: %s",
+ usbd_errstr(uerr));
+ err = EINVAL;
+ goto done;
+ }
+
+ mtx_enter(&ep->ep_mtx);
+ ep->pending++;
+ TAILQ_INSERT_TAIL(&ep->ep_op_pending, op, upo_node);
+ mtx_leave(&ep->ep_mtx);
+
+done:
+ return err;
+}
+
+/* Take a USB async op ioctl and set up the operation and submit it for a bulk
+ * transfer
+ */
+int
+ugen_op_submit_bulk(struct ugen_softc *sc,
+ unsigned endpt,
+ struct usb_async_op *uaop)
+{
+ int err = 0, dir;
+
+ struct ugen_pending_op *op = NULL;
+ struct ugen_endpoint *ep = NULL;
+ struct ugen_endpoint_pipe *ep_pipe = NULL;
+
+ if ((uaop->uao_buf == NULL) ^ (uaop->uao_buf_len == 0)) {
+ return EINVAL;
+ }
+
+ if (UE_GET_ADDR(uaop->uao_ep) != endpt) {
+ return EINVAL;
+ }
+
+ if (usbd_is_dying(sc->sc_udev)) {
+ return EIO;
+ }
+
+ UPRINTF(SCDEV(sc), "ugen_op_submit_bulk: ep=%u dir=%s length=%u",
+ UE_GET_ADDR(uaop->uao_ep),
+ UE_GET_DIR(uaop->uao_ep) == UE_DIR_IN ? "IN" : "OUT",
+ uaop->uao_buf_len);
+
+ dir = UE_GET_DIR(uaop->uao_ep) == UE_DIR_IN ? IN : OUT;
+ ep = &sc->sc_endpoints[endpt];
+ ep_pipe = &ep->ue_pipes[dir];
+
+ if (NULL == ep_pipe->uep_edesc) {
+ err = ENXIO;
+ goto done;
+ }
+
+ if ((err = ugen_op_alloc(sc, ep, ep_pipe, &op, uaop->uao_buf_len))) {
+ goto done;
+ }
+
+ memcpy(&op->upo_aop, uaop, sizeof(struct usb_async_op));
+
+ if (UE_GET_DIR(ep_pipe->uep_edesc->bEndpointAddress) == UE_DIR_OUT) {
+ /* Copy the buffer contents from userspace */
+ if ((err = copyin(uaop->uao_buf, op->upo_dma_buf,
+ uaop->uao_buf_len))) {
+ goto done;
+ }
+ }
+
+ usbd_setup_xfer(op->upo_xfer, ep_pipe->uep_pipe,
+ op, op->upo_dma_buf, uaop->uao_buf_len,
+ USBD_NO_COPY | (uaop->uao_flags & USB_OP_FLAG_ALLOW_SHORT ?
+ USBD_SHORT_XFER_OK : 0),
+ uaop->uao_timeout_ms,
+ ugen_op_complete_async);
+
+ op->upo_buffer_len = uaop->uao_buf_len;
+
+ /* Submit the operation */
+ err = ugen_op_submit_async(ep, op);
+
+done:
+ if (err) {
+ if (op) {
+ ugen_op_free(ep, op);
+ op = NULL;
+ }
+ }
+ return err;
+}
+
+int
+ugen_op_submit_control(struct ugen_softc *sc,
+ struct usb_async_op *uaop)
+{
+ int err = 0;
+ unsigned len = 0;
+ struct ugen_pending_op *op = NULL;
+ struct ugen_endpoint *ep = NULL;
+ u_int32_t transfer_sz = 0;
+ usb_device_request_t req;
+
+ if (usbd_is_dying(sc->sc_udev)) {
+ return EIO;
+ }
+
+ ep = &sc->sc_endpoints[USB_CONTROL_ENDPOINT];
+
+ if (uaop->uao_buf_len < sizeof(usb_device_request_t)) {
+ err = EINVAL;
+ goto done;
+ }
+
+ transfer_sz = uaop->uao_buf_len - sizeof(usb_device_request_t);
+
+ /* Copy over the setup packet and sanitize it */
+ copyin(uaop->uao_buf, &req, sizeof(usb_device_request_t));
+
+ len = UGETW(req.wLength);
+
+ if (transfer_sz != len) {
+ err = EINVAL;
+ goto done;
+ }
+
+ /* Prevent anything insane from happening */
+ if (len > 32767)
+ return EINVAL;
+
+ if ((req.bmRequestType == UT_WRITE_DEVICE &&
+ req.bRequest == UR_SET_ADDRESS) ||
+ (req.bmRequestType == UT_WRITE_DEVICE &&
+ req.bRequest == UR_SET_CONFIG) ||
+ (req.bmRequestType == UT_WRITE_INTERFACE &&
+ req.bRequest == UR_SET_INTERFACE))
+ return EINVAL;
+
+ /* Allocate an appropriately sized transfer */
+ if ((err = ugen_op_alloc(sc, ep, NULL, &op, len))) {
+ goto done;
+ }
+
+ op->upo_buffer_len = transfer_sz;
+ op->upo_transfer_len = 0;
+ op->upo_flags = UE_DIR_IN;
+ memcpy(&op->upo_aop, uaop, sizeof(struct usb_async_op));
+
+ /* If this is a control write, copy out the data from userspace */
+ if (!(req.bmRequestType & UT_READ)) {
+ op->upo_flags = UE_DIR_OUT;
+ copyin(uaop->uao_buf + sizeof(usb_device_request_t),
+ op->upo_dma_buf, len);
+ }
+
+ usbd_setup_default_xfer(op->upo_xfer, sc->sc_udev,
+ op, uaop->uao_timeout_ms, &req,
+ op->upo_dma_buf, len,
+ USBD_NO_COPY |
+ ((uaop->uao_flags & USB_OP_FLAG_ALLOW_SHORT) ?
+ USBD_SHORT_XFER_OK : 0),
+ ugen_op_complete_async);
+
+
+ /* Submit the transfer */
+ err = ugen_op_submit_async(ep, op);
+
+done:
+ if (err) {
+ if (op) {
+ ugen_op_free(ep, op);
+ op = NULL;
+ }
+ }
+ return err;
+}
+
+int
+ugen_op_submit(struct ugen_softc *sc,
+ unsigned endpt,
+ struct usb_async_op *uaop)
+{
+ switch (uaop->uao_type) {
+ case USB_OP_BULK_TRANSFER:
+ case USB_OP_INTERRUPT_TRANSFER:
+ if (endpt == USB_CONTROL_ENDPOINT) {
+ return EINVAL;
+ }
+ return ugen_op_submit_bulk(sc, endpt, uaop);
+ case USB_OP_CONTROL_TRANSFER:
+ if (endpt != USB_CONTROL_ENDPOINT) {
+ return EINVAL;
+ }
+ return ugen_op_submit_control(sc, uaop);
+ default:
+ return EINVAL;
+ }
+}
+
+void
+ugen_op_cleanup(struct ugen_endpoint *ep,
+ struct ugen_pending_op *op)
+{
+ usbd_free_xfer(op->upo_xfer);
+ free(op, M_USBDEV, sizeof(struct ugen_pending_op));
+}
+
+int
+ugen_op_reap_all(struct ugen_softc *sc,
+ struct ugen_endpoint *ep)
+{
+ int err = 0;
+
+ struct ugen_pending_op *_tmp, *cur;
+
+ TAILQ_FOREACH_SAFE(cur, &ep->ep_op_pending, upo_node, _tmp) {
+ TAILQ_REMOVE(&ep->ep_op_pending, cur, upo_node);
+ (void)ugen_op_cleanup(ep, cur);
+ ep->total--;
+ }
+
+ TAILQ_FOREACH_SAFE(cur, &ep->ep_op_completed, upo_node, _tmp) {
+ TAILQ_REMOVE(&ep->ep_op_completed, cur, upo_node);
+ (void)ugen_op_cleanup(ep, cur);
+ ep->total--;
+ }
+
+ TAILQ_FOREACH_SAFE(cur, &ep->ep_op_free, upo_node, _tmp) {
+ TAILQ_REMOVE(&ep->ep_op_free, cur, upo_node);
+ (void)ugen_op_cleanup(ep, cur);
+ ep->total--;
+ }
+
+ if (ep->total) {
+ UPRINTF(SCDEV(sc), "Endpoint still has %u operations in flight",
+ ep->total);
+ }
+
+ return err;
+}
+
+/* Called by a userspace process to clean up the operation and fill in the given
+ * usb_async_op with results.
+ */
+int
+ugen_op_finish(struct ugen_softc *sc,
+ unsigned endpt,
+ struct usb_async_op *aop_out)
+{
+ int err = 0;
+
+ struct ugen_pending_op *op = NULL;
+ struct ugen_endpoint *ep;
+ struct ugen_endpoint_pipe *ep_pipe;
+ struct usb_async_op *aop;
+ u_int32_t bytes_xfer = 0;
+
+ if (usbd_is_dying(sc->sc_udev)) {
+ err = EIO;
+ goto done;
+ }
+
+ ep = &sc->sc_endpoints[endpt];
+
+ mtx_enter(&ep->ep_mtx);
+ if (!TAILQ_EMPTY(&ep->ep_op_completed)) {
+ op = TAILQ_FIRST(&ep->ep_op_completed);
+ TAILQ_REMOVE(&ep->ep_op_completed, op, upo_node);
+ ep->completed--;
+ }
+ mtx_leave(&ep->ep_mtx);
+
+ if (NULL == op) {
+ err = EAGAIN;
+ goto done;
+ }
+
+ ep = op->upo_ep;
+ aop = &op->upo_aop;
+
+ bytes_xfer = ulmin(aop->uao_buf_len, op->upo_transfer_len);
+
+ if (op->upo_status == USBD_NORMAL_COMPLETION) {
+ switch (op->upo_aop.uao_type) {
+ case USB_OP_BULK_TRANSFER:
+ case USB_OP_INTERRUPT_TRANSFER:
+ ep_pipe = op->upo_pipe;
+ if (UE_GET_DIR(ep_pipe->uep_edesc->bEndpointAddress)
+ == UE_DIR_IN) {
+ /* Copy the output data we can copy */
+ if ((err = copyout(op->upo_dma_buf,
+ aop->uao_buf, bytes_xfer))) {
+ goto done;
+ }
+
+ }
+ case USB_OP_CONTROL_TRANSFER:
+ if (op->upo_flags & UE_DIR_IN) {
+ if ((err = copyout(op->upo_dma_buf,
+ aop->uao_buf + sizeof(usb_device_request_t),
+ bytes_xfer))) {
+ goto done;
+ }
+ }
+ break;
+ default:
+ err = EINVAL;
+ goto done;
+ }
+
+ aop->uao_transfer_size = op->upo_transfer_len;
+ aop->uao_status = USB_OP_STATUS_COMPLETED;
+ } else {
+ aop->uao_status = USB_OP_STATUS_ERROR;
+ }
+
+ /* Copy out the updated state to userspace */
+ memcpy(aop_out, aop, sizeof(struct usb_async_op));
+
+done:
+ if (op) {
+ (void)ugen_op_free(ep, op);
+ }
+
+ return err;
+}
+
+void
+ugen_endpoint_init_alloc(struct ugen_endpoint *sce)
+{
+ /* Initialize the allocator state */
+ sce->free = 0;
+ sce->pending = 0;
+ sce->completed = 0;
+ sce->total = 0;
+ TAILQ_INIT(&sce->ep_op_completed);
+ TAILQ_INIT(&sce->ep_op_pending);
+ TAILQ_INIT(&sce->ep_op_free);
+ mtx_init(&sce->ep_mtx, IPL_USB);
+}
+
int
ugenopen(dev_t dev, int flag, int mode, struct proc *p)
{
@@ -255,11 +792,9 @@ ugenopen(dev_t dev, int flag, int mode,
int endpt = UGENENDPOINT(dev);
usb_endpoint_descriptor_t *edesc;
struct ugen_endpoint *sce;
- int dir, isize;
+ struct ugen_endpoint_pipe *ep_pipe;
+ int dir;
usbd_status err;
- struct usbd_xfer *xfer;
- void *buf;
- int i, j;
if (unit >= ugen_cd.cd_ndevs)
return (ENXIO);
@@ -267,9 +802,6 @@ ugenopen(dev_t dev, int flag, int mode,
if (sc == NULL)
return (ENXIO);
- DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
- flag, mode, unit, endpt));
-
if (sc == NULL || usbd_is_dying(sc->sc_udev))
return (ENXIO);
@@ -278,113 +810,57 @@ ugenopen(dev_t dev, int flag, int mode,
if (endpt == USB_CONTROL_ENDPOINT) {
sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
+
+ UPRINTF(SCDEV(sc), "ugenopen: setting up control endpoint");
+ sce = &sc->sc_endpoints[USB_CONTROL_ENDPOINT];
+ sce->sc = sc;
+ sce->iface = NULL;
+ ugen_endpoint_init_alloc(sce);
+
return (0);
}
/* Make sure there are pipes for all directions. */
for (dir = OUT; dir <= IN; dir++) {
if (flag & (dir == OUT ? FWRITE : FREAD)) {
- sce = &sc->sc_endpoints[endpt][dir];
- if (sce == 0 || sce->edesc == 0)
+ sce = &sc->sc_endpoints[endpt];
+ ep_pipe = &sce->ue_pipes[dir];
+ if (ep_pipe->uep_edesc == 0)
return (ENXIO);
}
}
- /* Actually open the pipes. */
- /* XXX Should back out properly if it fails. */
+ /* Open the pipe(s) for this endpoint. */
+ sce = &sc->sc_endpoints[endpt];
+ sce->state = 0;
+ sce->timeout = USBD_NO_TIMEOUT;
+ ugen_endpoint_init_alloc(sce);
+
for (dir = OUT; dir <= IN; dir++) {
if (!(flag & (dir == OUT ? FWRITE : FREAD)))
continue;
- sce = &sc->sc_endpoints[endpt][dir];
- sce->state = 0;
- sce->timeout = USBD_NO_TIMEOUT;
- DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
- sc, endpt, dir, sce));
- edesc = sce->edesc;
+ ep_pipe = &sce->ue_pipes[dir];
+ DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%s, sce=%p\n",
+ sc, endpt, dir == IN ? "IN" : "OUT", sce));
+ edesc = ep_pipe->uep_edesc;
+
switch (edesc->bmAttributes & UE_XFERTYPE) {
case UE_INTERRUPT:
- if (dir == OUT) {
- err = usbd_open_pipe(sce->iface,
- edesc->bEndpointAddress, 0, &sce->pipeh);
- if (err)
- return (EIO);
- break;
- }
- isize = UGETW(edesc->wMaxPacketSize);
- if (isize == 0) /* shouldn't happen */
- return (EINVAL);
- sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
- DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
- endpt, isize));
- clalloc(&sce->q, UGEN_IBSIZE, 0);
- err = usbd_open_pipe_intr(sce->iface,
- edesc->bEndpointAddress,
- USBD_SHORT_XFER_OK, &sce->pipeh, sce,
- sce->ibuf, isize, ugenintr,
- USBD_DEFAULT_INTERVAL);
- if (err) {
- free(sce->ibuf, M_USBDEV, 0);
- clfree(&sce->q);
- return (EIO);
- }
- DPRINTFN(5, ("ugenopen: interrupt open done\n"));
- break;
case UE_BULK:
err = usbd_open_pipe(sce->iface,
- edesc->bEndpointAddress, 0, &sce->pipeh);
+ edesc->bEndpointAddress, 0, &ep_pipe->uep_pipe);
if (err)
return (EIO);
break;
case UE_ISOCHRONOUS:
- if (dir == OUT)
- return (EINVAL);
- isize = UGETW(edesc->wMaxPacketSize);
- if (isize == 0) /* shouldn't happen */
- return (EINVAL);
- sce->ibuf = mallocarray(isize, UGEN_NISOFRAMES,
- M_USBDEV, M_WAITOK);
- sce->cur = sce->fill = sce->ibuf;
- sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
- DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
- endpt, isize));
- err = usbd_open_pipe(sce->iface,
- edesc->bEndpointAddress, 0, &sce->pipeh);
- if (err) {
- free(sce->ibuf, M_USBDEV, 0);
- return (EIO);
- }
- for(i = 0; i < UGEN_NISOREQS; ++i) {
- sce->isoreqs[i].sce = sce;
- xfer = usbd_alloc_xfer(sc->sc_udev);
- if (xfer == 0)
- goto bad;
- sce->isoreqs[i].xfer = xfer;
- buf = usbd_alloc_buffer
- (xfer, isize * UGEN_NISORFRMS);
- if (buf == 0) {
- i++;
- goto bad;
- }
- sce->isoreqs[i].dmabuf = buf;
- for(j = 0; j < UGEN_NISORFRMS; ++j)
- sce->isoreqs[i].sizes[j] = isize;
- usbd_setup_isoc_xfer(xfer, sce->pipeh,
- &sce->isoreqs[i], sce->isoreqs[i].sizes,
- UGEN_NISORFRMS, USBD_NO_COPY |
- USBD_SHORT_XFER_OK, ugen_isoc_rintr);
- (void)usbd_transfer(xfer);
- }
- DPRINTFN(5, ("ugenopen: isoc open done\n"));
- break;
- bad:
- while (--i >= 0) /* implicit buffer free */
- usbd_free_xfer(sce->isoreqs[i].xfer);
- return (ENOMEM);
+ DPRINTFN(5, ("ugenopen: isoc not supported\n"));
+ return ENXIO;
case UE_CONTROL:
sce->timeout = USBD_DEFAULT_TIMEOUT;
return (EINVAL);
}
}
+
sc->sc_is_open[endpt] = 1;
return (0);
}
@@ -414,7 +890,8 @@ int
ugen_do_close(struct ugen_softc *sc, int endpt, int flag)
{
struct ugen_endpoint *sce;
- int dir, i;
+ struct ugen_endpoint_pipe *ep_pipe;
+ int dir;
#ifdef DIAGNOSTIC
if (!sc->sc_is_open[endpt]) {
@@ -423,42 +900,31 @@ ugen_do_close(struct ugen_softc *sc, int
}
#endif
+ sce = &sc->sc_endpoints[endpt];
if (endpt == USB_CONTROL_ENDPOINT) {
- DPRINTFN(5, ("ugenclose: close control\n"));
- sc->sc_is_open[endpt] = 0;
- return (0);
- }
-
- for (dir = OUT; dir <= IN; dir++) {
- if (!(flag & (dir == OUT ? FWRITE : FREAD)))
- continue;
- sce = &sc->sc_endpoints[endpt][dir];
- if (sce == NULL || sce->pipeh == NULL)
- continue;
- DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
- endpt, dir, sce));
+ DPRINTFN(5, ("ugenclose: close control EP\n"));
+ /* TODO: force outstanding control operations to be cancelled */
+ } else {
+ for (dir = OUT; dir <= IN; dir++) {
+ ep_pipe = &sce->ue_pipes[dir];
- usbd_close_pipe(sce->pipeh);
- sce->pipeh = NULL;
+ if (!(flag & (dir == OUT ? FWRITE : FREAD)))
+ continue;
- switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
- case UE_INTERRUPT:
- ndflush(&sce->q, sce->q.c_cc);
- clfree(&sce->q);
- break;
- case UE_ISOCHRONOUS:
- for (i = 0; i < UGEN_NISOREQS; ++i)
- usbd_free_xfer(sce->isoreqs[i].xfer);
+ if (ep_pipe->uep_pipe == NULL)
+ continue;
- default:
- break;
- }
+ DPRINTFN(5, ("ugenclose: endpt=%d dir=%d sce=%p\n",
+ endpt, dir, sce));
- if (sce->ibuf != NULL) {
- free(sce->ibuf, M_USBDEV, 0);
- sce->ibuf = NULL;
+ usbd_close_pipe(ep_pipe->uep_pipe);
+ ep_pipe->uep_pipe = NULL;
}
}
+
+ /* Release the operation tracking resources */
+ (void)ugen_op_reap_all(sc, sce);
+
sc->sc_is_open[endpt] = 0;
return (0);
@@ -467,15 +933,14 @@ ugen_do_close(struct ugen_softc *sc, int
int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
- struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
+ struct ugen_endpoint *sce = &sc->sc_endpoints[endpt];
+ struct ugen_endpoint_pipe *ep_pipe;
u_int32_t tn;
size_t n;
char buf[UGEN_BBSIZE];
struct usbd_xfer *xfer;
usbd_status err;
- int s;
int flags, error = 0;
- u_char buffer[UGEN_CHUNK];
DPRINTFN(5, ("%s: ugenread: %d\n", sc->sc_dev.dv_xname, endpt));
@@ -485,60 +950,30 @@ ugen_do_read(struct ugen_softc *sc, int
if (endpt == USB_CONTROL_ENDPOINT)
return (ENODEV);
+ mtx_enter(&sce->ep_mtx);
+ if (sce->pending != 0) {
+ mtx_leave(&sce->ep_mtx);
+ return EBUSY;
+ }
+ mtx_leave(&sce->ep_mtx);
+
+ ep_pipe = &sce->ue_pipes[IN];
+
#ifdef DIAGNOSTIC
- if (sce->edesc == NULL) {
+ if (ep_pipe->uep_edesc == NULL) {
printf("ugenread: no edesc\n");
return (EIO);
}
- if (sce->pipeh == NULL) {
+ if (ep_pipe->uep_pipe == NULL) {
printf("ugenread: no pipe\n");
return (EIO);
}
#endif
- switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
+ switch (ep_pipe->uep_edesc->bmAttributes & UE_XFERTYPE) {
case UE_INTERRUPT:
- /* Block until activity occurred. */
- s = splusb();
- while (sce->q.c_cc == 0) {
- if (flag & IO_NDELAY) {
- splx(s);
- return (EWOULDBLOCK);
- }
- sce->state |= UGEN_ASLP;
- DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
- error = tsleep(sce, PZERO | PCATCH, "ugenri",
- (sce->timeout * hz) / 1000);
- sce->state &= ~UGEN_ASLP;
- DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
- if (usbd_is_dying(sc->sc_udev))
- error = EIO;
- if (error == EWOULDBLOCK) { /* timeout, return 0 */
- error = 0;
- break;
- }
- if (error)
- break;
- }
- splx(s);
-
- /* Transfer as many chunks as possible. */
- while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
- n = ulmin(sce->q.c_cc, uio->uio_resid);
- if (n > sizeof(buffer))
- n = sizeof(buffer);
-
- /* Remove a small chunk from the input queue. */
- q_to_b(&sce->q, buffer, n);
- DPRINTFN(5, ("ugenread: got %zu chars\n", n));
-
- /* Copy the data to the user process. */
- error = uiomove(buffer, n, uio);
- if (error)
- break;
- }
- break;
case UE_BULK:
+ /* XXX: Allocate a pending transfer */
xfer = usbd_alloc_xfer(sc->sc_udev);
if (xfer == 0)
return (ENOMEM);
@@ -549,11 +984,11 @@ ugen_do_read(struct ugen_softc *sc, int
flags |= USBD_CATCH;
while ((n = ulmin(UGEN_BBSIZE, uio->uio_resid)) != 0) {
DPRINTFN(1, ("ugenread: start transfer %zu bytes\n",n));
- usbd_setup_xfer(xfer, sce->pipeh, 0, buf, n,
+ usbd_setup_xfer(xfer, ep_pipe->uep_pipe, 0, buf, n,
flags, sce->timeout, NULL);
err = usbd_transfer(xfer);
if (err) {
- usbd_clear_endpoint_stall(sce->pipeh);
+ usbd_clear_endpoint_stall(ep_pipe->uep_pipe);
if (err == USBD_INTERRUPTED)
error = EINTR;
else if (err == USBD_TIMEOUT)
@@ -570,48 +1005,6 @@ ugen_do_read(struct ugen_softc *sc, int
}
usbd_free_xfer(xfer);
break;
- case UE_ISOCHRONOUS:
- s = splusb();
- while (sce->cur == sce->fill) {
- if (flag & IO_NDELAY) {
- splx(s);
- return (EWOULDBLOCK);
- }
- sce->state |= UGEN_ASLP;
- DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
- error = tsleep(sce, PZERO | PCATCH, "ugenri",
- (sce->timeout * hz) / 1000);
- sce->state &= ~UGEN_ASLP;
- DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
- if (usbd_is_dying(sc->sc_udev))
- error = EIO;
- if (error == EWOULDBLOCK) { /* timeout, return 0 */
- error = 0;
- break;
- }
- if (error)
- break;
- }
-
- while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
- if(sce->fill > sce->cur)
- n = ulmin(sce->fill - sce->cur, uio->uio_resid);
- else
- n = ulmin(sce->limit - sce->cur, uio->uio_resid);
-
- DPRINTFN(5, ("ugenread: isoc got %zu chars\n", n));
-
- /* Copy the data to the user process. */
- error = uiomove(sce->cur, n, uio);
- if (error)
- break;
- sce->cur += n;
- if(sce->cur >= sce->limit)
- sce->cur = sce->ibuf;
- }
- splx(s);
- break;
-
default:
return (ENXIO);
@@ -638,7 +1031,8 @@ ugenread(dev_t dev, struct uio *uio, int
int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
- struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
+ struct ugen_endpoint *sce = &sc->sc_endpoints[endpt];
+ struct ugen_endpoint_pipe *ep_pipe = &sce->ue_pipes[OUT];
size_t n;
int flags, error = 0;
char buf[UGEN_BBSIZE];
@@ -653,12 +1047,18 @@ ugen_do_write(struct ugen_softc *sc, int
if (endpt == USB_CONTROL_ENDPOINT)
return (ENODEV);
+ mtx_enter(&sce->ep_mtx);
+ if (sce->pending != 0) {
+ mtx_leave(&sce->ep_mtx);
+ return EBUSY;
+ }
+ mtx_leave(&sce->ep_mtx);
#ifdef DIAGNOSTIC
- if (sce->edesc == NULL) {
+ if (ep_pipe->uep_edesc == NULL) {
printf("ugenwrite: no edesc\n");
return (EIO);
}
- if (sce->pipeh == NULL) {
+ if (ep_pipe->uep_pipe == NULL) {
printf("ugenwrite: no pipe\n");
return (EIO);
}
@@ -667,7 +1067,8 @@ ugen_do_write(struct ugen_softc *sc, int
if (sce->timeout == 0)
flags |= USBD_CATCH;
- switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
+ switch (ep_pipe->uep_edesc->bmAttributes & UE_XFERTYPE) {
+ case UE_INTERRUPT:
case UE_BULK:
xfer = usbd_alloc_xfer(sc->sc_udev);
if (xfer == 0)
@@ -677,37 +1078,11 @@ ugen_do_write(struct ugen_softc *sc, int
if (error)
break;
DPRINTFN(1, ("ugenwrite: transfer %zu bytes\n", n));
- usbd_setup_xfer(xfer, sce->pipeh, 0, buf, n,
+ usbd_setup_xfer(xfer, ep_pipe->uep_pipe, 0, buf, n,
flags, sce->timeout, NULL);
err = usbd_transfer(xfer);
if (err) {
- usbd_clear_endpoint_stall(sce->pipeh);
- if (err == USBD_INTERRUPTED)
- error = EINTR;
- else if (err == USBD_TIMEOUT)
- error = ETIMEDOUT;
- else
- error = EIO;
- break;
- }
- }
- usbd_free_xfer(xfer);
- break;
- case UE_INTERRUPT:
- xfer = usbd_alloc_xfer(sc->sc_udev);
- if (xfer == 0)
- return (EIO);
- while ((n = ulmin(UGETW(sce->edesc->wMaxPacketSize),
- uio->uio_resid)) != 0) {
- error = uiomove(buf, n, uio);
- if (error)
- break;
- DPRINTFN(1, ("ugenwrite: transfer %zu bytes\n", n));
- usbd_setup_xfer(xfer, sce->pipeh, 0, buf, n,
- flags, sce->timeout, NULL);
- err = usbd_transfer(xfer);
- if (err) {
- usbd_clear_endpoint_stall(sce->pipeh);
+ usbd_clear_endpoint_stall(ep_pipe->uep_pipe);
if (err == USBD_INTERRUPTED)
error = EINTR;
else if (err == USBD_TIMEOUT)
@@ -746,6 +1121,7 @@ ugen_detach(struct device *self, int fla
{
struct ugen_softc *sc = (struct ugen_softc *)self;
struct ugen_endpoint *sce;
+ struct ugen_endpoint_pipe *ep_pipe;
int i, dir, endptno;
int s, maj, mn;
@@ -753,10 +1129,11 @@ ugen_detach(struct device *self, int fla
/* Abort all pipes. Causes processes waiting for transfer to wake. */
for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
+ sce = &sc->sc_endpoints[i];
for (dir = OUT; dir <= IN; dir++) {
- sce = &sc->sc_endpoints[i][dir];
- if (sce && sce->pipeh)
- usbd_abort_pipe(sce->pipeh);
+ ep_pipe = &sce->ue_pipes[dir];
+ if (ep_pipe->uep_pipe)
+ usbd_abort_pipe(ep_pipe->uep_pipe);
}
}
@@ -764,7 +1141,7 @@ ugen_detach(struct device *self, int fla
if (--sc->sc_refcnt >= 0) {
/* Wake everyone */
for (i = 0; i < USB_MAX_ENDPOINTS; i++)
- wakeup(&sc->sc_endpoints[i][IN]);
+ wakeup(&sc->sc_endpoints[i].ue_pipes[IN]);
/* Wait for processes to go away. */
usb_detach_wait(&sc->sc_dev);
}
@@ -786,100 +1163,7 @@ ugen_detach(struct device *self, int fla
return (0);
}
-void
-ugenintr(struct usbd_xfer *xfer, void *addr, usbd_status status)
-{
- struct ugen_endpoint *sce = addr;
- /*struct ugen_softc *sc = sce->sc;*/
- u_int32_t count;
- u_char *ibuf;
-
- if (status == USBD_CANCELLED)
- return;
-
- if (status != USBD_NORMAL_COMPLETION) {
- DPRINTF(("ugenintr: status=%d\n", status));
- if (status == USBD_STALLED)
- usbd_clear_endpoint_stall_async(sce->pipeh);
- return;
- }
-
- usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
- ibuf = sce->ibuf;
-
- DPRINTFN(5, ("ugenintr: xfer=%p status=%d count=%d\n",
- xfer, status, count));
- DPRINTFN(5, (" data = %02x %02x %02x\n",
- ibuf[0], ibuf[1], ibuf[2]));
-
- (void)b_to_q(ibuf, count, &sce->q);
-
- if (sce->state & UGEN_ASLP) {
- sce->state &= ~UGEN_ASLP;
- DPRINTFN(5, ("ugen_intr: waking %p\n", sce));
- wakeup(sce);
- }
- selwakeup(&sce->rsel);
-}
-
-void
-ugen_isoc_rintr(struct usbd_xfer *xfer, void *addr, usbd_status status)
-{
- struct isoreq *req = addr;
- struct ugen_endpoint *sce = req->sce;
- u_int32_t count, n;
- int i, isize;
-
- /* Return if we are aborting. */
- if (status == USBD_CANCELLED)
- return;
-
- usbd_get_xfer_status(xfer, NULL, NULL, &count, NULL);
- DPRINTFN(5,("%s: xfer %ld, count=%d\n", __func__, req - sce->isoreqs,
- count));
-
- /* throw away oldest input if the buffer is full */
- if(sce->fill < sce->cur && sce->cur <= sce->fill + count) {
- sce->cur += count;
- if(sce->cur >= sce->limit)
- sce->cur = sce->ibuf + (sce->limit - sce->cur);
- DPRINTFN(5, ("%s: throwing away %d bytes\n", __func__, count));
- }
-
- isize = UGETW(sce->edesc->wMaxPacketSize);
- for (i = 0; i < UGEN_NISORFRMS; i++) {
- u_int32_t actlen = req->sizes[i];
- char const *buf = (char const *)req->dmabuf + isize * i;
-
- /* copy data to buffer */
- while (actlen > 0) {
- n = min(actlen, sce->limit - sce->fill);
- memcpy(sce->fill, buf, n);
-
- buf += n;
- actlen -= n;
- sce->fill += n;
- if(sce->fill == sce->limit)
- sce->fill = sce->ibuf;
- }
-
- /* setup size for next transfer */
- req->sizes[i] = isize;
- }
-
- usbd_setup_isoc_xfer(xfer, sce->pipeh, req, req->sizes, UGEN_NISORFRMS,
- USBD_NO_COPY | USBD_SHORT_XFER_OK, ugen_isoc_rintr);
- (void)usbd_transfer(xfer);
-
- if (sce->state & UGEN_ASLP) {
- sce->state &= ~UGEN_ASLP;
- DPRINTFN(5, ("ugen_isoc_rintr: waking %p\n", sce));
- wakeup(sce);
- }
- selwakeup(&sce->rsel);
-}
-
-int
+int
ugen_set_interface(struct ugen_softc *sc, int ifaceidx, int altno)
{
struct usbd_interface *iface;
@@ -887,6 +1171,7 @@ ugen_set_interface(struct ugen_softc *sc
usb_interface_descriptor_t *id;
usb_endpoint_descriptor_t *ed;
struct ugen_endpoint *sce;
+ struct ugen_endpoint_pipe *ep_pipe;
uint8_t endptno, endpt;
int dir, err;
@@ -900,15 +1185,18 @@ ugen_set_interface(struct ugen_softc *sc
err = usbd_device2interface_handle(sc->sc_udev, ifaceidx, &iface);
if (err)
return (err);
+
id = usbd_get_interface_descriptor(iface);
+
for (endptno = 0; endptno < id->bNumEndpoints; endptno++) {
ed = usbd_interface2endpoint_descriptor(iface,endptno);
endpt = ed->bEndpointAddress;
dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
- sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
+ sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)];
sce->sc = 0;
- sce->edesc = 0;
sce->iface = 0;
+ ep_pipe = &sce->ue_pipes[dir];
+ ep_pipe->uep_edesc = 0;
}
/* Try to change setting, if this fails put back the descriptors. */
@@ -919,10 +1207,11 @@ ugen_set_interface(struct ugen_softc *sc
ed = usbd_interface2endpoint_descriptor(iface,endptno);
endpt = ed->bEndpointAddress;
dir = UE_GET_DIR(endpt) == UE_DIR_IN ? IN : OUT;
- sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)][dir];
+ sce = &sc->sc_endpoints[UE_GET_ADDR(endpt)];
sce->sc = sc;
- sce->edesc = ed;
sce->iface = iface;
+ ep_pipe = &sce->ue_pipes[dir];
+ ep_pipe->uep_edesc = ed;
}
return (err);
}
@@ -944,6 +1233,7 @@ ugen_do_ioctl(struct ugen_softc *sc, int
int flag, struct proc *p)
{
struct ugen_endpoint *sce;
+ struct ugen_endpoint_pipe *ep_pipe;
int err;
struct usbd_interface *iface;
struct usb_config_desc *cd;
@@ -967,8 +1257,9 @@ ugen_do_ioctl(struct ugen_softc *sc, int
if (endpt == USB_CONTROL_ENDPOINT)
return (EINVAL);
/* This flag only affects read */
- sce = &sc->sc_endpoints[endpt][IN];
- if (sce == NULL || sce->pipeh == NULL)
+ sce = &sc->sc_endpoints[endpt];
+ ep_pipe = &sce->ue_pipes[IN];
+ if (ep_pipe->uep_pipe == NULL)
return (EINVAL);
if (*(int *)addr)
sce->state |= UGEN_SHORT_OK;
@@ -976,21 +1267,25 @@ ugen_do_ioctl(struct ugen_softc *sc, int
sce->state &= ~UGEN_SHORT_OK;
return (0);
case USB_SET_TIMEOUT:
- sce = &sc->sc_endpoints[endpt][IN];
- if (sce == NULL)
- return (EINVAL);
- sce->timeout = *(int *)addr;
- sce = &sc->sc_endpoints[endpt][OUT];
- if (sce == NULL)
- return (EINVAL);
+ sce = &sc->sc_endpoints[endpt];
sce->timeout = *(int *)addr;
return (0);
+
+ case USB_ASYNC_SUBMIT:
+ return ugen_op_submit(sc, endpt, (struct usb_async_op *)addr);
+ case USB_ASYNC_CANCEL:
+ /* XXX: Need to expose Pipe's cancel function */
+ return EINVAL;
+ case USB_ASYNC_FINISH:
+ return ugen_op_finish(sc, endpt, (struct usb_async_op *)addr);
+
default:
break;
}
- if (endpt != USB_CONTROL_ENDPOINT)
- return (EINVAL);
+ if (endpt != USB_CONTROL_ENDPOINT) {
+ return EINVAL;
+ }
switch (cmd) {
#ifdef UGEN_DEBUG
@@ -1171,7 +1466,7 @@ ugen_do_ioctl(struct ugen_softc *sc, int
goto ret;
}
}
- sce = &sc->sc_endpoints[endpt][IN];
+ sce = &sc->sc_endpoints[endpt];
err = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
if (err) {
@@ -1220,63 +1515,52 @@ ugenioctl(dev_t dev, u_long cmd, caddr_t
}
int
+ugen_check_async_ops(struct ugen_softc *sc,
+ struct ugen_endpoint *ep,
+ int *should_selrecord)
+{
+ int revents = 0;
+
+ *should_selrecord = 0;
+
+ mtx_enter(&ep->ep_mtx);
+ if (!TAILQ_EMPTY(&ep->ep_op_completed)) {
+ revents |= POLLIN;
+ }
+
+ if (!TAILQ_EMPTY(&ep->ep_op_pending)) {
+ *should_selrecord = 1;
+ }
+ mtx_leave(&ep->ep_mtx);
+
+ return revents;
+}
+
+int
ugenpoll(dev_t dev, int events, struct proc *p)
{
struct ugen_softc *sc;
struct ugen_endpoint *sce;
int revents = 0;
int s;
+ int should_selrec = 0;
sc = ugen_cd.cd_devs[UGENUNIT(dev)];
if (usbd_is_dying(sc->sc_udev))
return (POLLERR);
- /* XXX always IN */
- sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
- if (sce == NULL)
- return (POLLERR);
-#ifdef DIAGNOSTIC
- if (!sce->edesc) {
- printf("ugenpoll: no edesc\n");
- return (POLLERR);
- }
- if (!sce->pipeh) {
- printf("ugenpoll: no pipe\n");
- return (POLLERR);
- }
-#endif
s = splusb();
- switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
- case UE_INTERRUPT:
- if (events & (POLLIN | POLLRDNORM)) {
- if (sce->q.c_cc > 0)
- revents |= events & (POLLIN | POLLRDNORM);
- else
- selrecord(p, &sce->rsel);
- }
- break;
- case UE_ISOCHRONOUS:
- if (events & (POLLIN | POLLRDNORM)) {
- if (sce->cur != sce->fill)
- revents |= events & (POLLIN | POLLRDNORM);
- else
- selrecord(p, &sce->rsel);
- }
- break;
- case UE_BULK:
- /*
- * We have no easy way of determining if a read will
- * yield any data or a write will happen.
- * Pretend they will.
- */
- revents |= events &
- (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM);
- break;
- default:
- break;
+ sce = &sc->sc_endpoints[UGENENDPOINT(dev)];
+
+ revents = ugen_check_async_ops(sc, sce, &should_selrec);
+
+ if (should_selrec && revents == 0) {
+ selrecord(p, &sce->rsel);
}
+
splx(s);
+
return (revents);
}
@@ -1297,39 +1581,40 @@ filt_ugenrdetach(struct knote *kn)
}
int
-filt_ugenread_intr(struct knote *kn, long hint)
+filt_ugen_async_read_op(struct knote *kn, long hint)
{
struct ugen_endpoint *sce = (void *)kn->kn_hook;
+ int can_read = 0;
+
+ mtx_enter(&sce->ep_mtx);
+ if (!TAILQ_EMPTY(&sce->ep_op_completed)) {
+ can_read = 1;
+ }
+ mtx_leave(&sce->ep_mtx);
- kn->kn_data = sce->q.c_cc;
- return (kn->kn_data > 0);
+ return can_read;
}
int
-filt_ugenread_isoc(struct knote *kn, long hint)
+filt_ugen_async_write_op(struct knote *kn, long hint)
{
struct ugen_endpoint *sce = (void *)kn->kn_hook;
+ int can_write = 0;
- if (sce->cur == sce->fill)
- return (0);
-
- if (sce->cur < sce->fill)
- kn->kn_data = sce->fill - sce->cur;
- else
- kn->kn_data = (sce->limit - sce->cur) +
- (sce->fill - sce->ibuf);
+ mtx_enter(&sce->ep_mtx);
+ if (sce->pending < UGEN_MAX_OPS) {
+ can_write = 1;
+ }
+ mtx_leave(&sce->ep_mtx);
- return (1);
+ return can_write;
}
-struct filterops ugenread_intr_filtops =
- { 1, NULL, filt_ugenrdetach, filt_ugenread_intr };
+struct filterops ugen_async_read_filtops =
+ { 1, NULL, filt_ugenrdetach, filt_ugen_async_read_op };
-struct filterops ugenread_isoc_filtops =
- { 1, NULL, filt_ugenrdetach, filt_ugenread_isoc };
-
-struct filterops ugen_seltrue_filtops =
- { 1, NULL, filt_ugenrdetach, filt_seltrue };
+struct filterops ugen_async_write_filtops =
+ { 1, NULL, filt_ugenrdetach, filt_ugen_async_write_op };
int
ugenkqfilter(dev_t dev, struct knote *kn)
@@ -1344,53 +1629,17 @@ ugenkqfilter(dev_t dev, struct knote *kn
if (usbd_is_dying(sc->sc_udev))
return (ENXIO);
- /* XXX always IN */
- sce = &sc->sc_endpoints[UGENENDPOINT(dev)][IN];
- if (sce == NULL)
- return (EINVAL);
+ sce = &sc->sc_endpoints[UGENENDPOINT(dev)];
switch (kn->kn_filter) {
case EVFILT_READ:
klist = &sce->rsel.si_note;
- switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
- case UE_INTERRUPT:
- kn->kn_fop = &ugenread_intr_filtops;
- break;
- case UE_ISOCHRONOUS:
- kn->kn_fop = &ugenread_isoc_filtops;
- break;
- case UE_BULK:
- /*
- * We have no easy way of determining if a read will
- * yield any data or a write will happen.
- * So, emulate "seltrue".
- */
- kn->kn_fop = &ugen_seltrue_filtops;
- break;
- default:
- return (EINVAL);
- }
+ kn->kn_fop = &ugen_async_read_filtops;
break;
case EVFILT_WRITE:
klist = &sce->rsel.si_note;
- switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
- case UE_INTERRUPT:
- case UE_ISOCHRONOUS:
- /* XXX poll doesn't support this */
- return (EINVAL);
-
- case UE_BULK:
- /*
- * We have no easy way of determining if a read will
- * yield any data or a write will happen.
- * So, emulate "seltrue".
- */
- kn->kn_fop = &ugen_seltrue_filtops;
- break;
- default:
- return (EINVAL);
- }
+ kn->kn_fop = &ugen_async_write_filtops;
break;
default:
Index: sys/dev/usb/usb.h
===================================================================
RCS file: /cvs/src/sys/dev/usb/usb.h,v
retrieving revision 1.55
diff -u -p -r1.55 usb.h
--- sys/dev/usb/usb.h 24 May 2016 05:35:01 -0000 1.55
+++ sys/dev/usb/usb.h 26 May 2016 17:16:55 -0000
@@ -745,6 +745,34 @@ struct usb_ctl_report {
u_char ucr_data[1024]; /* filled data size will vary */
};
+struct usb_async_op {
+ u_int8_t uao_ep; /* The endpoint number. MSB indicates IN or OUT operation. */
+ caddr_t uao_buf; /* Buffer containing transfer/target. */
+ u_int32_t uao_buf_len; /* Length of the buffer in bytes */
+ u_int32_t uao_transfer_size; /* Length of the transfer, in bytes */
+ /* Note: uao_transfer_size can be larger than uao_buf_len, but if the target
+ * buffer is too small to contain the entire transfer, the remaining bytes
+ * will be discarded when the operation is completed.
+ */
+
+ int32_t uao_status; /* Status of the transfer */
+#define USB_OP_STATUS_COMPLETED 0x0
+#define USB_OP_STATUS_ERROR 0x1
+
+ u_int8_t uao_flags; /* Flags controlling this transfer */
+#define USB_OP_FLAG_ALLOW_SHORT 0x01
+
+ u_int8_t uao_type;
+#define USB_OP_CONTROL_TRANSFER 0x0
+#define USB_OP_BULK_TRANSFER 0x1
+#define USB_OP_INTERRUPT_TRANSFER 0x2
+#define USB_OP_ISOCHRONOUS_TRANSFER 0x3
+
+ void *uao_priv; /* Opaque state set by userspace process. Used to identify the op. */
+ u_int32_t uao_timeout_ms; /* Timeout for the transfer, in milliseconds */
+#define USB_OP_TIMEOUT_DEFAULT (-1)
+};
+
struct usb_device_stats {
u_long uds_requests[4]; /* indexed by transfer type UE_* */
};
@@ -779,5 +807,10 @@ struct usb_device_stats {
#define USB_GET_DEVICEINFO _IOR ('U', 112, struct usb_device_info)
#define USB_SET_SHORT_XFER _IOW ('U', 113, int)
#define USB_SET_TIMEOUT _IOW ('U', 114, int)
+
+/* Generic USB device asynchronous operations. */
+#define USB_ASYNC_SUBMIT _IOWR('U', 115, struct usb_async_op)
+#define USB_ASYNC_CANCEL _IOWR('U', 116, struct usb_async_op)
+#define USB_ASYNC_FINISH _IOWR('U', 117, struct usb_async_op)
#endif /* _USB_H_ */