Hi, All:
I've refactored the friends of usb_sg_request to permit it to be used
by ub. The result is attached FYI. It's probably buggy. I cannot convince
myself that all this is correct.
In case anyone is curious, fields in usb_sg_request were renamed because
their functions morphed in a way which made some of the uses incorrect for
each of them. The renaming ensures that the programmer examines every use.
Cheers,
-- Pete
diff -urp -X dontdiff linux-2.6.12/drivers/block/ub.c
linux-2.6.12-wip/drivers/block/ub.c
--- linux-2.6.12/drivers/block/ub.c 2005-06-21 12:58:18.000000000 -0700
+++ linux-2.6.12-wip/drivers/block/ub.c 2005-07-09 21:54:37.000000000 -0700
@@ -105,7 +105,7 @@ struct bulk_cs_wrap {
*/
struct ub_dev;
-#define UB_MAX_REQ_SG 1
+#define UB_MAX_REQ_SG 4
#define UB_MAX_SECTORS 64
/*
@@ -172,7 +172,8 @@ struct ub_scsi_cmd {
*/
char *data; /* Requested buffer */
unsigned int len; /* Requested length */
- // struct scatterlist sgv[UB_MAX_REQ_SG];
+ unsigned int nsg; /* ==0 means "look at len" */
+ struct scatterlist sgv[UB_MAX_REQ_SG];
struct ub_lun *lun;
void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
@@ -314,12 +315,14 @@ struct ub_dev {
struct ub_completion work_done;
struct urb work_urb;
+ struct usb_sg_request *work_io;
struct timer_list work_timer;
int last_pipe; /* What might need clearing */
struct bulk_cb_wrap work_bcb;
struct bulk_cs_wrap work_bcs;
struct usb_ctrlrequest work_cr;
+ int sg_stat[UB_MAX_REQ_SG+1];
struct ub_scsi_trace tr;
};
@@ -453,6 +456,13 @@ static ssize_t ub_diag_show(struct devic
cnt += sprintf(page + cnt,
"qlen %d qmax %d\n",
sc->cmd_queue.qlen, sc->cmd_queue.qmax);
+ cnt += sprintf(page + cnt,
+ "sg %d %d %d %d %d\n",
+ sc->sg_stat[0],
+ sc->sg_stat[1],
+ sc->sg_stat[2],
+ sc->sg_stat[3],
+ sc->sg_stat[4]);
list_for_each (p, &sc->luns) {
lun = list_entry(p, struct ub_lun, link);
@@ -587,6 +597,8 @@ static void ub_cleanup(struct ub_dev *sc
kfree(lun);
}
+ usb_sg_free(sc->work_io);
+ // usb_free_urb(sc->work_urb);
kfree(sc);
}
@@ -734,10 +746,8 @@ static int ub_cmd_build_block(struct ub_
struct ub_scsi_cmd *cmd, struct request *rq)
{
int ub_dir;
-#if 0 /* We use rq->buffer for now */
struct scatterlist *sg;
int n_elem;
-#endif
unsigned int block, nblks;
if (rq_data_dir(rq) == WRITE)
@@ -748,42 +758,17 @@ static int ub_cmd_build_block(struct ub_
/*
* get scatterlist from block layer
*/
-#if 0 /* We use rq->buffer for now */
sg = &cmd->sgv[0];
- n_elem = blk_rq_map_sg(q, rq, sg);
+ n_elem = blk_rq_map_sg(lun->disk->queue, rq, sg);
if (n_elem <= 0) {
- ub_put_cmd(lun, cmd);
- ub_end_rq(rq, 0);
- blk_start_queue(q);
- return 0; /* request with no s/g entries? */
+ printk(KERN_INFO "%s: failed request map (%d)\n",
+ sc->name, n_elem); /* P3 */
+ return -1; /* request with no s/g entries? */
}
- if (n_elem != 1) { /* Paranoia */
+ if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */
printk(KERN_WARNING "%s: request with %d segments\n",
sc->name, n_elem);
- ub_put_cmd(lun, cmd);
- ub_end_rq(rq, 0);
- blk_start_queue(q);
- return 0;
- }
-#endif
-
- /*
- * XXX Unfortunately, this check does not work. It is quite possible
- * to get bogus non-null rq->buffer if you allow sg by mistake.
- */
- if (rq->buffer == NULL) {
- /*
- * This must not happen if we set the queue right.
- * The block level must create bounce buffers for us.
- */
- static int do_print = 1;
- if (do_print) {
- printk(KERN_WARNING "%s: unmapped block request"
- " flags 0x%lx sectors %lu\n",
- sc->name, rq->flags, rq->nr_sectors);
- do_print = 0;
- }
return -1;
}
@@ -807,7 +792,8 @@ static int ub_cmd_build_block(struct ub_
cmd->cdb_len = 10;
cmd->dir = ub_dir;
- cmd->data = rq->buffer;
+ cmd->nsg = n_elem;
+ cmd->data = NULL;
cmd->len = rq->nr_sectors * 512;
return 0;
@@ -839,6 +825,7 @@ static int ub_cmd_build_packet(struct ub
else
cmd->dir = UB_DIR_READ;
}
+ cmd->nsg = 0;
cmd->data = rq->data;
cmd->len = rq->data_len;
@@ -994,6 +981,14 @@ static void ub_urb_complete(struct urb *
tasklet_schedule(&sc->tasklet);
}
+static void ub_sg_complete(void *ctx)
+{
+ struct ub_dev *sc = ctx;
+
+ ub_complete(&sc->work_done);
+ tasklet_schedule(&sc->tasklet);
+}
+
static void ub_scsi_action(unsigned long _dev)
{
struct ub_dev *sc = (struct ub_dev *) _dev;
@@ -1036,6 +1031,9 @@ static void ub_scsi_urb_compl(struct ub_
int pipe;
int rc;
+ int status;
+ int length;
+
if (atomic_read(&sc->poison)) {
/* A little too simplistic, I feel... */
goto Bad_End;
@@ -1124,16 +1122,41 @@ static void ub_scsi_urb_compl(struct ub_
else
pipe = sc->send_bulk_pipe;
sc->last_pipe = pipe;
- usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
- cmd->data, cmd->len, ub_urb_complete, sc);
- sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
- sc->work_urb.actual_length = 0;
- sc->work_urb.error_count = 0;
- sc->work_urb.status = 0;
- if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
+ if (cmd->nsg == 0) {
+ usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
+ cmd->data, cmd->len, ub_urb_complete, sc);
+ sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
+ sc->work_urb.actual_length = 0;
+ sc->work_urb.error_count = 0;
+ sc->work_urb.status = 0;
+
+ rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC);
/* XXX Clear stalls */
- printk("ub: data #%d submit failed (%d)\n", cmd->tag,
rc); /* P3 */
+ if (rc != 0) {
+ printk("ub: data #%d submit failed (%d)\n",
+ cmd->tag, rc); /* P3 */
+ }
+ } else {
+ rc = usb_sg_setup(sc->work_io, pipe, cmd->sgv, cmd->nsg,
+ cmd->len, ub_sg_complete, sc);
+ if (rc != 0) {
+ /* This should not happen... but it can. */
+ printk("ub: data #%d sg setup failed (%d) "
+ "nsg %d\n",
+ cmd->tag, rc, cmd->nsg); /* P3 */
+ ub_complete(&sc->work_done);
+ ub_state_done(sc, cmd, rc);
+ return;
+ }
+ rc = usb_sg_submit(sc->work_io);
+ if (rc != 0) {
+ printk("ub: data #%d sg submit failed (%d) "
+ "nsg %d\n",
+ cmd->tag, rc, cmd->nsg); /* P3 */
+ }
+ }
+ if (rc != 0) {
ub_complete(&sc->work_done);
ub_state_done(sc, cmd, rc);
return;
@@ -1144,9 +1167,17 @@ static void ub_scsi_urb_compl(struct ub_
cmd->state = UB_CMDST_DATA;
ub_cmdtr_state(sc, cmd);
+ sc->sg_stat[cmd->nsg]++;
} else if (cmd->state == UB_CMDST_DATA) {
- if (urb->status == -EPIPE) {
+ if (cmd->nsg == 0) {
+ status = urb->status;
+ length = urb->actual_length;
+ } else {
+ status = sc->work_io->status;
+ length = sc->work_io->bytes;
+ }
+ if (status == -EPIPE) {
rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
if (rc != 0) {
printk(KERN_NOTICE "%s: "
@@ -1162,17 +1193,17 @@ static void ub_scsi_urb_compl(struct ub_
ub_cmdtr_state(sc, cmd);
return;
}
- if (urb->status == -EOVERFLOW) {
+ if (status == -EOVERFLOW) {
/*
* A babble? Failure, but we must transfer CSW now.
*/
cmd->error = -EOVERFLOW; /* A cheap trick... */
} else {
- if (urb->status != 0)
+ if (status != 0)
goto Bad_End;
}
- cmd->act_len = urb->actual_length;
+ cmd->act_len = length;
ub_cmdtr_act_len(sc, cmd);
ub_state_stat(sc, cmd);
@@ -1368,6 +1399,7 @@ static void ub_state_sense(struct ub_dev
scmd->cdb_len = 6;
scmd->dir = UB_DIR_READ;
scmd->state = UB_CMDST_INIT;
+ scmd->nsg = 0;
scmd->data = sc->top_sense;
scmd->len = UB_SENSE_SIZE;
scmd->lun = cmd->lun;
@@ -1743,6 +1775,7 @@ static int ub_sync_read_cap(struct ub_de
cmd->cdb_len = 10;
cmd->dir = UB_DIR_READ;
cmd->state = UB_CMDST_INIT;
+ cmd->nsg = 0;
cmd->data = p;
cmd->len = 8;
cmd->lun = lun;
@@ -2012,6 +2045,9 @@ static int ub_probe(struct usb_interface
int rc;
int i;
+ struct usb_sg_request *io;
+ // struct urb *urb;
+
rc = -ENOMEM;
if ((sc = kmalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
goto err_core;
@@ -2036,6 +2072,22 @@ static int ub_probe(struct usb_interface
usb_get_dev(sc->dev);
// usb_get_intf(sc->intf); /* Do we need this? */
+ io = usb_sg_alloc(sc->dev, UB_MAX_REQ_SG, GFP_KERNEL);
+ if (IS_ERR(io)) {
+ rc = PTR_ERR(io);
+ goto err_alloc_sg;
+ }
+ sc->work_io = io;
+
+#if 0 /* XXX Not doing this yet to make S/G patch smaller */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (IS_ERR(urb)) {
+ rc = PTR_ERR(urb);
+ goto err_alloc_urb;
+ }
+ sc->work_urb = urb;
+#endif
+
snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
sc->dev->bus->busnum, sc->dev->devnum);
@@ -2110,6 +2162,10 @@ static int ub_probe(struct usb_interface
/* device_remove_file(&sc->intf->dev, &dev_attr_diag); */
err_diag:
+ // usb_free_urb(sc->work_urb);
+// err_alloc_urb:
+ // usb_sg_free(sc->work_io);
+err_alloc_sg:
usb_set_intfdata(intf, NULL);
// usb_put_intf(sc->intf);
usb_put_dev(sc->dev);
diff -urp -X dontdiff linux-2.6.12/drivers/usb/core/message.c
linux-2.6.12-wip/drivers/usb/core/message.c
--- linux-2.6.12/drivers/usb/core/message.c 2005-06-21 12:58:46.000000000
-0700
+++ linux-2.6.12-wip/drivers/usb/core/message.c 2005-07-09 21:51:06.000000000
-0700
@@ -209,22 +209,29 @@ int usb_bulk_msg(struct usb_device *usb_
/*-------------------------------------------------------------------*/
+/*
+ * The sg_clean is a bracket to both sg_init and sg_alloc. This is only
+ * possible because the two are never called separately, of course.
+ * If sg_clean returned, the memory occupied by usb_sg_request can be freed.
+ */
static void sg_clean (struct usb_sg_request *io)
{
if (io->urbs) {
- while (io->entries--)
- usb_free_urb (io->urbs [io->entries]);
+ while (io->nallocs--)
+ usb_free_urb (io->urbs [io->nallocs]);
kfree (io->urbs);
io->urbs = NULL;
}
if (io->dev->dev.dma_mask != NULL)
- usb_buffer_unmap_sg (io->dev, io->pipe, io->sg, io->nents);
+ usb_buffer_unmap_sg (io->dev, io->pipe, io->sg, io->nmaps);
+ usb_put_dev(io->dev);
io->dev = NULL;
}
static void sg_complete (struct urb *urb, struct pt_regs *regs)
{
struct usb_sg_request *io = (struct usb_sg_request *) urb->context;
+ int done = 0;
spin_lock (&io->lock);
@@ -260,10 +267,10 @@ static void sg_complete (struct urb *urb
* unlink pending urbs so they won't rx/tx bad data.
* careful: unlink can sometimes be synchronous...
*/
- spin_unlock (&io->lock);
- for (i = 0, found = 0; i < io->entries; i++) {
+ for (i = 0, found = 0; i < io->nallocs; i++) {
if (!io->urbs [i] || !io->urbs [i]->dev)
continue;
+ spin_unlock (&io->lock);
if (found) {
status = usb_unlink_urb (io->urbs [i]);
if (status != -EINPROGRESS && status != -EBUSY)
@@ -272,8 +279,8 @@ static void sg_complete (struct urb *urb
__FUNCTION__, status);
} else if (urb == io->urbs [i])
found = 1;
+ spin_lock (&io->lock);
}
- spin_lock (&io->lock);
}
urb->dev = NULL;
@@ -281,11 +288,189 @@ static void sg_complete (struct urb *urb
io->bytes += urb->actual_length;
io->count--;
if (!io->count)
- complete (&io->complete);
+ done = 1;
spin_unlock (&io->lock);
+
+ if (done)
+ (*io->complete) (io->context);
+}
+
+static void sg_complete_sync (void *ctx)
+{
+ struct completion *done = ctx;
+ complete (done);
+}
+
+static void sg_init(
+ struct usb_sg_request *io,
+ struct usb_device *dev)
+{
+ spin_lock_init(&io->lock);
+ usb_get_dev(dev);
+ io->dev = dev;
+}
+
+static int sg_alloc(
+ struct usb_sg_request *io,
+ int alloc_ents,
+ int mem_flags)
+{
+ int i;
+
+ io->urbs = kmalloc(alloc_ents * sizeof(struct urb *), mem_flags);
+ if (!io->urbs)
+ goto nourbs;
+
+ for (i = 0; i < alloc_ents; i++) {
+
+ io->urbs [i] = usb_alloc_urb (0, mem_flags);
+ if (!io->urbs [i]) {
+ while (i-- != 0) {
+ usb_free_urb(io->urbs[i]);
+ }
+ goto nomem;
+ }
+ }
+
+ io->nallocs = alloc_ents;
+ return 0;
+
+nomem:
+ kfree(io->urbs);
+nourbs:
+ return -ENOMEM;
+}
+
+static int sg_setup(
+ struct usb_sg_request *io,
+ unsigned int pipe,
+ struct scatterlist *sg, int nmaps,
+ size_t length,
+ unsigned period,
+ int dma)
+{
+ int urb_flags;
+ int i;
+
+ if (nmaps > io->nallocs)
+ return -EINVAL;
+
+ io->pipe = pipe; /* This has direction encoded in it */
+ io->sg = sg;
+
+ urb_flags = URB_ASYNC_UNLINK | URB_NO_TRANSFER_DMA_MAP
+ | URB_NO_INTERRUPT;
+ if (usb_pipein (pipe))
+ urb_flags |= URB_SHORT_NOT_OK;
+
+ for (i = 0; i < nmaps; i++) {
+ unsigned len;
+ struct urb *urbp = io->urbs[i];
+
+ urbp->dev = NULL;
+ urbp->pipe = pipe;
+ urbp->interval = period;
+ urbp->transfer_flags = urb_flags;
+
+ urbp->complete = sg_complete;
+ urbp->context = io;
+ urbp->status = -EINPROGRESS;
+ urbp->actual_length = 0;
+
+ if (dma) {
+ /* hc may use _only_ transfer_dma */
+ urbp->transfer_dma = sg_dma_address (sg + i);
+ len = sg_dma_len (sg + i);
+ } else {
+ /* hc may use _only_ transfer_buffer */
+ urbp->transfer_buffer =
+ page_address (sg [i].page) + sg [i].offset;
+ len = sg [i].length;
+ }
+
+ if (length) {
+ len = min_t (unsigned, len, length);
+ length -= len;
+ }
+ urbp->transfer_buffer_length = len;
+ if (length == 0) {
+ i++;
+ break;
+ }
+ }
+ io->count = i;
+ io->urbs [--i]->transfer_flags &= ~URB_NO_INTERRUPT;
+
+ /* transaction state */
+ io->status = 0;
+ io->bytes = 0;
+
+ return 0;
+}
+
+/**
+ * usb_sg_alloc - allocate necessary memory and initialize fields for an I/O
+ * This is done once in the lifetime of an I/O request (not a storage request).
+ * This better be called from a process context. Yes, this function takes
+ * memory allocation flags, but think twice. Who needs random failures?
+ *
+ * Returns an ERR_PTR.
+ */
+struct usb_sg_request *usb_sg_alloc(
+ struct usb_device *dev,
+ int max_ents,
+ int mem_flags)
+{
+ struct usb_sg_request *io;
+ int rc;
+
+ rc = -ENOMEM;
+ if ((io = kmalloc(sizeof(struct usb_sg_request), mem_flags)) == NULL)
+ goto err_noio;
+ memset(io, 0, sizeof(struct usb_sg_request));
+ sg_init(io, dev);
+ if ((rc = sg_alloc(io, max_ents, mem_flags)) != 0)
+ goto err_noalloc;
+ return io;
+
+err_noalloc:
+ sg_clean(io);
+ kfree(io);
+err_noio:
+ return ERR_PTR(rc);
}
+/**
+ * usb_sg_setup - initialize per-transfer variables.
+ * This sets the DMA mappings, among other things.
+ * This can be done several times for an I/O request, once for every transfer.
+ * This can be called from an interrupt context.
+ */
+int usb_sg_setup(
+ struct usb_sg_request *io,
+ unsigned int pipe,
+ struct scatterlist *sg, int nents,
+ size_t length,
+ void (*compl)(void *), void *ctx)
+{
+ int dma;
+
+ dma = (io->dev->dev.dma_mask != NULL);
+ if (dma)
+ io->nmaps = usb_buffer_map_sg(io->dev, pipe, sg, nents);
+ else
+ io->nmaps = nents;
+ if (io->nmaps < 0)
+ return io->nmaps;
+ if (io->nmaps == 0)
+ return -EFAULT; /* Don't let this happen */
+
+ io->complete = compl;
+ io->context = ctx;
+
+ return sg_setup(io, pipe, sg, io->nmaps, length, 0, dma);
+}
/**
* usb_sg_init - initializes scatterlist-based bulk/interrupt I/O request
@@ -323,8 +508,6 @@ int usb_sg_init (
int mem_flags
)
{
- int i;
- int urb_flags;
int dma;
if (!io || !dev || !sg
@@ -333,87 +516,37 @@ int usb_sg_init (
|| nents <= 0)
return -EINVAL;
- spin_lock_init (&io->lock);
- io->dev = dev;
- io->pipe = pipe;
- io->sg = sg;
- io->nents = nents;
+ sg_init(io, dev);
/* not all host controllers use DMA (like the mainstream pci ones);
* they can use PIO (sl811) or be software over another transport.
*/
dma = (dev->dev.dma_mask != NULL);
if (dma)
- io->entries = usb_buffer_map_sg (dev, pipe, sg, nents);
+ io->nmaps = usb_buffer_map_sg (dev, pipe, sg, nents);
else
- io->entries = nents;
+ io->nmaps = nents;
/* initialize all the urbs we'll use */
- if (io->entries <= 0)
- return io->entries;
+ if (io->nmaps <= 0) {
+ sg_clean (io);
+ return io->nmaps;
+ }
- io->count = io->entries;
- io->urbs = kmalloc (io->entries * sizeof *io->urbs, mem_flags);
- if (!io->urbs)
+ if (sg_alloc(io, io->nmaps, mem_flags) != 0)
goto nomem;
- urb_flags = URB_ASYNC_UNLINK | URB_NO_TRANSFER_DMA_MAP
- | URB_NO_INTERRUPT;
- if (usb_pipein (pipe))
- urb_flags |= URB_SHORT_NOT_OK;
-
- for (i = 0; i < io->entries; i++) {
- unsigned len;
-
- io->urbs [i] = usb_alloc_urb (0, mem_flags);
- if (!io->urbs [i]) {
- io->entries = i;
- goto nomem;
- }
+ if (sg_setup(io, pipe, sg, io->nmaps, length, period, dma) != 0)
+ goto nosetup;
- io->urbs [i]->dev = NULL;
- io->urbs [i]->pipe = pipe;
- io->urbs [i]->interval = period;
- io->urbs [i]->transfer_flags = urb_flags;
-
- io->urbs [i]->complete = sg_complete;
- io->urbs [i]->context = io;
- io->urbs [i]->status = -EINPROGRESS;
- io->urbs [i]->actual_length = 0;
-
- if (dma) {
- /* hc may use _only_ transfer_dma */
- io->urbs [i]->transfer_dma = sg_dma_address (sg + i);
- len = sg_dma_len (sg + i);
- } else {
- /* hc may use _only_ transfer_buffer */
- io->urbs [i]->transfer_buffer =
- page_address (sg [i].page) + sg [i].offset;
- len = sg [i].length;
- }
-
- if (length) {
- len = min_t (unsigned, len, length);
- length -= len;
- if (length == 0)
- io->entries = i + 1;
- }
- io->urbs [i]->transfer_buffer_length = len;
- }
- io->urbs [--i]->transfer_flags &= ~URB_NO_INTERRUPT;
-
- /* transaction state */
- io->status = 0;
- io->bytes = 0;
- init_completion (&io->complete);
return 0;
+nosetup:
nomem:
sg_clean (io);
return -ENOMEM;
}
-
/**
* usb_sg_wait - synchronously execute scatter/gather request
* @io: request block handle, as initialized with usb_sg_init().
@@ -455,7 +588,12 @@ nomem:
*/
void usb_sg_wait (struct usb_sg_request *io)
{
- int i, entries = io->entries;
+ int i, entries = io->count;
+ struct completion done;
+
+ init_completion (&done);
+ io->complete = sg_complete_sync;
+ io->context = &done;
/* queue the urbs. */
spin_lock_irq (&io->lock);
@@ -504,18 +642,69 @@ void usb_sg_wait (struct usb_sg_request
}
io->count -= entries - i;
if (io->count == 0)
- complete (&io->complete);
+ complete (&done);
spin_unlock_irq (&io->lock);
- /* OK, yes, this could be packaged as non-blocking.
- * So could the submit loop above ... but it's easier to
- * solve neither problem than to solve both!
- */
- wait_for_completion (&io->complete);
+ wait_for_completion (&done);
sg_clean (io);
}
+/*
+ * The usb_sg_submit is largely the same as the head part of usb_sg_wait.
+ * This can be called from an interrupt context.
+ * The cosmic law of submits is obeyed (return code == 0: callback will happen,
+ * return code != 0: no callback; no exceptions).
+ */
+int usb_sg_submit(struct usb_sg_request *io)
+{
+ int start_count = io->count;
+ int i;
+ unsigned long flags;
+ struct urb *urbp;
+ int retval;
+
+ /* queue the urbs. */
+ spin_lock_irqsave(&io->lock, flags);
+ for (i = 0; i < start_count; i++) {
+ if (io->status != 0)
+ break;
+ urbp = io->urbs[i];
+
+ urbp->dev = io->dev;
+ retval = usb_submit_urb(urbp, SLAB_ATOMIC);
+ spin_unlock_irqrestore(&io->lock, flags);
+ if (retval) {
+ urbp->dev = NULL;
+ urbp->status = retval;
+ /*
+ * Now, things are getting interesting. Why do we not
+ * call usb_sg_cancel here? The current usb_sg_cancel
+ * does not wait, so we could. But it accomplishes that
+ * by using URB_ASYNC_UNLINK. This means that callbacks
+ * may arrive some time in the future, after we return.
+ * This means that we have to return zero.
+ * But if so, we might as well just exit.
+ */
+ // usb_sg_cancel (io);
+ } else {
+ cpu_relax ();
+ }
+
+ spin_lock_irqsave(&io->lock, flags);
+ if (retval && (io->status == 0 || io->status == -ECONNRESET))
+ io->status = retval;
+ }
+ io->count -= start_count - i;
+ retval = 0;
+ if (io->count == 0) { /* No callbacks outstanding */
+ retval = io->status;
+ }
+ spin_unlock_irqrestore(&io->lock, flags);
+
+ return retval;
+}
+
/**
* usb_sg_cancel - stop scatter/gather i/o issued by usb_sg_wait()
* @io: request block, initialized with usb_sg_init()
@@ -536,7 +725,7 @@ void usb_sg_cancel (struct usb_sg_reques
io->status = -ECONNRESET;
spin_unlock (&io->lock);
- for (i = 0; i < io->entries; i++) {
+ for (i = 0; i < io->nmaps; i++) {
int retval;
if (!io->urbs [i]->dev)
@@ -551,6 +740,14 @@ void usb_sg_cancel (struct usb_sg_reques
spin_unlock_irqrestore (&io->lock, flags);
}
+/*
+ */
+void usb_sg_free(struct usb_sg_request *io)
+{
+ sg_clean(io);
+ kfree(io);
+}
+
/*-------------------------------------------------------------------*/
/**
@@ -658,8 +855,12 @@ static int usb_string_sub(struct usb_dev
{
int rc;
- /* Try to read the string descriptor by asking for the maximum
- * possible number of bytes */
+ /*
+ * Try to read the string descriptor by asking for the maximum
+ * possible number of bytes.
+ * A request for an odd number of bytes of a UTF-16 string
+ * may seem odd, but this is what Windows does, so let's be safe.
+ */
rc = usb_get_string(dev, langid, index, buf, 255);
/* If that failed try to read the descriptor length, then
@@ -1449,6 +1650,10 @@ EXPORT_SYMBOL(usb_bulk_msg);
EXPORT_SYMBOL(usb_sg_init);
EXPORT_SYMBOL(usb_sg_cancel);
EXPORT_SYMBOL(usb_sg_wait);
+EXPORT_SYMBOL(usb_sg_alloc);
+EXPORT_SYMBOL(usb_sg_setup);
+EXPORT_SYMBOL(usb_sg_submit);
+EXPORT_SYMBOL(usb_sg_free);
// synchronous control message convenience routines
EXPORT_SYMBOL(usb_get_descriptor);
diff -urp -X dontdiff linux-2.6.12/include/linux/usb.h
linux-2.6.12-wip/include/linux/usb.h
--- linux-2.6.12/include/linux/usb.h 2005-06-21 12:59:17.000000000 -0700
+++ linux-2.6.12-wip/include/linux/usb.h 2005-07-09 20:29:30.000000000
-0700
@@ -1039,15 +1039,20 @@ struct usb_sg_request {
struct usb_device *dev;
int pipe;
struct scatterlist *sg;
- int nents;
+ /* int nents; */ /* Original size before map */
+ int nmaps; /* nmaps >= count */
- int entries;
- struct urb **urbs;
+ int nallocs; /* nallocs >= nmaps */
+ struct urb **urbs; /* urbs[nallocs] */
int count;
- struct completion complete;
+ void (*complete)(void *context);
+ void *context;
};
+/*
+ * This is "old" or "synchronous" interface to drive usb_sg_request
+ */
int usb_sg_init (
struct usb_sg_request *io,
struct usb_device *dev,
@@ -1061,6 +1066,23 @@ int usb_sg_init (
void usb_sg_cancel (struct usb_sg_request *io);
void usb_sg_wait (struct usb_sg_request *io);
+/*
+ * This is "new" or "asynchronous" interface to usb_sg_request.
+ * No different "cancel" routine in the "new" set, so use usb_sg_cancel().
+ * The I/O is unsetup upon callback.
+ */
+struct usb_sg_request *usb_sg_alloc(
+ struct usb_device *dev,
+ int max_ents,
+ int mem_flags);
+int usb_sg_setup(
+ struct usb_sg_request *io,
+ unsigned int pipe,
+ struct scatterlist *sg, int nents,
+ size_t length,
+ void (*compl)(void *), void *ctx);
+int usb_sg_submit(struct usb_sg_request *io);
+void usb_sg_free(struct usb_sg_request *io);
/* --------------------------------------------------------------------------
*/
-------------------------------------------------------
This SF.Net email is sponsored by the 'Do More With Dual!' webinar happening
July 14 at 8am PDT/11am EDT. We invite you to explore the latest in dual
core and dual graphics technology at this free one hour event hosted by HP,
AMD, and NVIDIA. To register visit http://www.hp.com/go/dualwebinar
_______________________________________________
[email protected]
To unsubscribe, use the last form field at:
https://lists.sourceforge.net/lists/listinfo/linux-usb-devel