Re: bufq massage
Date: Sun, 29 Aug 2010 22:26:14 +1000 From: David Gwynne l...@animata.net this diff is largely a mechanical change. firstly, it makes struct bufq a member of the softc for devices that use it, rather than it being a pointer to something that needs to be allocated at attach. since all these devices need a bufq to operate, it makes sense to have it allocated as part of the softc and get bufq_init to just initialise all its fields. it also gets rid of the possibility that you won't be able to allocate the bufq struct during attach, which is something you don't want to happen. secondly, it consistently implements a split between wrapper functions and the per discipline implementation of the bufq handlers. it consistently does the locking in the wrappers rather than doing half in the wrappers and the other half in the implementations. it also consistently handles the outstanding bufq bq pointer in the wrappers. this hides most of the implementation inside kern_bufq.c. the only stuff left in buf.h is for the bits each implementation needs to put bufs on their queues. I've tested this extensively on sd(4) and thib has tested this on wd(4). we'd like some wider exposure, especially over suspends and resumes on a variety of machines. I have tried to preserve the locking semantics, but testing would be lovely. ok? I realise you committed this already, but can you elaborate on those XXX's in there?
bufq massage
this diff is largely a mechanical change. firstly, it makes struct bufq a member of the softc for devices that use it, rather than it being a pointer to something that needs to be allocated at attach. since all these devices need a bufq to operate, it makes sense to have it allocated as part of the softc and get bufq_init to just initialise all its fields. it also gets rid of the possibility that you won't be able to allocate the bufq struct during attach, which is something you don't want to happen. secondly, it consistently implements a split between wrapper functions and the per discipline implementation of the bufq handlers. it consistently does the locking in the wrappers rather than doing half in the wrappers and the other half in the implementations. it also consistently handles the outstanding bufq bq pointer in the wrappers. this hides most of the implementation inside kern_bufq.c. the only stuff left in buf.h is for the bits each implementation needs to put bufs on their queues. I've tested this extensively on sd(4) and thib has tested this on wd(4). we'd like some wider exposure, especially over suspends and resumes on a variety of machines. I have tried to preserve the locking semantics, but testing would be lovely. ok? 
dlg Index: dev/ata/wd.c === RCS file: /cvs/src/sys/dev/ata/wd.c,v retrieving revision 1.85 diff -u -p -r1.85 wd.c --- dev/ata/wd.c28 Jun 2010 08:35:46 - 1.85 +++ dev/ata/wd.c25 Aug 2010 12:05:33 - @@ -121,7 +121,7 @@ struct wd_softc { /* General disk infos */ struct device sc_dev; struct disk sc_dk; - struct bufq *sc_bufq; + struct bufq sc_bufq; /* IDE disk soft states */ struct ata_bio sc_wdc_bio; /* current transfer */ @@ -369,7 +369,7 @@ wdattach(struct device *parent, struct d */ wd-sc_dk.dk_driver = wddkdriver; wd-sc_dk.dk_name = wd-sc_dev.dv_xname; - wd-sc_bufq = bufq_init(BUFQ_DEFAULT); + bufq_init(wd-sc_bufq, BUFQ_DEFAULT); wd-sc_sdhook = shutdownhook_establish(wd_shutdown, wd); if (wd-sc_sdhook == NULL) printf(%s: WARNING: unable to establish shutdown hook\n, @@ -413,7 +413,7 @@ wddetach(struct device *self, int flags) /* Remove unprocessed buffers from queue */ s = splbio(); - while ((bp = BUFQ_DEQUEUE(sc-sc_bufq)) != NULL) { + while ((bp = bufq_dequeue(sc-sc_bufq)) != NULL) { bp-b_error = ENXIO; bp-b_flags |= B_ERROR; biodone(bp); @@ -435,7 +435,7 @@ wddetach(struct device *self, int flags) shutdownhook_disestablish(sc-sc_sdhook); /* Detach disk. */ - bufq_destroy(sc-sc_bufq); + bufq_destroy(sc-sc_bufq); disk_detach(sc-sc_dk); return (0); @@ -486,7 +486,7 @@ wdstrategy(struct buf *bp) (wd-sc_flags (WDF_WLABEL|WDF_LABELLING)) != 0) = 0) goto done; /* Queue transfer on drive, activate drive and controller if idle. */ - BUFQ_QUEUE(wd-sc_bufq, bp); + bufq_queue(wd-sc_bufq, bp); s = splbio(); wdstart(wd); splx(s); @@ -518,7 +518,7 @@ wdstart(void *arg) while (wd-openings 0) { /* Is there a buf for us ? */ - if ((bp = BUFQ_DEQUEUE(wd-sc_bufq)) == NULL) + if ((bp = bufq_dequeue(wd-sc_bufq)) == NULL) return; /* * Make the command. 
First lock the device Index: kern/kern_bufq.c === RCS file: /cvs/src/sys/kern/kern_bufq.c,v retrieving revision 1.14 diff -u -p -r1.14 kern_bufq.c --- kern/kern_bufq.c19 Jul 2010 21:39:15 - 1.14 +++ kern/kern_bufq.c25 Aug 2010 12:05:33 - @@ -30,45 +30,70 @@ SLIST_HEAD(, bufq) bufqs = SLIST_HEAD_IN struct mutex bufqs_mtx = MUTEX_INITIALIZER(IPL_NONE); intbufqs_stop; -struct buf *(*bufq_dequeuev[BUFQ_HOWMANY])(struct bufq *, int) = { - bufq_disksort_dequeue, - bufq_fifo_dequeue +struct bufq_impl { + void*(*impl_create)(void); + void (*impl_destroy)(void *); + + void (*impl_queue)(void *, struct buf *); + struct buf *(*impl_dequeue)(void *); + void (*impl_requeue)(void *, struct buf *); + int (*impl_peek)(void *); }; -void (*bufq_queuev[BUFQ_HOWMANY])(struct bufq *, struct buf *) = { + +void *bufq_disksort_create(void); +voidbufq_disksort_destroy(void *); +voidbufq_disksort_queue(void *, struct buf *); +struct buf *bufq_disksort_dequeue(void *); +voidbufq_disksort_requeue(void *, struct buf *); +int bufq_disksort_peek(void *); + +struct bufq_impl bufq_impl_disksort = { + bufq_disksort_create, +
Re: bufq massage
On Sun, Aug 29, 2010 at 10:26:14PM +1000, David Gwynne wrote: this diff is largely a mechanical change. firstly, it makes struct bufq a member of the softc for devices that use it, rather than it being a pointer to something that needs to be allocated at attach. since all these devices need a bufq to operate, it makes sense to have it allocated as part of the softc and get bufq_init to just initialise all its fields. it also gets rid of the possibility that you won't be able to allocate the bufq struct during attach, which is something you don't want to happen. secondly, it consistently implements a split between wrapper functions and the per discipline implementation of the bufq handlers. it consistently does the locking in the wrappers rather than doing half in the wrappers and the other half in the implementations. it also consistently handles the outstanding bufq bq pointer in the wrappers. this hides most of the implementation inside kern_bufq.c. the only stuff left in buf.h is for the bits each implementation needs to put bufs on their queues. I've tested this extensively on sd(4) and thib has tested this on wd(4). we'd like some wider exposure, especially over suspends and resumes on a variety of machines. I have tried to preserve the locking semantics, but testing would be lovely. ok? I like this a lot, and I will do some testing on st and cd at least as soon as I can rebuild the appropriate box. 
Ken dlg Index: dev/ata/wd.c === RCS file: /cvs/src/sys/dev/ata/wd.c,v retrieving revision 1.85 diff -u -p -r1.85 wd.c --- dev/ata/wd.c 28 Jun 2010 08:35:46 - 1.85 +++ dev/ata/wd.c 25 Aug 2010 12:05:33 - @@ -121,7 +121,7 @@ struct wd_softc { /* General disk infos */ struct device sc_dev; struct disk sc_dk; - struct bufq *sc_bufq; + struct bufq sc_bufq; /* IDE disk soft states */ struct ata_bio sc_wdc_bio; /* current transfer */ @@ -369,7 +369,7 @@ wdattach(struct device *parent, struct d */ wd-sc_dk.dk_driver = wddkdriver; wd-sc_dk.dk_name = wd-sc_dev.dv_xname; - wd-sc_bufq = bufq_init(BUFQ_DEFAULT); + bufq_init(wd-sc_bufq, BUFQ_DEFAULT); wd-sc_sdhook = shutdownhook_establish(wd_shutdown, wd); if (wd-sc_sdhook == NULL) printf(%s: WARNING: unable to establish shutdown hook\n, @@ -413,7 +413,7 @@ wddetach(struct device *self, int flags) /* Remove unprocessed buffers from queue */ s = splbio(); - while ((bp = BUFQ_DEQUEUE(sc-sc_bufq)) != NULL) { + while ((bp = bufq_dequeue(sc-sc_bufq)) != NULL) { bp-b_error = ENXIO; bp-b_flags |= B_ERROR; biodone(bp); @@ -435,7 +435,7 @@ wddetach(struct device *self, int flags) shutdownhook_disestablish(sc-sc_sdhook); /* Detach disk. */ - bufq_destroy(sc-sc_bufq); + bufq_destroy(sc-sc_bufq); disk_detach(sc-sc_dk); return (0); @@ -486,7 +486,7 @@ wdstrategy(struct buf *bp) (wd-sc_flags (WDF_WLABEL|WDF_LABELLING)) != 0) = 0) goto done; /* Queue transfer on drive, activate drive and controller if idle. */ - BUFQ_QUEUE(wd-sc_bufq, bp); + bufq_queue(wd-sc_bufq, bp); s = splbio(); wdstart(wd); splx(s); @@ -518,7 +518,7 @@ wdstart(void *arg) while (wd-openings 0) { /* Is there a buf for us ? */ - if ((bp = BUFQ_DEQUEUE(wd-sc_bufq)) == NULL) + if ((bp = bufq_dequeue(wd-sc_bufq)) == NULL) return; /* * Make the command. 
First lock the device Index: kern/kern_bufq.c === RCS file: /cvs/src/sys/kern/kern_bufq.c,v retrieving revision 1.14 diff -u -p -r1.14 kern_bufq.c --- kern/kern_bufq.c 19 Jul 2010 21:39:15 - 1.14 +++ kern/kern_bufq.c 25 Aug 2010 12:05:33 - @@ -30,45 +30,70 @@ SLIST_HEAD(, bufq)bufqs = SLIST_HEAD_IN struct mutex bufqs_mtx = MUTEX_INITIALIZER(IPL_NONE); int bufqs_stop; -struct buf *(*bufq_dequeuev[BUFQ_HOWMANY])(struct bufq *, int) = { - bufq_disksort_dequeue, - bufq_fifo_dequeue +struct bufq_impl { + void*(*impl_create)(void); + void (*impl_destroy)(void *); + + void (*impl_queue)(void *, struct buf *); + struct buf *(*impl_dequeue)(void *); + void (*impl_requeue)(void *, struct buf *); + int (*impl_peek)(void *); }; -void (*bufq_queuev[BUFQ_HOWMANY])(struct bufq *, struct buf *) = { + +void *bufq_disksort_create(void); +void bufq_disksort_destroy(void *); +void bufq_disksort_queue(void *, struct buf *); +struct buf *bufq_disksort_dequeue(void *); +void