Main implementation:

include/linux/blkdev.h:

struct request_queue
{
        /*
         * Together with queue_head for cacheline sharing
         */
        struct list_head        queue_head;
        struct request          *last_merge;
        struct elevator_queue   *elevator;

        /*
         * the queue request freelist, one for reads and one for writes
         */
        struct request_list     rq;

        request_fn_proc         *request_fn;
        make_request_fn         *make_request_fn;
        prep_rq_fn              *prep_rq_fn;
        unplug_fn               *unplug_fn;
        prepare_discard_fn      *prepare_discard_fn;
        merge_bvec_fn           *merge_bvec_fn;
        prepare_flush_fn        *prepare_flush_fn;
        softirq_done_fn         *softirq_done_fn;
        rq_timed_out_fn         *rq_timed_out_fn;
        dma_drain_needed_fn     *dma_drain_needed;
        lld_busy_fn             *lld_busy_fn;

        /*
         * Dispatch queue sorting
         */
        sector_t                end_sector;
        struct request          *boundary_rq;

        /*
         * Auto-unplugging state
         */
        struct timer_list       unplug_timer;
        int                     unplug_thresh;  /* After this many requests */
        unsigned long           unplug_delay;   /* After this many jiffies */
        struct work_struct      unplug_work;

        struct backing_dev_info backing_dev_info;

        /*
         * The queue owner gets to use this for whatever they like.
         * ll_rw_blk doesn't touch it.
         */
        void                    *queuedata;

        /*
         * queue needs bounce pages for pages above this limit
         */
        unsigned long           bounce_pfn;
        gfp_t                   bounce_gfp;

        /*
         * various queue flags, see QUEUE_* below
         */
        unsigned long           queue_flags;

        /*
         * protects queue structures from reentrancy. ->__queue_lock should
         * _never_ be used directly, it is queue private. always use
         * ->queue_lock.
         */
        spinlock_t              __queue_lock;
        spinlock_t              *queue_lock;

        /*
         * queue kobject
         */
        struct kobject kobj;

        /*
         * queue settings
         */
        unsigned long           nr_requests;    /* Max # of requests */
        unsigned int            nr_congestion_on;
        unsigned int            nr_congestion_off;
        unsigned int            nr_batching;

        unsigned int            max_sectors;
        unsigned int            max_hw_sectors;
        unsigned short          max_phys_segments;
        unsigned short          max_hw_segments;
        unsigned short          hardsect_size;
        unsigned int            max_segment_size;
        unsigned long           seg_boundary_mask;
        void                    *dma_drain_buffer;
        unsigned int            dma_drain_size;
        unsigned int            dma_pad_mask;
        unsigned int            dma_alignment;

        struct blk_queue_tag    *queue_tags;
        struct list_head        tag_busy_list;

        unsigned int            nr_sorted;
        unsigned int            in_flight;

        unsigned int            rq_timeout;
        struct timer_list       timeout;
        struct list_head        timeout_list;

        /*
         * sg stuff
         */
        unsigned int            sg_timeout;
        unsigned int            sg_reserved_size;
        int                     node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
        struct blk_trace        *blk_trace;
#endif
        /*
         * reserved for flush operations
         */
        unsigned int            ordered, next_ordered, ordseq;
        int                     orderr, ordcolor;
        struct request          pre_flush_rq, bar_rq, post_flush_rq;
        struct request          *orig_bar_rq;

        struct mutex            sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
        struct bsg_class_device bsg_dev;
#endif
        struct blk_cmd_filter cmd_filter;
};
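
Most of these fields are filled in for the driver by the block core. In this era a driver typically gets a queue from blk_init_queue(), which wires up ->queue_lock, ->request_fn and the elevator, and then tunes the limits. A minimal sketch, assuming a hypothetical my_dev type and the example_request_fn shown at the end of this section:

static DEFINE_SPINLOCK(example_lock);           /* becomes q->queue_lock */

static struct request_queue *example_setup_queue(struct my_dev *dev)
{
        struct request_queue *q;

        q = blk_init_queue(example_request_fn, &example_lock);
        if (!q)
                return NULL;

        q->queuedata = dev;                     /* private, core won't touch it */
        blk_queue_hardsect_size(q, 512);        /* -> hardsect_size above */
        blk_queue_max_sectors(q, 255);          /* -> max_sectors above */
        return q;
}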

include/linux/elevator.h:

struct elv_fs_entry {
        struct attribute attr;         
        ssize_t (*show)(struct elevator_queue *, char *);
        ssize_t (*store)(struct elevator_queue *, const char *, size_t);
};
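
An io scheduler exposes its tunables under /sys/block/<dev>/queue/iosched as an __ATTR_NULL-terminated array of these entries (cf. deadline_attrs in block/deadline-iosched.c). A sketch of the pattern, with a made-up example_data holding one tunable:

struct example_data {                   /* hypothetical per-queue data */
        int nr_batched;
};

static ssize_t example_show(struct elevator_queue *e, char *page)
{
        struct example_data *ed = e->elevator_data;

        return sprintf(page, "%d\n", ed->nr_batched);
}

static ssize_t example_store(struct elevator_queue *e, const char *page,
                             size_t count)
{
        struct example_data *ed = e->elevator_data;

        ed->nr_batched = simple_strtol(page, NULL, 10);
        return count;
}

static struct elv_fs_entry example_attrs[] = {
        __ATTR(nr_batched, S_IRUGO | S_IWUSR, example_show, example_store),
        __ATTR_NULL
};

The scheduler points ->elevator_attrs of its struct elevator_type (below) at this array.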

/*
 * identifies an elevator type, such as AS or deadline
 */
struct elevator_type
{      
        struct list_head list;
        struct elevator_ops ops;
        struct elv_fs_entry *elevator_attrs;
        char elevator_name[ELV_NAME_MAX];
        struct module *elevator_owner;
};     
        
/*      
 * each queue has an elevator_queue associated with it
 */    
struct elevator_queue
{
        struct elevator_ops *ops;
        void *elevator_data;
        struct kobject kobj;
        struct elevator_type *elevator_type;
        struct mutex sysfs_lock;
        struct hlist_head *hash;
};     

/*     
 * block elevator interface    
 */    
extern void elv_dispatch_sort(struct request_queue *, struct request *);
extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
extern void elv_add_request(struct request_queue *, struct request *, int, int);
extern void __elv_add_request(struct request_queue *, struct request *, int, int);
extern void elv_insert(struct request_queue *, struct request *, int);
extern int elv_merge(struct request_queue *, struct request **, struct bio *);
extern void elv_merge_requests(struct request_queue *, struct request *,
                               struct request *);
extern void elv_merged_request(struct request_queue *, struct request *, int);
extern void elv_dequeue_request(struct request_queue *, struct request *);
extern void elv_requeue_request(struct request_queue *, struct request *);
extern int elv_queue_empty(struct request_queue *);
extern struct request *elv_next_request(struct request_queue *q);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
extern int elv_register_queue(struct request_queue *q);
extern void elv_unregister_queue(struct request_queue *q);
extern int elv_may_queue(struct request_queue *, int);
extern void elv_abort_queue(struct request_queue *);
extern void elv_completed_request(struct request_queue *, struct request *);
extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
extern void elv_put_request(struct request_queue *, struct request *);

/*
 * io scheduler registration
 */
extern void elv_register(struct elevator_type *);
extern void elv_unregister(struct elevator_type *);

/*
 * io scheduler sysfs switching
 */
extern ssize_t elv_iosched_show(struct request_queue *, char *);
extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);

extern int elevator_init(struct request_queue *, char *);
extern void elevator_exit(struct elevator_queue *);
extern int elv_rq_merge_ok(struct request *, struct bio *);

/*
 * Helper functions.
 */
extern struct request *elv_rb_former_request(struct request_queue *, struct request *);
extern struct request *elv_rb_latter_request(struct request_queue *, struct request *);

/*
 * rb support functions.
 */
extern struct request *elv_rb_add(struct rb_root *, struct request *);
extern void elv_rb_del(struct rb_root *, struct request *);
extern struct request *elv_rb_find(struct rb_root *, sector_t);

/*
 * Return values from elevator merger
 */
#define ELEVATOR_NO_MERGE       0
#define ELEVATOR_FRONT_MERGE    1
#define ELEVATOR_BACK_MERGE     2
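
These codes are what an elevator_merge_fn hands back to the core. A sketch modeled on deadline_merge() in block/deadline-iosched.c, reusing elv_rb_find() and elv_rq_merge_ok() declared above, and assuming the hypothetical example_data from the sysfs sketch also carries a struct rb_root sort_list of queued requests:

static int example_merge(struct request_queue *q, struct request **req,
                         struct bio *bio)
{
        struct example_data *ed = q->elevator->elevator_data;
        sector_t sector = bio->bi_sector + bio_sectors(bio);
        struct request *__rq;

        /* a queued request starting exactly where this bio ends can
         * take the bio at its front */
        __rq = elv_rb_find(&ed->sort_list, sector);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_FRONT_MERGE;
        }

        return ELEVATOR_NO_MERGE;
}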

/*
 * Insertion selection
 */
#define ELEVATOR_INSERT_FRONT   1
#define ELEVATOR_INSERT_BACK    2
#define ELEVATOR_INSERT_SORT    3
#define ELEVATOR_INSERT_REQUEUE 4
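
These select the `where` argument of elv_insert()/__elv_add_request(). For instance:

/* the barrier machinery (block/blk-barrier.c) queues flushes at the head: */
elv_insert(q, rq, ELEVATOR_INSERT_FRONT);

/* ordinary fs requests from __make_request() go through the sort path: */
__elv_add_request(q, rq, ELEVATOR_INSERT_SORT, 0);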

/*
 * return values from elevator_may_queue_fn
 */
enum {
        ELV_MQUEUE_MAY,
        ELV_MQUEUE_NO,
        ELV_MQUEUE_MUST,
};

struct elevator_ops
{
        elevator_merge_fn *elevator_merge_fn;
        elevator_merged_fn *elevator_merged_fn;
        elevator_merge_req_fn *elevator_merge_req_fn;
        elevator_allow_merge_fn *elevator_allow_merge_fn;

        elevator_dispatch_fn *elevator_dispatch_fn;
        elevator_add_req_fn *elevator_add_req_fn;
        elevator_activate_req_fn *elevator_activate_req_fn;
        elevator_deactivate_req_fn *elevator_deactivate_req_fn;

        elevator_queue_empty_fn *elevator_queue_empty_fn;
        elevator_completed_req_fn *elevator_completed_req_fn;

        elevator_request_list_fn *elevator_former_req_fn;
        elevator_request_list_fn *elevator_latter_req_fn;

        elevator_set_req_fn *elevator_set_req_fn;
        elevator_put_req_fn *elevator_put_req_fn;

        elevator_may_queue_fn *elevator_may_queue_fn;

        elevator_init_fn *elevator_init_fn;
        elevator_exit_fn *elevator_exit_fn;
        void (*trim)(struct io_context *);
};
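
The smallest in-tree instance of this ops table is the noop scheduler. Condensed from block/noop-iosched.c (exit and module boilerplate trimmed), it keeps a plain FIFO in elevator_data and registers itself with elv_register() as declared above:

struct noop_data {
        struct list_head queue;
};

static int noop_dispatch(struct request_queue *q, int force)
{
        struct noop_data *nd = q->elevator->elevator_data;

        if (!list_empty(&nd->queue)) {
                struct request *rq;
                rq = list_entry(nd->queue.next, struct request, queuelist);
                list_del_init(&rq->queuelist);
                elv_dispatch_sort(q, rq);
                return 1;
        }
        return 0;
}

static void noop_add_request(struct request_queue *q, struct request *rq)
{
        struct noop_data *nd = q->elevator->elevator_data;

        list_add_tail(&rq->queuelist, &nd->queue);
}

static int noop_queue_empty(struct request_queue *q)
{
        struct noop_data *nd = q->elevator->elevator_data;

        return list_empty(&nd->queue);
}

static void *noop_init_queue(struct request_queue *q)
{
        struct noop_data *nd;

        nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
        if (!nd)
                return NULL;
        INIT_LIST_HEAD(&nd->queue);
        return nd;
}

static struct elevator_type elevator_noop = {
        .ops = {
                .elevator_dispatch_fn           = noop_dispatch,
                .elevator_add_req_fn            = noop_add_request,
                .elevator_queue_empty_fn        = noop_queue_empty,
                .elevator_init_fn               = noop_init_queue,
        },
        .elevator_name = "noop",
        .elevator_owner = THIS_MODULE,
};

static int __init noop_init(void)
{
        elv_register(&elevator_noop);
        return 0;
}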

include/linux/bio.h:

/*
 * bio bi_rw flags
 *
 * bit 0 -- data direction
 *      If not set, bio is a read from device. If set, it's a write to device.
 * bit 1 -- rw-ahead when set
 * bit 2 -- barrier
 *      Insert a serialization point in the IO queue, forcing previously
 *      submitted IO to be completed before this one is issued.
 * bit 3 -- synchronous I/O hint
 *      Note that this does NOT make the IO itself sync at the device;
 *      it tells the IO scheduler to treat the request as synchronous.
 * bit 4 -- unplug the device immediately after submitting this bio,
 *      i.e. the block layer will not postpone issue of this IO by plugging
 * bit 5 -- metadata request
 *      Used for tracing to differentiate metadata and data IO. May also
 *      get some preferential treatment in the IO scheduler
 * bit 6 -- discard sectors
 *      Informs the lower level device that this range of sectors is no longer
 *      used by the file system and may thus be freed by the device. Used
 *      for flash based storage.
 * bit 7 -- fail fast device errors
 * bit 8 -- fail fast transport errors
 * bit 9 -- fail fast driver errors
 *      Don't want driver retries for any fast fail whatever the reason.
 */
#define BIO_RW          0       /* Must match RW in req flags (blkdev.h) */
#define BIO_RW_AHEAD    1       /* Must match FAILFAST in req flags */
#define BIO_RW_BARRIER  2
#define BIO_RW_SYNCIO   3
#define BIO_RW_UNPLUG   4
#define BIO_RW_META     5
#define BIO_RW_DISCARD  6
#define BIO_RW_FAILFAST_DEV             7
#define BIO_RW_FAILFAST_TRANSPORT       8
#define BIO_RW_FAILFAST_DRIVER          9

#define BIO_RW_SYNC     (BIO_RW_SYNCIO | BIO_RW_UNPLUG)

#define bio_rw_flagged(bio, flag)       ((bio)->bi_rw & (1 << (flag)))
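
A driver's make_request_fn tests these bits with bio_rw_flagged(). A sketch of the common checks; example_make_request and example_discard are hypothetical:

static int example_make_request(struct request_queue *q, struct bio *bio)
{
        /* a device that cannot order IO has to reject barriers */
        if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
        }

        if (bio_rw_flagged(bio, BIO_RW_DISCARD)) {
                /* tell flash-backed storage the range is now unused */
                example_discard(q->queuedata, bio->bi_sector,
                                bio_sectors(bio));
                bio_endio(bio, 0);
                return 0;
        }

        /* ... normal read/write path ... */
        return 0;
}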

Enumeration logic:

/*
 * drivers should not use the __ version unless they _really_ want to
 * run through the entire bio and not just pending pieces
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)                  \
        for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);  \
             i < (bio)->bi_vcnt;                                        \
             bvl++, i++)

#define bio_for_each_segment(bvl, bio, i)                               \
        __bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
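
Typical use, modeled on the simple RAM-disk style drivers listed in the grep results below; my_dev and example_copy_to_dev are hypothetical:

static int example_transfer_request(struct request_queue *q, struct bio *bio)
{
        struct my_dev *dev = q->queuedata;
        sector_t sector = bio->bi_sector;
        struct bio_vec *bvec;
        int i;

        /* starts at bi_idx, so only the not-yet-completed segments */
        bio_for_each_segment(bvec, bio, i) {
                void *kaddr = kmap_atomic(bvec->bv_page, KM_USER0);

                example_copy_to_dev(dev, sector, kaddr + bvec->bv_offset,
                                    bvec->bv_len);
                kunmap_atomic(kaddr, KM_USER0);
                sector += bvec->bv_len >> 9;    /* bv_len is in bytes */
        }
        bio_endio(bio, 0);
        return 0;
}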

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *      do_something
 * bio_put(bio);
 *
 * without the bio_get(), it could potentially complete I/O before submit_bio
 * returns. and then bio would be freed memory when if (bio->bi_flags ...)
 * runs
 */
#define bio_get(bio)    atomic_inc(&(bio)->bi_cnt)


./drivers/s390/block/xpram.c:
    bio_for_each_segment(bvec, bio, i) {

./drivers/s390/block/dcssblk.c:
    bio_for_each_segment(bvec, bio, i) {

./drivers/md/raid1.c:
    bio_for_each_segment(bvec, bio, i) {
            __bio_for_each_segment(bvec, mbio, j, 0)
                    __bio_for_each_segment(bvec, bio, j, 0)

./drivers/md/raid5.c:
    bio_for_each_segment(bvl, bio, i) {
    bio_for_each_segment(bvl, bio, i) {

./drivers/block/brd.c:
    bio_for_each_segment(bvec, bio, i) {

./drivers/block/loop.c:
    bio_for_each_segment(bvec, bio, i) {
    bio_for_each_segment(bvec, bio, i) {

./arch/powerpc/sysdev/axonram.c:
    bio_for_each_segment(vec, bio, idx) {

./mm/bounce.c:
    __bio_for_each_segment(tovec, to, i, 0) {
    __bio_for_each_segment(bvec, bio, i, 0) {
    bio_for_each_segment(from, *bio_orig, i) {
    __bio_for_each_segment(from, *bio_orig, i, 0) {

./fs/btrfs/disk-io.c:
    bio_for_each_segment(bvec, bio, i) {

./fs/bio-integrity.c:
    bio_for_each_segment(bv, bio, i) {
    bio_for_each_segment(bv, bio, i) {

./fs/bio.c:
    bio_for_each_segment(bv, bio, i) {
    __bio_for_each_segment(bvec, bio, i, 0) {
        bio_for_each_segment(bvec, bio, i)
    __bio_for_each_segment(bvec, bio, i, 0) {
    __bio_for_each_segment(bvec, bio, i, 0) {
        bio_for_each_segment(bvec, bio, i) {
    __bio_for_each_segment(bv, bio, i, 0) {

Queue processing logic:

./ide/ide-io.c:
        rq = elv_next_request(drive->queue);
         * blk_stop_queue() doesn't prevent the elv_next_request()

./s390/char/tape_block.c:
        elv_next_request(device->blk_data.request_queue))
        elv_next_request(queue)   &&
        req = elv_next_request(queue);

./s390/block/dasd.c:
           elv_next_request(queue)) {
        req = elv_next_request(queue);
    while ((req = elv_next_request(block->request_queue))) {

./sbus/char/jsflash.c:
    while ((req = elv_next_request(q)) != NULL) {

./cdrom/gdrom.c:
    while ((req = elv_next_request(rq)) != NULL) {

./cdrom/viocd.c:
    while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {

./memstick/core/mspro_block.c:
    msb->block_req = elv_next_request(msb->queue);
        while ((req = elv_next_request(q)) != NULL)

./message/i2o/i2o_block.c:
        req = elv_next_request(q);


./block/cpqarray.c:
    creq = elv_next_request(q);

./block/ps3disk.c:
    while ((req = elv_next_request(q))) {

./block/floppy.c:
    if (elv_next_request(floppy_queue))
            req = elv_next_request(floppy_queue);

./block/sx8.c:
        rq = elv_next_request(q);
    rq = elv_next_request(q);

./block/ub.c:
    while ((rq = elv_next_request(q)) != NULL) {

./block/xsysace.c:
    while ((req = elv_next_request(q)) != NULL) {

./block/ataflop.c:
#define CURRENT elv_next_request(floppy_queue)

./block/z2ram.c:
    while ((req = elv_next_request(q)) != NULL) {

./block/viodasd.c:
        req = elv_next_request(q);

./block/amiflop.c:
#define CURRENT elv_next_request(floppy_queue)

./block/cciss.c:
    creq = elv_next_request(q);

./block/virtio_blk.c:
    while ((req = elv_next_request(q)) != NULL) {

./block/swim3.c:
    while (fs->state == idle && (req = elv_next_request(swim3_queue))) {

./block/DAC960.c:
    Request = elv_next_request(req_q);

./block/xen-blkfront.c:
    while ((req = elv_next_request(rq)) != NULL) {

./block/sunvdc.c:
        struct request *req = elv_next_request(q);

./block/xd.c:
    while ((req = elv_next_request(q)) != NULL) {

./block/hd.c:
#define CURRENT elv_next_request(hd_queue)
    if (elv_next_request(QUEUE))

./block/paride/pcd.c:
        pcd_req = elv_next_request(q);

./block/paride/pd.c:
                pd_req = elv_next_request(pd_queue);
    pd_req = elv_next_request(q);

./block/paride/pf.c:
    pf_req = elv_next_request(q);
        pf_req = elv_next_request(pf_queue);

./block/nbd.c:
    while ((req = elv_next_request(q)) != NULL) {

./mmc/card/queue.c:
            req = elv_next_request(q);
        while ((req = elv_next_request(q)) != NULL) {

./mtd/mtd_blkdevs.c:
        req = elv_next_request(rq);

./scsi/scsi_lib.c:
         * If we defer, the elv_next_request() returns NULL, but the
        while ((req = elv_next_request(q)) != NULL)
        req = elv_next_request(q);

./scsi/scsi_transport_sas.c:
        req = elv_next_request(q);
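
Every hit above is the same driver pattern: a request_fn (the one registered with blk_init_queue()) draining the queue via elv_next_request(). A sketch modeled on drivers/block/z2ram.c, with a hypothetical example_transfer:

static void example_request_fn(struct request_queue *q)
{
        struct request *req;

        /* called with q->queue_lock held */
        while ((req = elv_next_request(q)) != NULL) {
                if (!blk_fs_request(req)) {
                        end_request(req, 0);    /* fail non-fs requests */
                        continue;
                }
                /* transfer the current chunk; the request stays at the
                 * queue head and is returned again until fully done */
                example_transfer(q->queuedata, req->sector,
                                 req->current_nr_sectors, req->buffer);
                end_request(req, 1);
        }
}

Note that elv_next_request() only peeks at the head of the queue; the request is removed via blkdev_dequeue_request() or implicitly once it is fully completed.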
