Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=336cdb4003200a90f4fc52a4e9ccc2baa570fffb
Commit:     336cdb4003200a90f4fc52a4e9ccc2baa570fffb
Parent:     91525300baf162e83e923b09ca286f9205e21522
Author:     Kiyoshi Ueda <[EMAIL PROTECTED]>
AuthorDate: Tue Dec 11 17:40:30 2007 -0500
Committer:  Jens Axboe <[EMAIL PROTECTED]>
CommitDate: Mon Jan 28 10:35:53 2008 +0100

    blk_end_request: add new request completion interface (take 4)
    
    This patch adds 2 new interfaces for request completion:
      o blk_end_request()   : called without queue lock
      o __blk_end_request() : called with queue lock held
    
    blk_end_request takes 'error' as an argument instead of the 'uptodate'
    argument that the current end_that_request_* functions take.
    The value is used when the bio is completed, with the following meaning:
        0 : success
      < 0 : error
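    
    For illustration only (not part of this patch), a hypothetical driver
    completion path might pass the error like this; 'status_ok', 'rq' and
    'nr_bytes' are made-up names:
    
        int error = status_ok ? 0 : -EIO;       /* 0 on success, negative errno on failure */
    
        if (blk_end_request(rq, error, nr_bytes))
                return;                         /* bytes still pending on this request */
        /* the request is fully completed at this point */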
    
    Some device drivers call the generic functions below between
    end_that_request_{first/chunk} and end_that_request_last():
      o add_disk_randomness()
      o blk_queue_end_tag()
      o blkdev_dequeue_request()
    These are called in the blk_end_request interfaces as part of
    generic request completion, so all device drivers end up calling them.
    To decide whether to call blkdev_dequeue_request(), blk_end_request
    uses list_empty(&rq->queuelist) (the blk_queued_rq() macro is added
    for this).
    So if a driver uses rq->queuelist for its own purposes, it must
    re-initialize it (e.g. with list_del_init() or INIT_LIST_HEAD())
    before calling blk_end_request.
    (Currently, no driver completes a request without re-initializing
     the queuelist after using it, so rq->queuelist can be used for the
     purpose above.)
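    
    As an illustrative sketch (not part of this patch): a driver that keeps
    requests on a private list linked through rq->queuelist would have to
    take the request off that list before completing it, so that
    blk_queued_rq() sees an empty queuelist:
    
        /* hypothetical driver-internal list reusing rq->queuelist */
        list_del_init(&rq->queuelist);          /* leaves queuelist empty */
        blk_end_request(rq, error, nr_bytes);   /* will not dequeue again */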
    
    "Normal" drivers can be converted to use blk_end_request()
    in a standard way shown below.
    
     a) end_that_request_{chunk/first}
        spin_lock_irqsave()
        (add_disk_randomness(), blk_queue_end_tag(), blkdev_dequeue_request())
        end_that_request_last()
        spin_unlock_irqrestore()
        => blk_end_request()
    
     b) spin_lock_irqsave()
        end_that_request_{chunk/first}
        (add_disk_randomness(), blk_queue_end_tag(), blkdev_dequeue_request())
        end_that_request_last()
        spin_unlock_irqrestore()
        => spin_lock_irqsave()
           __blk_end_request()
           spin_unlock_irqrestore()
    
     c) spin_lock_irqsave()
        (add_disk_randomness(), blk_queue_end_tag(), blkdev_dequeue_request())
        end_that_request_last()
        spin_unlock_irqrestore()
        => blk_end_request()   or   spin_lock_irqsave()
                                    __blk_end_request()
                                    spin_unlock_irqrestore()
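    
    An illustrative sketch of case a), not taken from any real driver
    (the function name and variables are made up; only the block layer
    calls are real):
    
        /* before: driver dequeues and completes under the queue lock itself */
        static void my_complete_rq(struct request *rq, int uptodate, int nr_bytes)
        {
                unsigned long flags;
    
                if (end_that_request_chunk(rq, uptodate, nr_bytes))
                        return;                 /* partial completion, more to do */
    
                spin_lock_irqsave(rq->q->queue_lock, flags);
                add_disk_randomness(rq->rq_disk);
                blkdev_dequeue_request(rq);
                end_that_request_last(rq, uptodate);
                spin_unlock_irqrestore(rq->q->queue_lock, flags);
        }
    
        /* after: blk_end_request() does the dequeue, disk randomness and
         * final completion internally, taking the queue lock itself */
        static void my_complete_rq(struct request *rq, int error, int nr_bytes)
        {
                if (blk_end_request(rq, error, nr_bytes))
                        return;                 /* partial completion, more to do */
        }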
    
    Signed-off-by: Kiyoshi Ueda <[EMAIL PROTECTED]>
    Signed-off-by: Jun'ichi Nomura <[EMAIL PROTECTED]>
    Signed-off-by: Jens Axboe <[EMAIL PROTECTED]>
---
 block/ll_rw_blk.c      |   96 ++++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/blkdev.h |    4 ++
 2 files changed, 100 insertions(+), 0 deletions(-)

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 3d0422f..5c01911 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3791,6 +3791,102 @@ void end_request(struct request *req, int uptodate)
 }
 EXPORT_SYMBOL(end_request);
 
+static void complete_request(struct request *rq, int error)
+{
+       /*
+        * REMOVEME: This conversion is transitional and will be removed
+        *           when old end_that_request_* are unexported.
+        */
+       int uptodate = 1;
+       if (error)
+               uptodate = (error == -EIO) ? 0 : error;
+
+       if (blk_rq_tagged(rq))
+               blk_queue_end_tag(rq->q, rq);
+
+       if (blk_queued_rq(rq))
+               blkdev_dequeue_request(rq);
+
+       end_that_request_last(rq, uptodate);
+}
+
+/**
+ * blk_end_request - Helper function for drivers to complete the request.
+ * @rq:       the request being processed
+ * @error:    0 for success, < 0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @rq.
+ *     If @rq has leftover, sets it up for the next range of segments.
+ *
+ * Return:
+ *     0 - we are done with this request
+ *     1 - still buffers pending for this request
+ **/
+int blk_end_request(struct request *rq, int error, int nr_bytes)
+{
+       struct request_queue *q = rq->q;
+       unsigned long flags = 0UL;
+       /*
+        * REMOVEME: This conversion is transitional and will be removed
+        *           when old end_that_request_* are unexported.
+        */
+       int uptodate = 1;
+       if (error)
+               uptodate = (error == -EIO) ? 0 : error;
+
+       if (blk_fs_request(rq) || blk_pc_request(rq)) {
+               if (__end_that_request_first(rq, uptodate, nr_bytes))
+                       return 1;
+       }
+
+       add_disk_randomness(rq->rq_disk);
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       complete_request(rq, error);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(blk_end_request);
+
+/**
+ * __blk_end_request - Helper function for drivers to complete the request.
+ * @rq:       the request being processed
+ * @error:    0 for success, < 0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ *     Must be called with queue lock held unlike blk_end_request().
+ *
+ * Return:
+ *     0 - we are done with this request
+ *     1 - still buffers pending for this request
+ **/
+int __blk_end_request(struct request *rq, int error, int nr_bytes)
+{
+       /*
+        * REMOVEME: This conversion is transitional and will be removed
+        *           when old end_that_request_* are unexported.
+        */
+       int uptodate = 1;
+       if (error)
+               uptodate = (error == -EIO) ? 0 : error;
+
+       if (blk_fs_request(rq) || blk_pc_request(rq)) {
+               if (__end_that_request_first(rq, uptodate, nr_bytes))
+                       return 1;
+       }
+
+       add_disk_randomness(rq->rq_disk);
+
+       complete_request(rq, error);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(__blk_end_request);
+
 static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                            struct bio *bio)
 {
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 49b7a4c..3b212f0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -537,6 +537,8 @@ enum {
 #define blk_fua_rq(rq)         ((rq)->cmd_flags & REQ_FUA)
 #define blk_bidi_rq(rq)                ((rq)->next_rq != NULL)
 #define blk_empty_barrier(rq)  (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
+/* rq->queuelist of dequeued request must be list_empty() */
+#define blk_queued_rq(rq)      (!list_empty(&(rq)->queuelist))
 
 #define list_entry_rq(ptr)     list_entry((ptr), struct request, queuelist)
 
@@ -724,6 +726,8 @@ static inline void blk_run_address_space(struct address_space *mapping)
  * for parts of the original function. This prevents
  * code duplication in drivers.
  */
+extern int blk_end_request(struct request *rq, int error, int nr_bytes);
+extern int __blk_end_request(struct request *rq, int error, int nr_bytes);
 extern int end_that_request_first(struct request *, int, int);
 extern int end_that_request_chunk(struct request *, int, int);
 extern void end_that_request_last(struct request *, int);