Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=7749a8d423c483a51983b666613acda1a4dd9c1b
Commit:     7749a8d423c483a51983b666613acda1a4dd9c1b
Parent:     445722f97a0ecd3aed3f53d9f0dcaacaef8c6223
Author:     Jens Axboe <[EMAIL PROTECTED]>
AuthorDate: Wed Dec 13 13:02:26 2006 +0100
Committer:  Jens Axboe <[EMAIL PROTECTED]>
CommitDate: Wed Dec 13 13:02:26 2006 +0100

    [PATCH] Propagate down request sync flag
    
    We need to do this, otherwise the io schedulers don't get access to the
    sync flag and cannot tell the difference between a regular write and an
    O_DIRECT write, which can cause a performance loss.
    
    Signed-off-by: Jens Axboe <[EMAIL PROTECTED]>
---
 block/cfq-iosched.c |   18 ++++++++++++------
 block/ll_rw_blk.c   |   28 ++++++++++++++++++++--------
 2 files changed, 32 insertions(+), 14 deletions(-)
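
For context before the diff: the core of the change is that the bio's sync
hint is folded into the rw flags before request allocation, so that both
elv_may_queue() and the elevator's set_request hook can see it. Below is a
minimal standalone sketch of that flow. The flag values are stand-ins for
illustration only (the real REQ_RW/REQ_RW_SYNC bits live in
include/linux/blkdev.h of this era), and derive_rw_flags() is a hypothetical
helper, not a kernel function:

    #include <stdio.h>

    /*
     * Stand-in flag values, for illustration only. The low bit carries the
     * data direction, matching the "rw_flags & 0x01" masking that
     * get_request() does in the patch below.
     */
    #define REQ_RW          (1 << 0)        /* write if set, read if clear */
    #define REQ_RW_SYNC     (1 << 1)        /* synchronous request */

    /*
     * Hypothetical helper mirroring the new code in __make_request():
     * start from the bio's data direction, then OR in the sync bit so the
     * request allocator and io scheduler can both see it.
     */
    static int derive_rw_flags(int is_write, int is_sync)
    {
            int rw_flags = is_write ? REQ_RW : 0;   /* bio_data_dir(bio) */

            if (is_sync)                            /* bio_sync(bio) */
                    rw_flags |= REQ_RW_SYNC;

            return rw_flags;
    }

    int main(void)
    {
            /* An O_DIRECT write arrives with the sync hint set. */
            int flags = derive_rw_flags(1, 1);

            printf("dir=%s sync=%d\n",
                   (flags & REQ_RW) ? "write" : "read",
                   !!(flags & REQ_RW_SYNC));
            return 0;
    }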

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 78c6b31..533a293 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -219,9 +219,12 @@ static int cfq_queue_empty(request_queue_t *q)
        return !cfqd->busy_queues;
 }
 
-static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
 {
-       if (rw == READ || rw == WRITE_SYNC)
+       /*
+        * Use the per-process queue for read requests and synchronous writes
+        */
+       if (!(rw & REQ_RW) || is_sync)
                return task->pid;
 
        return CFQ_KEY_ASYNC;
@@ -473,7 +476,7 @@ static struct request *
 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 {
        struct task_struct *tsk = current;
-       pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
+       pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
        struct cfq_queue *cfqq;
 
        cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
@@ -1748,6 +1751,9 @@ static int cfq_may_queue(request_queue_t *q, int rw)
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct task_struct *tsk = current;
        struct cfq_queue *cfqq;
+       unsigned int key;
+
+       key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
 
        /*
         * don't force setup of a queue from here, as a call to may_queue
@@ -1755,7 +1761,7 @@ static int cfq_may_queue(request_queue_t *q, int rw)
         * so just lookup a possibly existing queue, or return 'may queue'
         * if that fails
         */
-       cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
+       cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
        if (cfqq) {
                cfq_init_prio_data(cfqq);
                cfq_prio_boost(cfqq);
@@ -1798,10 +1804,10 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
        struct task_struct *tsk = current;
        struct cfq_io_context *cic;
        const int rw = rq_data_dir(rq);
-       pid_t key = cfq_queue_pid(tsk, rw);
+       const int is_sync = rq_is_sync(rq);
+       pid_t key = cfq_queue_pid(tsk, rw, is_sync);
        struct cfq_queue *cfqq;
        unsigned long flags;
-       int is_sync = key != CFQ_KEY_ASYNC;
 
        might_sleep_if(gfp_mask & __GFP_WAIT);
 
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index a541b42..79807db 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2058,15 +2058,16 @@ static void freed_request(request_queue_t *q, int rw, int priv)
  * Returns NULL on failure, with queue_lock held.
  * Returns !NULL on success, with queue_lock *not held*.
  */
-static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
-                                  gfp_t gfp_mask)
+static struct request *get_request(request_queue_t *q, int rw_flags,
+                                  struct bio *bio, gfp_t gfp_mask)
 {
        struct request *rq = NULL;
        struct request_list *rl = &q->rq;
        struct io_context *ioc = NULL;
+       const int rw = rw_flags & 0x01;
        int may_queue, priv;
 
-       may_queue = elv_may_queue(q, rw);
+       may_queue = elv_may_queue(q, rw_flags);
        if (may_queue == ELV_MQUEUE_NO)
                goto rq_starved;
 
@@ -2114,7 +2115,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 
        spin_unlock_irq(q->queue_lock);
 
-       rq = blk_alloc_request(q, rw, priv, gfp_mask);
+       rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
        if (unlikely(!rq)) {
                /*
                 * Allocation failed presumably due to memory. Undo anything
@@ -2162,12 +2163,13 @@ out:
  *
  * Called with q->queue_lock held, and returns with it unlocked.
  */
-static struct request *get_request_wait(request_queue_t *q, int rw,
+static struct request *get_request_wait(request_queue_t *q, int rw_flags,
                                        struct bio *bio)
 {
+       const int rw = rw_flags & 0x01;
        struct request *rq;
 
-       rq = get_request(q, rw, bio, GFP_NOIO);
+       rq = get_request(q, rw_flags, bio, GFP_NOIO);
        while (!rq) {
                DEFINE_WAIT(wait);
                struct request_list *rl = &q->rq;
@@ -2175,7 +2177,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
                prepare_to_wait_exclusive(&rl->wait[rw], &wait,
                                TASK_UNINTERRUPTIBLE);
 
-               rq = get_request(q, rw, bio, GFP_NOIO);
+               rq = get_request(q, rw_flags, bio, GFP_NOIO);
 
                if (!rq) {
                        struct io_context *ioc;
@@ -2910,6 +2912,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
        int el_ret, nr_sectors, barrier, err;
        const unsigned short prio = bio_prio(bio);
        const int sync = bio_sync(bio);
+       int rw_flags;
 
        nr_sectors = bio_sectors(bio);
 
@@ -2984,10 +2987,19 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 
 get_rq:
        /*
+        * This sync check and mask will be re-done in init_request_from_bio(),
+        * but we need to set it earlier to expose the sync flag to the
+        * rq allocator and io schedulers.
+        */
+       rw_flags = bio_data_dir(bio);
+       if (sync)
+               rw_flags |= REQ_RW_SYNC;
+
+       /*
         * Grab a free request. This might sleep but cannot fail.
         * Returns with the queue unlocked.
         */
-       req = get_request_wait(q, bio_data_dir(bio), bio);
+       req = get_request_wait(q, rw_flags, bio);
 
        /*
         * After dropping the lock and possibly sleeping here, our request
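
Following up on the CFQ side: with the sync bit visible, cfq_queue_pid() keys
reads and synchronous writes to the submitting task's per-process queue, and
async writes to the shared async key. A toy model of that decision, again
with stand-in flag values and with CFQ_KEY_ASYNC reduced to an illustrative
marker rather than the real constant:

    #include <stdio.h>

    /* Stand-in values, as in the earlier sketch; illustration only. */
    #define REQ_RW          (1 << 0)
    #define REQ_RW_SYNC     (1 << 1)
    #define CFQ_KEY_ASYNC   (-1)    /* stand-in for the real CFQ constant */

    /*
     * Toy model of the new cfq_queue_pid() decision: reads and synchronous
     * writes map to the per-process key, async writes to the shared key.
     */
    static int cfq_key(int pid, int rw, int is_sync)
    {
            if (!(rw & REQ_RW) || is_sync)
                    return pid;

            return CFQ_KEY_ASYNC;
    }

    int main(void)
    {
            int pid = 1234;

            printf("read:        %d\n", cfq_key(pid, 0, 0));        /* 1234 */
            printf("sync write:  %d\n", cfq_key(pid, REQ_RW, 1));   /* 1234 */
            printf("async write: %d\n", cfq_key(pid, REQ_RW, 0));   /* -1   */
            return 0;
    }

Before this patch, the sync hint was lost by the time cfq_set_request() ran,
so a synchronous (e.g. O_DIRECT) write was keyed to the shared async queue;
propagating the flag gives it per-process treatment, which is the performance
difference the commit message refers to.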