 drivers/block/Kconfig.iosched |    9 
 drivers/block/Makefile        |    1 
 drivers/block/ps-iosched.c    | 1852 ++++++++++++++++++++++++++++++++++++++++++
 include/linux/blkdev.h        |   16 
 4 files changed, 1878 insertions(+)

Signed-off-by: Shailabh Nagar <[EMAIL PROTECTED]>
Signed-off-by: Chandra Seetharaman <[EMAIL PROTECTED]>

Index: linux-2.6.12-rc3/drivers/block/Kconfig.iosched
===================================================================
--- linux-2.6.12-rc3.orig/drivers/block/Kconfig.iosched
+++ linux-2.6.12-rc3/drivers/block/Kconfig.iosched
@@ -38,4 +38,13 @@ config IOSCHED_CFQ
          among all processes in the system. It should provide a fair
          working environment, suitable for desktop systems.
 
+config IOSCHED_PS
+       tristate "Proportional share I/O scheduler"
+       default y
+       ---help---
+         The PS I/O scheduler apportions disk I/O bandwidth amongst classes
+         defined through CKRM (Class-based Kernel Resource Management). It
+         is based on CFQ but differs in the interface used (CKRM) and in
+         its implementation of differentiated service.
+
 endmenu
Index: linux-2.6.12-rc3/drivers/block/Makefile
===================================================================
--- linux-2.6.12-rc3.orig/drivers/block/Makefile
+++ linux-2.6.12-rc3/drivers/block/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_IOSCHED_NOOP)    += noop-iosch
 obj-$(CONFIG_IOSCHED_AS)       += as-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)      += cfq-iosched.o
+obj-$(CONFIG_IOSCHED_PS)       += ps-iosched.o
 obj-$(CONFIG_MAC_FLOPPY)       += swim3.o
 obj-$(CONFIG_BLK_DEV_FD)       += floppy.o
 obj-$(CONFIG_BLK_DEV_FD98)     += floppy98.o
Index: linux-2.6.12-rc3/drivers/block/ps-iosched.c
===================================================================
--- /dev/null
+++ linux-2.6.12-rc3/drivers/block/ps-iosched.c
@@ -0,0 +1,1852 @@
+/*
+ *  linux/drivers/block/ps-iosched.c
+ *
+ *  PS, a proportional share disk scheduler for use with
+ *  Class-based Kernel Resource Management (CKRM).
+ *
+ *  Very similar to Completely Fair Queueing (CFQ) disk scheduler
+ *  written by Jens Axboe.
+ *
+ *  Copyright (C) 2005 Shailabh Nagar <[EMAIL PROTECTED]>
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/hash.h>
+#include <linux/rbtree.h>
+#include <linux/mempool.h>
+
+static unsigned long max_elapsed_prq;
+static unsigned long max_elapsed_dispatch;
+
+/*
+ * tunables
+ */
+static int ps_quantum = 4;             /* max requests dispatched per round of service */
+static int ps_queued = 8;              /* minimum rq allocation limit per-queue */
+static int ps_service = HZ;            /* period over which service is averaged */
+static int ps_fifo_expire_r = HZ / 2;  /* fifo timeout for sync requests */
+static int ps_fifo_expire_w = 5 * HZ;  /* fifo timeout for async requests */
+static int ps_fifo_rate = HZ / 8;      /* fifo expiry rate */
+static int ps_back_max = 16 * 1024;    /* maximum backwards seek, in KiB */
+static int ps_back_penalty = 2;        /* penalty of a backwards seek */
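+
+/*
+ * For illustration, with the defaults above: a sync request expires from
+ * the fifo HZ/2 jiffies (half a second) after queueing, an async one
+ * after five seconds, and ps_check_fifo() acts on expiry at most once
+ * every HZ/8 jiffies (an eighth of a second).
+ */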
+
+/*
+ * for the hash of psq inside the psd
+ */
+#define PS_QHASH_SHIFT         6
+#define PS_QHASH_ENTRIES       (1 << PS_QHASH_SHIFT)
+#define list_entry_qhash(entry)        hlist_entry((entry), struct ps_queue, ps_hash)
+
+/*
+ * for the hash of prq inside the psq
+ */
+#define PS_MHASH_SHIFT         6
+#define PS_MHASH_BLOCK(sec)    ((sec) >> 3)
+#define PS_MHASH_ENTRIES       (1 << PS_MHASH_SHIFT)
+#define PS_MHASH_FN(sec)       hash_long(PS_MHASH_BLOCK(sec), PS_MHASH_SHIFT)
+#define rq_hash_key(rq)                ((rq)->sector + (rq)->nr_sectors)
+#define list_entry_hash(ptr)   hlist_entry((ptr), struct ps_rq, hash)
+
+#define list_entry_psq(ptr)    list_entry((ptr), struct ps_queue, ps_list)
+
+#define RQ_DATA(rq)            (rq)->elevator_private
+
+/*
+ * rb-tree defines
+ */
+#define RB_NONE                        (2)
+#define RB_EMPTY(node)         ((node)->rb_node == NULL)
+#define RB_CLEAR_COLOR(node)   (node)->rb_color = RB_NONE
+#define RB_CLEAR(node)         do {    \
+       (node)->rb_parent = NULL;       \
+       RB_CLEAR_COLOR((node));         \
+       (node)->rb_right = NULL;        \
+       (node)->rb_left = NULL;         \
+} while (0)
+#define RB_CLEAR_ROOT(root)    ((root)->rb_node = NULL)
+#define ON_RB(node)            ((node)->rb_color != RB_NONE)
+#define rb_entry_prq(node)     rb_entry((node), struct ps_rq, rb_node)
+#define rq_rb_key(rq)          (rq)->sector
+
+/*
+ * threshold for switching off non-tag accounting
+ */
+#define PS_MAX_TAG             (4)
+
+/*
+ * sort key types and names
+ */
+enum {
+       PS_KEY_PGID,
+       PS_KEY_TGID,
+       PS_KEY_UID,
+       PS_KEY_GID,
+       PS_KEY_LAST,
+};
+
+static char *ps_key_types[] = { "pgid", "tgid", "uid", "gid", NULL };
+
+static kmem_cache_t *prq_pool;
+static kmem_cache_t *ps_pool;
+static kmem_cache_t *ps_ioc_pool;
+
+struct ps_data {
+       struct list_head rr_list;
+       struct list_head empty_list;
+
+       struct hlist_head *ps_hash;
+       struct hlist_head *prq_hash;
+
+       /* queues on rr_list (ie they have pending requests) */
+       unsigned int busy_queues;
+
+       unsigned int max_queued;
+
+       atomic_t ref;
+
+       int key_type;
+
+       mempool_t *prq_pool;
+
+       request_queue_t *queue;
+
+       sector_t last_sector;
+
+       int rq_in_driver;
+
+       /*
+        * tunables, see top of file
+        */
+       unsigned int ps_quantum;
+       unsigned int ps_queued;
+       unsigned int ps_fifo_expire_r;
+       unsigned int ps_fifo_expire_w;
+       unsigned int ps_fifo_batch_expire;
+       unsigned int ps_back_penalty;
+       unsigned int ps_back_max;
+       unsigned int find_best_prq;
+
+       unsigned int ps_tagged;
+};
+
+struct ps_queue {
+       /* reference count */
+       atomic_t ref;
+       /* parent ps_data */
+       struct ps_data *psd;
+       /* hash of mergeable requests */
+       struct hlist_node ps_hash;
+       /* hash key */
+       unsigned long key;
+       /* whether queue is on rr (or empty) list */
+       int on_rr;
+       /* on either rr or empty list of psd */
+       struct list_head ps_list;
+       /* sorted list of pending requests */
+       struct rb_root sort_list;
+       /* if fifo isn't expired, next request to serve */
+       struct ps_rq *next_prq;
+       /* requests queued in sort_list */
+       int queued[2];
+       /* currently allocated requests */
+       int allocated[2];
+       /* fifo list of requests in sort_list */
+       struct list_head fifo[2];
+       /* last time fifo expired */
+       unsigned long last_fifo_expire;
+
+       int key_type;
+
+       unsigned long service_start;
+       unsigned long service_used;
+
+       unsigned int max_rate;
+
+       /* number of requests that have been handed to the driver */
+       int in_flight;
+       /* request allocation limit, per data direction */
+       int alloc_limit[2];
+};
+
+struct ps_rq {
+       struct rb_node rb_node;
+       sector_t rb_key;
+       struct request *request;
+       struct hlist_node hash;
+
+       struct ps_queue *ps_queue;
+       struct ps_io_context *io_context;
+
+       unsigned long service_start;
+       unsigned long queue_start;
+
+       unsigned int in_flight : 1;
+       unsigned int accounted : 1;
+       unsigned int is_sync   : 1;
+       unsigned int is_write  : 1;
+};
+
+static struct ps_queue *ps_find_ps_hash(struct ps_data *, unsigned long);
+static void ps_dispatch_sort(request_queue_t *, struct ps_rq *);
+static void ps_update_next_prq(struct ps_rq *);
+static void ps_put_psd(struct ps_data *psd);
+
+/*
+ * what the fairness is based on (ie how processes are grouped and
+ * differentiated)
+ */
+static inline unsigned long
+ps_hash_key(struct ps_data *psd, struct task_struct *tsk)
+{
+       /*
+        * optimize this so that ->key_type is the offset into the struct
+        */
+       switch (psd->key_type) {
+               case PS_KEY_PGID:
+                       return process_group(tsk);
+               default:
+               case PS_KEY_TGID:
+                       return tsk->tgid;
+               case PS_KEY_UID:
+                       return tsk->uid;
+               case PS_KEY_GID:
+                       return tsk->gid;
+       }
+}
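+
+/*
+ * Example: with key_type == PS_KEY_UID, every process running as uid 1000
+ * hashes to the same ps_queue, so all of that user's pending I/O is
+ * sorted and scheduled as a single entity; PS_KEY_TGID (the default set
+ * in ps_init_queue()) gives each thread group its own queue instead.
+ */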
+
+/*
+ * lots of deadline iosched dupes, can be abstracted later...
+ */
+static inline void ps_del_prq_hash(struct ps_rq *prq)
+{
+       hlist_del_init(&prq->hash);
+}
+
+static void ps_remove_merge_hints(request_queue_t *q, struct ps_rq *prq)
+{
+       ps_del_prq_hash(prq);
+
+       if (q->last_merge == prq->request)
+               q->last_merge = NULL;
+
+       ps_update_next_prq(prq);
+}
+
+static inline void ps_add_prq_hash(struct ps_data *psd, struct ps_rq *prq)
+{
+       const int hash_idx = PS_MHASH_FN(rq_hash_key(prq->request));
+
+       BUG_ON(!hlist_unhashed(&prq->hash));
+
+       hlist_add_head(&prq->hash, &psd->prq_hash[hash_idx]);
+}
+
+static struct request *ps_find_rq_hash(struct ps_data *psd, sector_t offset)
+{
+       struct hlist_head *hash_list = &psd->prq_hash[PS_MHASH_FN(offset)];
+       struct hlist_node *entry, *next;
+
+       hlist_for_each_safe(entry, next, hash_list) {
+               struct ps_rq *prq = list_entry_hash(entry);
+               struct request *__rq = prq->request;
+
+               BUG_ON(hlist_unhashed(&prq->hash));
+
+               if (!rq_mergeable(__rq)) {
+                       ps_del_prq_hash(prq);
+                       continue;
+               }
+
+               if (rq_hash_key(__rq) == offset)
+                       return __rq;
+       }
+
+       return NULL;
+}
+
+/*
+ * Lifted from AS - choose which of prq1 and prq2 is best served now.
+ * We choose the request that is closest to the head right now. Distances
+ * behind the head are penalized and only allowed to a certain extent.
+ */
+static struct ps_rq *
+ps_choose_req(struct ps_data *psd, struct ps_rq *prq1, struct ps_rq *prq2)
+{
+       sector_t last, s1, s2, d1 = 0, d2 = 0;
+       int r1_wrap = 0, r2_wrap = 0;   /* requests are behind the disk head */
+       unsigned long back_max;
+
+       if (prq1 == NULL || prq1 == prq2)
+               return prq2;
+       if (prq2 == NULL)
+               return prq1;
+
+       s1 = prq1->request->sector;
+       s2 = prq2->request->sector;
+
+       last = psd->last_sector;
+
+#if 0
+       if (!list_empty(&psd->queue->queue_head)) {
+               struct list_head *entry = &psd->queue->queue_head;
+               unsigned long distance = ~0UL;
+               struct request *rq;
+
+               while ((entry = entry->prev) != &psd->queue->queue_head) {
+                       rq = list_entry_rq(entry);
+
+                       if (blk_barrier_rq(rq))
+                               break;
+
+                       if (distance < abs(s1 - rq->sector + rq->nr_sectors)) {
+                               distance = abs(s1 - rq->sector +rq->nr_sectors);
+                               last = rq->sector + rq->nr_sectors;
+                       }
+                       if (distance < abs(s2 - rq->sector + rq->nr_sectors)) {
+                               distance = abs(s2 - rq->sector +rq->nr_sectors);
+                               last = rq->sector + rq->nr_sectors;
+                       }
+               }
+       }
+#endif
+
+       /*
+        * by definition, 1KiB is 2 sectors
+        */
+       back_max = psd->ps_back_max * 2;
+
+       /*
+        * Strict one way elevator _except_ in the case where we allow
+        * short backward seeks which are biased as twice the cost of a
+        * similar forward seek.
+        */
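+       /*
+        * Worked example with the defaults: ps_back_max = 16*1024 KiB, so
+        * back_max = 32768 sectors. With the head at sector 100000, a
+        * request at sector 110000 gets d = 10000, while one at 95000
+        * (behind the head, but within back_max) gets
+        * d = 5000 * ps_back_penalty = 10000; that tie is then broken
+        * below in favour of the higher sector.
+        */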
+       if (s1 >= last)
+               d1 = s1 - last;
+       else if (s1 + back_max >= last)
+               d1 = (last - s1) * psd->ps_back_penalty;
+       else
+               r1_wrap = 1;
+
+       if (s2 >= last)
+               d2 = s2 - last;
+       else if (s2 + back_max >= last)
+               d2 = (last - s2) * psd->ps_back_penalty;
+       else
+               r2_wrap = 1;
+
+       /* Found required data */
+       if (!r1_wrap && r2_wrap)
+               return prq1;
+       else if (!r2_wrap && r1_wrap)
+               return prq2;
+       else if (r1_wrap && r2_wrap) {
+               /* both behind the head */
+               if (s1 <= s2)
+                       return prq1;
+               else
+                       return prq2;
+       }
+
+       /* Both requests in front of the head */
+       if (d1 < d2)
+               return prq1;
+       else if (d2 < d1)
+               return prq2;
+       else {
+               if (s1 >= s2)
+                       return prq1;
+               else
+                       return prq2;
+       }
+}
+
+/*
+ * would be nice to take fifo expire time into account as well
+ */
+static struct ps_rq *
+ps_find_next_prq(struct ps_data *psd, struct ps_queue *psq,
+                 struct ps_rq *last)
+{
+       struct ps_rq *prq_next = NULL, *prq_prev = NULL;
+       struct rb_node *rbnext, *rbprev;
+
+       if (!ON_RB(&last->rb_node))
+               return NULL;
+
+       if ((rbnext = rb_next(&last->rb_node)) == NULL)
+               rbnext = rb_first(&psq->sort_list);
+
+       rbprev = rb_prev(&last->rb_node);
+
+       if (rbprev)
+               prq_prev = rb_entry_prq(rbprev);
+       if (rbnext)
+               prq_next = rb_entry_prq(rbnext);
+
+       return ps_choose_req(psd, prq_next, prq_prev);
+}
+
+static void ps_update_next_prq(struct ps_rq *prq)
+{
+       struct ps_queue *psq = prq->ps_queue;
+
+       if (psq->next_prq == prq)
+               psq->next_prq = ps_find_next_prq(psq->psd, psq, prq);
+}
+
+static int ps_check_sort_rr_list(struct ps_queue *psq)
+{
+       struct list_head *head = &psq->psd->rr_list;
+       struct list_head *next, *prev;
+
+       /*
+        * list might still be ordered
+        */
+       next = psq->ps_list.next;
+       if (next != head) {
+               struct ps_queue *cnext = list_entry_psq(next);
+
+               if (psq->service_used > cnext->service_used)
+                       return 1;
+       }
+
+       prev = psq->ps_list.prev;
+       if (prev != head) {
+               struct ps_queue *cprev = list_entry_psq(prev);
+
+               if (psq->service_used < cprev->service_used)
+                       return 1;
+       }
+
+       return 0;
+}
+
+static void ps_sort_rr_list(struct ps_queue *psq, int new_queue)
+{
+       struct list_head *entry = &psq->psd->rr_list;
+
+       if (!psq->on_rr)
+               return;
+       if (!new_queue && !ps_check_sort_rr_list(psq))
+               return;
+
+       list_del(&psq->ps_list);
+
+       /*
+        * sort by our mean service_used, sub-sort by in-flight requests
+        */
+       while ((entry = entry->prev) != &psq->psd->rr_list) {
+               struct ps_queue *__psq = list_entry_psq(entry);
+
+               if (psq->service_used > __psq->service_used)
+                       break;
+               else if (psq->service_used == __psq->service_used) {
+                       struct list_head *prv;
+
+                       while ((prv = entry->prev) != &psq->psd->rr_list) {
+                               __psq = list_entry_psq(prv);
+
+                               WARN_ON(__psq->service_used > psq->service_used);
+                               if (psq->service_used != __psq->service_used)
+                                       break;
+                               if (psq->in_flight > __psq->in_flight)
+                                       break;
+
+                               entry = prv;
+                       }
+               }
+       }
+
+       list_add(&psq->ps_list, entry);
+}
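+
+/*
+ * The tail-to-head scan above keeps rr_list sorted in ascending
+ * service_used order, so the queue at the head is always the one that
+ * has received the least service and is dispatched from first; this
+ * ordering is what yields the proportional-share behaviour.
+ */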
+
+/*
+ * add to busy list of queues for service, trying to be fair in ordering
+ * the pending list according to requests serviced
+ */
+static inline void
+ps_add_psq_rr(struct ps_data *psd, struct ps_queue *psq)
+{
+       /*
+        * it's currently on the empty list
+        */
+       psq->on_rr = 1;
+       psd->busy_queues++;
+
+       if (time_after(jiffies, psq->service_start + ps_service))
+               psq->service_used >>= 3;
+
+       ps_sort_rr_list(psq, 1);
+}
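+
+/*
+ * Note the >>= 3 above: a queue re-entering the round-robin more than
+ * ps_service jiffies after its service window started has its
+ * accumulated service divided by eight, so stale history decays rather
+ * than penalizing a returning class indefinitely.
+ */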
+
+static inline void
+ps_del_psq_rr(struct ps_data *psd, struct ps_queue *psq)
+{
+       list_move(&psq->ps_list, &psd->empty_list);
+       psq->on_rr = 0;
+
+       BUG_ON(!psd->busy_queues);
+       psd->busy_queues--;
+}
+
+/*
+ * rb tree support functions
+ */
+static inline void ps_del_prq_rb(struct ps_rq *prq)
+{
+       struct ps_queue *psq = prq->ps_queue;
+
+       if (ON_RB(&prq->rb_node)) {
+               struct ps_data *psd = psq->psd;
+
+               BUG_ON(!psq->queued[prq->is_sync]);
+
+               ps_update_next_prq(prq);
+
+               psq->queued[prq->is_sync]--;
+               rb_erase(&prq->rb_node, &psq->sort_list);
+               RB_CLEAR_COLOR(&prq->rb_node);
+
+               if (RB_EMPTY(&psq->sort_list) && psq->on_rr)
+                       ps_del_psq_rr(psd, psq);
+       }
+}
+
+static struct ps_rq *
+__ps_add_prq_rb(struct ps_rq *prq)
+{
+       struct rb_node **p = &prq->ps_queue->sort_list.rb_node;
+       struct rb_node *parent = NULL;
+       struct ps_rq *__prq;
+
+       while (*p) {
+               parent = *p;
+               __prq = rb_entry_prq(parent);
+
+               if (prq->rb_key < __prq->rb_key)
+                       p = &(*p)->rb_left;
+               else if (prq->rb_key > __prq->rb_key)
+                       p = &(*p)->rb_right;
+               else
+                       return __prq;
+       }
+
+       rb_link_node(&prq->rb_node, parent, p);
+       return NULL;
+}
+
+static void ps_add_prq_rb(struct ps_rq *prq)
+{
+       struct ps_queue *psq = prq->ps_queue;
+       struct ps_data *psd = psq->psd;
+       struct request *rq = prq->request;
+       struct ps_rq *__alias;
+
+       prq->rb_key = rq_rb_key(rq);
+       psq->queued[prq->is_sync]++;
+
+       /*
+        * looks a little odd, but the first insert might return an alias.
+        * if that happens, put the alias on the dispatch list
+        */
+       while ((__alias = __ps_add_prq_rb(prq)) != NULL)
+               ps_dispatch_sort(psd->queue, __alias);
+
+       rb_insert_color(&prq->rb_node, &psq->sort_list);
+
+       if (!psq->on_rr)
+               ps_add_psq_rr(psd, psq);
+
+       /*
+        * check if this request is a better next-serve candidate
+        */
+       psq->next_prq = ps_choose_req(psd, psq->next_prq, prq);
+}
+
+static inline void
+ps_reposition_prq_rb(struct ps_queue *psq, struct ps_rq *prq)
+{
+       if (ON_RB(&prq->rb_node)) {
+               rb_erase(&prq->rb_node, &psq->sort_list);
+               psq->queued[prq->is_sync]--;
+       }
+
+       ps_add_prq_rb(prq);
+}
+
+static struct request *
+ps_find_rq_rb(struct ps_data *psd, sector_t sector)
+{
+       const unsigned long key = ps_hash_key(psd, current);
+       struct ps_queue *psq = ps_find_ps_hash(psd, key);
+       struct rb_node *n;
+
+       if (!psq)
+               goto out;
+
+       n = psq->sort_list.rb_node;
+       while (n) {
+               struct ps_rq *prq = rb_entry_prq(n);
+
+               if (sector < prq->rb_key)
+                       n = n->rb_left;
+               else if (sector > prq->rb_key)
+                       n = n->rb_right;
+               else
+                       return prq->request;
+       }
+
+out:
+       return NULL;
+}
+
+/*
+ * make sure the service time gets corrected on reissue of this request
+ */
+static void ps_requeue_request(request_queue_t *q, struct request *rq)
+{
+       struct ps_rq *prq = RQ_DATA(rq);
+
+       if (prq) {
+               struct ps_queue *psq = prq->ps_queue;
+
+               if (psq->psd->ps_tagged) {
+                       psq->service_used--;
+                       ps_sort_rr_list(psq, 0);
+               }
+
+               if (prq->accounted) {
+                       prq->accounted = 0;
+                       psq->psd->rq_in_driver--;
+               }
+       }
+       list_add(&rq->queuelist, &q->queue_head);
+}
+
+static void ps_remove_request(request_queue_t *q, struct request *rq)
+{
+       struct ps_rq *prq = RQ_DATA(rq);
+
+       if (prq) {
+               ps_remove_merge_hints(q, prq);
+               list_del_init(&rq->queuelist);
+
+               if (prq->ps_queue)
+                       ps_del_prq_rb(prq);
+       }
+}
+
+static int
+ps_merge(request_queue_t *q, struct request **req, struct bio *bio)
+{
+       struct ps_data *psd = q->elevator->elevator_data;
+       struct request *__rq;
+       int ret;
+
+       ret = elv_try_last_merge(q, bio);
+       if (ret != ELEVATOR_NO_MERGE) {
+               __rq = q->last_merge;
+               goto out_insert;
+       }
+
+       __rq = ps_find_rq_hash(psd, bio->bi_sector);
+       if (__rq) {
+               BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
+
+               if (elv_rq_merge_ok(__rq, bio)) {
+                       ret = ELEVATOR_BACK_MERGE;
+                       goto out;
+               }
+       }
+
+       __rq = ps_find_rq_rb(psd, bio->bi_sector + bio_sectors(bio));
+       if (__rq) {
+               if (elv_rq_merge_ok(__rq, bio)) {
+                       ret = ELEVATOR_FRONT_MERGE;
+                       goto out;
+               }
+       }
+
+       return ELEVATOR_NO_MERGE;
+out:
+       q->last_merge = __rq;
+out_insert:
+       *req = __rq;
+       return ret;
+}
+
+static void ps_merged_request(request_queue_t *q, struct request *req)
+{
+       struct ps_data *psd = q->elevator->elevator_data;
+       struct ps_rq *prq = RQ_DATA(req);
+
+       ps_del_prq_hash(prq);
+       ps_add_prq_hash(psd, prq);
+
+       if (ON_RB(&prq->rb_node) && (rq_rb_key(req) != prq->rb_key)) {
+               struct ps_queue *psq = prq->ps_queue;
+
+               ps_update_next_prq(prq);
+               ps_reposition_prq_rb(psq, prq);
+       }
+
+       q->last_merge = req;
+}
+
+static void
+ps_merged_requests(request_queue_t *q, struct request *rq,
+                   struct request *next)
+{
+       struct ps_rq *prq = RQ_DATA(rq);
+       struct ps_rq *cnext = RQ_DATA(next);
+
+       ps_merged_request(q, rq);
+
+       if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) {
+               if (time_before(cnext->queue_start, prq->queue_start)) {
+                       list_move(&rq->queuelist, &next->queuelist);
+                       prq->queue_start = cnext->queue_start;
+               }
+       }
+
+       ps_update_next_prq(cnext);
+       ps_remove_request(q, next);
+}
+
+/*
+ * we dispatch psd->ps_quantum requests in total from the rr_list queues,
+ * this function sector-sorts the selected request to minimize seeks. we
+ * start at psd->last_sector, not 0.
+ */
+static void ps_dispatch_sort(request_queue_t *q, struct ps_rq *prq)
+{
+       struct ps_data *psd = q->elevator->elevator_data;
+       struct ps_queue *psq = prq->ps_queue;
+       struct list_head *head = &q->queue_head, *entry = head;
+       struct request *__rq;
+       sector_t last;
+
+       ps_del_prq_rb(prq);
+       ps_remove_merge_hints(q, prq);
+       list_del(&prq->request->queuelist);
+
+       last = psd->last_sector;
+       while ((entry = entry->prev) != head) {
+               __rq = list_entry_rq(entry);
+
+               if (blk_barrier_rq(prq->request))
+                       break;
+               if (!blk_fs_request(prq->request))
+                       break;
+
+               if (prq->request->sector > __rq->sector)
+                       break;
+               if (__rq->sector > last && prq->request->sector < last) {
+                       last = prq->request->sector;
+                       break;
+               }
+       }
+
+       psd->last_sector = last;
+       prq->in_flight = 1;
+       psq->in_flight++;
+       list_add(&prq->request->queuelist, entry);
+}
+
+/*
+ * return expired entry, or NULL to just start from scratch in rbtree
+ */
+static inline struct ps_rq *ps_check_fifo(struct ps_queue *psq)
+{
+       struct ps_data *psd = psq->psd;
+       const int reads = !list_empty(&psq->fifo[0]);
+       const int writes = !list_empty(&psq->fifo[1]);
+       unsigned long now = jiffies;
+       struct ps_rq *prq;
+
+       if (time_before(now, psq->last_fifo_expire + psd->ps_fifo_batch_expire))
+               return NULL;
+
+       /* only peek at a fifo that actually has entries */
+       if (reads) {
+               prq = RQ_DATA(list_entry(psq->fifo[0].next, struct request, queuelist));
+               if (time_after(now, prq->queue_start + psd->ps_fifo_expire_r)) {
+                       psq->last_fifo_expire = now;
+                       return prq;
+               }
+       }
+
+       if (writes) {
+               prq = RQ_DATA(list_entry(psq->fifo[1].next, struct request, queuelist));
+               if (time_after(now, prq->queue_start + psd->ps_fifo_expire_w)) {
+                       psq->last_fifo_expire = now;
+                       return prq;
+               }
+       }
+
+       return NULL;
+}
+
+/*
+ * dispatch a single request from given queue
+ */
+static inline void
+ps_dispatch_request(request_queue_t *q, struct ps_data *psd,
+                    struct ps_queue *psq)
+{
+       struct ps_rq *prq;
+
+       /*
+        * follow expired path, else get first next available
+        */
+       if ((prq = ps_check_fifo(psq)) == NULL) {
+               if (psd->find_best_prq)
+                       prq = psq->next_prq;
+               else
+                       prq = rb_entry_prq(rb_first(&psq->sort_list));
+       }
+
+       psd->last_sector = prq->request->sector + prq->request->nr_sectors;
+
+       /*
+        * finally, insert request into driver list
+        */
+       ps_dispatch_sort(q, prq);
+}
+
+static int ps_dispatch_requests(request_queue_t *q, int max_dispatch)
+{
+       struct ps_data *psd = q->elevator->elevator_data;
+       struct ps_queue *psq;
+       struct list_head *entry, *tmp;
+       int queued, busy_queues, first_round;
+
+       if (list_empty(&psd->rr_list))
+               return 0;
+
+       queued = 0;
+       first_round = 1;
+restart:
+       busy_queues = 0;
+       list_for_each_safe(entry, tmp, &psd->rr_list) {
+               psq = list_entry_psq(entry);
+
+               BUG_ON(RB_EMPTY(&psq->sort_list));
+
+               /*
+                * first round of queueing, only select from queues that
+                * don't already have io in-flight
+                */
+               if (first_round && psq->in_flight)
+                       continue;
+
+               ps_dispatch_request(q, psd, psq);
+
+               if (!RB_EMPTY(&psq->sort_list))
+                       busy_queues++;
+
+               queued++;
+       }
+
+       if ((queued < max_dispatch) && (busy_queues || first_round)) {
+               first_round = 0;
+               goto restart;
+       }
+
+       return queued;
+}
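+
+/*
+ * Example: with ps_quantum = 4 and three busy queues that have nothing
+ * in flight, the first round dispatches one request from each (queued
+ * becomes 3); since 3 < 4, a second round dispatches one more from every
+ * still-busy queue, so the quantum is a soft target rather than a hard
+ * cap on dispatches.
+ */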
+
+static inline void ps_account_dispatch(struct ps_rq *prq)
+{
+       struct ps_queue *psq = prq->ps_queue;
+       struct ps_data *psd = psq->psd;
+       unsigned long now, elapsed;
+
+       if (!blk_fs_request(prq->request))
+               return;
+
+       /*
+        * accounted bit is necessary since some drivers will call
+        * elv_next_request() many times for the same request (eg ide)
+        */
+       if (prq->accounted)
+               return;
+
+       now = jiffies;
+       if (psq->service_start == ~0UL)
+               psq->service_start = now;
+
+       /*
+        * on drives with tagged command queueing, command turn-around time
+        * doesn't necessarily reflect the time spent processing this very
+        * command inside the drive. so do the accounting differently there,
+        * by just sorting on the number of requests
+        */
+       if (psd->ps_tagged) {
+               if (time_after(now, psq->service_start + ps_service)) {
+                       psq->service_start = now;
+                       psq->service_used /= 10;
+               }
+
+               psq->service_used++;
+               ps_sort_rr_list(psq, 0);
+       }
+
+       elapsed = now - prq->queue_start;
+       if (elapsed > max_elapsed_dispatch)
+               max_elapsed_dispatch = elapsed;
+
+       prq->accounted = 1;
+       prq->service_start = now;
+
+       if (++psd->rq_in_driver >= PS_MAX_TAG && !psd->ps_tagged) {
+               psq->psd->ps_tagged = 1;
+               printk("ps: depth %d reached, tagging now on\n", PS_MAX_TAG);
+       }
+}
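+
+/*
+ * Once PS_MAX_TAG (4) requests have been observed in the driver at once,
+ * the device is assumed to do tagged command queueing and ps_tagged is
+ * set; the switch is one-way for the life of the elevator, and
+ * accounting changes from measured turn-around time to a plain request
+ * count.
+ */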
+
+static inline void
+ps_account_completion(struct ps_queue *psq, struct ps_rq *prq)
+{
+       struct ps_data *psd = psq->psd;
+
+       if (!prq->accounted)
+               return;
+
+       WARN_ON(!psd->rq_in_driver);
+       psd->rq_in_driver--;
+
+       if (!psd->ps_tagged) {
+               unsigned long now = jiffies;
+               unsigned long duration = now - prq->service_start;
+
+               if (time_after(now, psq->service_start + ps_service)) {
+                       psq->service_start = now;
+                       psq->service_used >>= 3;
+               }
+
+               psq->service_used += duration;
+               ps_sort_rr_list(psq, 0);
+
+               if (duration > max_elapsed_prq)
+                       max_elapsed_prq = duration;
+       }
+}
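+
+/*
+ * The unit of service_used thus depends on the accounting mode: jiffies
+ * of turn-around time on untagged drives (above), a request count on
+ * tagged ones (see ps_account_dispatch()); the rr_list comparisons only
+ * require the values to be mutually consistent.
+ */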
+
+static struct request *ps_next_request(request_queue_t *q)
+{
+       struct ps_data *psd = q->elevator->elevator_data;
+       struct request *rq;
+
+       if (!list_empty(&q->queue_head)) {
+               struct ps_rq *prq;
+dispatch:
+               rq = list_entry_rq(q->queue_head.next);
+
+               if ((prq = RQ_DATA(rq)) != NULL) {
+                       ps_remove_merge_hints(q, prq);
+                       ps_account_dispatch(prq);
+               }
+
+               return rq;
+       }
+
+       if (ps_dispatch_requests(q, psd->ps_quantum))
+               goto dispatch;
+
+       return NULL;
+}
+
+/*
+ * task holds one reference to the queue, dropped when task exits. each prq
+ * in-flight on this queue also holds a reference, dropped when prq is freed.
+ *
+ * queue lock must be held here.
+ */
+static void ps_put_queue(struct ps_queue *psq)
+{
+       BUG_ON(!atomic_read(&psq->ref));
+
+       if (!atomic_dec_and_test(&psq->ref))
+               return;
+
+       BUG_ON(rb_first(&psq->sort_list));
+       BUG_ON(psq->on_rr);
+
+       ps_put_psd(psq->psd);
+
+       /*
+        * it's on the empty list and still hashed
+        */
+       list_del(&psq->ps_list);
+       hlist_del(&psq->ps_hash);
+       kmem_cache_free(ps_pool, psq);
+}
+
+static inline struct ps_queue *
+__ps_find_ps_hash(struct ps_data *psd, unsigned long key, const int hashval)
+{
+       struct hlist_head *hash_list = &psd->ps_hash[hashval];
+       struct hlist_node *entry, *next;
+
+       hlist_for_each_safe(entry, next, hash_list) {
+               struct ps_queue *__psq = list_entry_qhash(entry);
+
+               if (__psq->key == key)
+                       return __psq;
+       }
+
+       return NULL;
+}
+
+static struct ps_queue *
+ps_find_ps_hash(struct ps_data *psd, unsigned long key)
+{
+       return __ps_find_ps_hash(psd, key, hash_long(key, PS_QHASH_SHIFT));
+}
+
+static inline void
+ps_rehash_psq(struct ps_data *psd, struct ps_queue **psq,
+               struct ps_io_context *pic)
+{
+       unsigned long hashkey = ps_hash_key(psd, current);
+       unsigned long hashval = hash_long(hashkey, PS_QHASH_SHIFT);
+       struct ps_queue *__psq;
+       unsigned long flags;
+
+       spin_lock_irqsave(psd->queue->queue_lock, flags);
+
+       hlist_del(&(*psq)->ps_hash);
+
+       __psq = __ps_find_ps_hash(psd, hashkey, hashval);
+       if (!__psq || __psq == *psq) {
+               __psq = *psq;
+               hlist_add_head(&__psq->ps_hash, &psd->ps_hash[hashval]);
+               __psq->key_type = psd->key_type;
+       } else {
+               atomic_inc(&__psq->ref);
+               pic->psq = __psq;
+               ps_put_queue(*psq);
+               *psq = __psq;
+       }
+
+       pic->psq = __psq;
+       spin_unlock_irqrestore(psd->queue->queue_lock, flags);
+}
+
+static void ps_free_io_context(struct ps_io_context *pic)
+{
+       kmem_cache_free(ps_ioc_pool, pic);
+}
+
+/*
+ * locking hierarchy is: io_context lock -> queue locks
+ */
+static void ps_exit_io_context(struct ps_io_context *pic)
+{
+       struct ps_queue *psq = pic->psq;
+       struct list_head *entry = &pic->list;
+       request_queue_t *q;
+       unsigned long flags;
+
+       /*
+        * put the reference this task is holding to the various queues
+        */
+       spin_lock_irqsave(&pic->ioc->lock, flags);
+       while ((entry = pic->list.next) != &pic->list) {
+               struct ps_io_context *__pic;
+
+               __pic = list_entry(entry, struct ps_io_context, list);
+               list_del(entry);
+
+               q = __pic->psq->psd->queue;
+               spin_lock(q->queue_lock);
+               ps_put_queue(__pic->psq);
+               spin_unlock(q->queue_lock);
+       }
+
+       q = psq->psd->queue;
+       spin_lock(q->queue_lock);
+       ps_put_queue(psq);
+       spin_unlock(q->queue_lock);
+
+       pic->psq = NULL;
+       spin_unlock_irqrestore(&pic->ioc->lock, flags);
+}
+
+static struct ps_io_context *ps_alloc_io_context(int gfp_flags)
+{
+       struct ps_io_context *pic = kmem_cache_alloc(ps_ioc_pool, gfp_flags);
+
+       if (pic) {
+               pic->dtor = ps_free_io_context;
+               pic->exit = ps_exit_io_context;
+               INIT_LIST_HEAD(&pic->list);
+               pic->psq = NULL;
+       }
+
+       return pic;
+}
+
+/*
+ * Set up the general io context and the ps io context. There can be
+ * several ps io contexts per general io context, if this process is doing
+ * io to more than one device managed by ps. Note that the caller is
+ * holding a reference to psq, so we don't need to worry about it
+ * disappearing.
+ */
+static struct ps_io_context *
+ps_get_io_context(struct ps_queue **psq, int gfp_flags)
+{
+       struct ps_data *psd = (*psq)->psd;
+       struct ps_queue *__psq = *psq;
+       struct ps_io_context *pic;
+       struct io_context *ioc;
+
+       might_sleep_if(gfp_flags & __GFP_WAIT);
+
+       ioc = get_io_context(gfp_flags);
+       if (!ioc)
+               return NULL;
+
+       if ((pic = ioc->pic) == NULL) {
+               pic = ps_alloc_io_context(gfp_flags);
+
+               if (pic == NULL)
+                       goto err;
+
+               ioc->pic = pic;
+               pic->ioc = ioc;
+               pic->psq = __psq;
+               atomic_inc(&__psq->ref);
+       } else {
+               struct ps_io_context *__pic;
+               unsigned long flags;
+
+               /*
+                * since the first pic on the list is actually the head
+                * itself, need to check this here or we'll duplicate a
+                * pic per ioc for no reason
+                */
+               if (pic->psq == __psq)
+                       goto out;
+
+               /*
+                * pic exists, check if we already are there. linear search
+                * should be ok here, the list will usually not be more than
+                * 1 or a few entries long
+                */
+               spin_lock_irqsave(&ioc->lock, flags);
+               list_for_each_entry(__pic, &pic->list, list) {
+                       /*
+                        * this process is already holding a reference to
+                        * this queue, so no need to get one more
+                        */
+                       if (__pic->psq == __psq) {
+                               pic = __pic;
+                               spin_unlock_irqrestore(&ioc->lock, flags);
+                               goto out;
+                       }
+               }
+               spin_unlock_irqrestore(&ioc->lock, flags);
+
+               /*
+                * nope, process doesn't have a pic associated with this
+                * psq yet. get a new one and add to list
+                */
+               __pic = ps_alloc_io_context(gfp_flags);
+               if (__pic == NULL)
+                       goto err;
+
+               __pic->ioc = ioc;
+               __pic->psq = __psq;
+               atomic_inc(&__psq->ref);
+               spin_lock_irqsave(&ioc->lock, flags);
+               list_add(&__pic->list, &pic->list);
+               spin_unlock_irqrestore(&ioc->lock, flags);
+
+               pic = __pic;
+               *psq = __psq;
+       }
+
+out:
+       /*
+        * if key_type has been changed on the fly, we lazily rehash
+        * each queue at lookup time
+        */
+       if ((*psq)->key_type != psd->key_type)
+               ps_rehash_psq(psd, psq, pic);
+
+       return pic;
+err:
+       put_io_context(ioc);
+       return NULL;
+}
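+
+/*
+ * Resulting layout, for example: a process doing io to two ps-managed
+ * disks has one io_context whose ->pic is the ps_io_context for the
+ * first disk, with the second disk's ps_io_context linked on pic->list;
+ * each ps_io_context holds a reference on its ps_queue.
+ */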
+
+static struct ps_queue *
+__ps_get_queue(struct ps_data *psd, unsigned long key, int gfp_mask)
+{
+       const int hashval = hash_long(key, PS_QHASH_SHIFT);
+       struct ps_queue *psq, *new_psq = NULL;
+
+retry:
+       psq = __ps_find_ps_hash(psd, key, hashval);
+
+       if (!psq) {
+               if (new_psq) {
+                       psq = new_psq;
+                       new_psq = NULL;
+               } else if (gfp_mask & __GFP_WAIT) {
+                       spin_unlock_irq(psd->queue->queue_lock);
+                       new_psq = kmem_cache_alloc(ps_pool, gfp_mask);
+                       spin_lock_irq(psd->queue->queue_lock);
+                       goto retry;
+               } else
+                       goto out;
+
+               memset(psq, 0, sizeof(*psq));
+
+               INIT_HLIST_NODE(&psq->ps_hash);
+               INIT_LIST_HEAD(&psq->ps_list);
+               RB_CLEAR_ROOT(&psq->sort_list);
+               INIT_LIST_HEAD(&psq->fifo[0]);
+               INIT_LIST_HEAD(&psq->fifo[1]);
+
+               psq->key = key;
+               hlist_add_head(&psq->ps_hash, &psd->ps_hash[hashval]);
+               atomic_set(&psq->ref, 0);
+               psq->psd = psd;
+               atomic_inc(&psd->ref);
+               psq->key_type = psd->key_type;
+               psq->service_start = ~0UL;
+       }
+
+       if (new_psq)
+               kmem_cache_free(ps_pool, new_psq);
+
+       atomic_inc(&psq->ref);
+out:
+       WARN_ON((gfp_mask & __GFP_WAIT) && !psq);
+       return psq;
+}
+
+static void ps_enqueue(struct ps_data *psd, struct ps_rq *prq)
+{
+       prq->is_sync = 0;
+       if (rq_data_dir(prq->request) == READ || current->flags & PF_SYNCWRITE)
+               prq->is_sync = 1;
+
+       ps_add_prq_rb(prq);
+       prq->queue_start = jiffies;
+
+       list_add_tail(&prq->request->queuelist, &prq->ps_queue->fifo[prq->is_sync]);
+}
+
+static void
+ps_insert_request(request_queue_t *q, struct request *rq, int where)
+{
+       struct ps_data *psd = q->elevator->elevator_data;
+       struct ps_rq *prq = RQ_DATA(rq);
+
+       switch (where) {
+               case ELEVATOR_INSERT_BACK:
+                       while (ps_dispatch_requests(q, psd->ps_quantum))
+                               ;
+                       list_add_tail(&rq->queuelist, &q->queue_head);
+                       break;
+               case ELEVATOR_INSERT_FRONT:
+                       list_add(&rq->queuelist, &q->queue_head);
+                       break;
+               case ELEVATOR_INSERT_SORT:
+                       BUG_ON(!blk_fs_request(rq));
+                       ps_enqueue(psd, prq);
+                       break;
+               default:
+                       printk("%s: bad insert point %d\n", __FUNCTION__, where);
+                       return;
+       }
+
+       if (rq_mergeable(rq)) {
+               ps_add_prq_hash(psd, prq);
+
+               if (!q->last_merge)
+                       q->last_merge = rq;
+       }
+}
+
+static int ps_queue_empty(request_queue_t *q)
+{
+       struct ps_data *psd = q->elevator->elevator_data;
+
+       return list_empty(&q->queue_head) && list_empty(&psd->rr_list);
+}
+
+static void ps_completed_request(request_queue_t *q, struct request *rq)
+{
+       struct ps_rq *prq = RQ_DATA(rq);
+       struct ps_queue *psq;
+
+       if (unlikely(!blk_fs_request(rq)))
+               return;
+
+       psq = prq->ps_queue;
+
+       if (prq->in_flight) {
+               WARN_ON(!psq->in_flight);
+               psq->in_flight--;
+       }
+
+       ps_account_completion(psq, prq);
+}
+
+static struct request *
+ps_former_request(request_queue_t *q, struct request *rq)
+{
+       struct ps_rq *prq = RQ_DATA(rq);
+       struct rb_node *rbprev = rb_prev(&prq->rb_node);
+
+       if (rbprev)
+               return rb_entry_prq(rbprev)->request;
+
+       return NULL;
+}
+
+static struct request *
+ps_latter_request(request_queue_t *q, struct request *rq)
+{
+       struct ps_rq *prq = RQ_DATA(rq);
+       struct rb_node *rbnext = rb_next(&prq->rb_node);
+
+       if (rbnext)
+               return rb_entry_prq(rbnext)->request;
+
+       return NULL;
+}
+
+static int ps_may_queue(request_queue_t *q, int rw)
+{
+       struct ps_data *psd = q->elevator->elevator_data;
+       struct ps_queue *psq;
+       int ret = ELV_MQUEUE_MAY;
+
+       if (current->flags & PF_MEMALLOC)
+               return ELV_MQUEUE_MAY;
+
+       psq = ps_find_ps_hash(psd, ps_hash_key(psd, current));
+       if (psq) {
+               int limit = psd->max_queued;
+
+               if (psq->allocated[rw] < psd->ps_queued)
+                       return ELV_MQUEUE_MUST;
+
+               if (psd->busy_queues)
+                       limit = q->nr_requests / psd->busy_queues;
+
+               if (limit < psd->ps_queued)
+                       limit = psd->ps_queued;
+               else if (limit > psd->max_queued)
+                       limit = psd->max_queued;
+
+               if (psq->allocated[rw] >= limit) {
+                       if (limit > psq->alloc_limit[rw])
+                               psq->alloc_limit[rw] = limit;
+
+                       ret = ELV_MQUEUE_NO;
+               }
+       }
+
+       return ret;
+}
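+
+/*
+ * Example with the defaults set up in ps_init_queue(): nr_requests is
+ * 1024 and max_queued is 64, so with 8 busy queues the raw limit is
+ * 1024 / 8 = 128, which is then clamped down to max_queued = 64
+ * requests per data direction for this queue.
+ */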
+
+static void ps_check_waiters(request_queue_t *q, struct ps_queue *psq)
+{
+       struct request_list *rl = &q->rq;
+       const int write = waitqueue_active(&rl->wait[WRITE]);
+       const int read = waitqueue_active(&rl->wait[READ]);
+
+       if (read && psq->allocated[READ] < psq->alloc_limit[READ])
+               wake_up(&rl->wait[READ]);
+       if (write && psq->allocated[WRITE] < psq->alloc_limit[WRITE])
+               wake_up(&rl->wait[WRITE]);
+}
+
+/*
+ * queue lock held here
+ */
+static void ps_put_request(request_queue_t *q, struct request *rq)
+{
+       struct ps_data *psd = q->elevator->elevator_data;
+       struct ps_rq *prq = RQ_DATA(rq);
+
+       if (prq) {
+               struct ps_queue *psq = prq->ps_queue;
+
+               BUG_ON(q->last_merge == rq);
+               BUG_ON(!hlist_unhashed(&prq->hash));
+
+               if (prq->io_context)
+                       put_io_context(prq->io_context->ioc);
+
+               BUG_ON(!psq->allocated[prq->is_write]);
+               psq->allocated[prq->is_write]--;
+
+               mempool_free(prq, psd->prq_pool);
+               rq->elevator_private = NULL;
+
+               smp_mb();
+               ps_check_waiters(q, psq);
+               ps_put_queue(psq);
+       }
+}
+
+/*
+ * Allocate ps data structures associated with this request. A queue and
+ * a prq are set up here; on failure, any references taken along the way
+ * are dropped again.
+ */
+static int ps_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
+{
+       struct ps_data *psd = q->elevator->elevator_data;
+       struct ps_io_context *pic;
+       const int rw = rq_data_dir(rq);
+       struct ps_queue *psq, *saved_psq;
+       struct ps_rq *prq;
+       unsigned long flags;
+
+       might_sleep_if(gfp_mask & __GFP_WAIT);
+
+       spin_lock_irqsave(q->queue_lock, flags);
+
+       psq = __ps_get_queue(psd, ps_hash_key(psd, current), gfp_mask);
+       if (!psq)
+               goto out_lock;
+
+repeat:
+       if (psq->allocated[rw] >= psd->max_queued)
+               goto out_lock;
+
+       psq->allocated[rw]++;
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       /*
+        * if hashing type has changed, the ps_queue might change here.
+        */
+       saved_psq = psq;
+       pic = ps_get_io_context(&psq, gfp_mask);
+       if (!pic)
+               goto err;
+
+       /*
+        * repeat allocation checks on queue change
+        */
+       if (unlikely(saved_psq != psq)) {
+               spin_lock_irqsave(q->queue_lock, flags);
+               saved_psq->allocated[rw]--;
+               goto repeat;
+       }
+
+       prq = mempool_alloc(psd->prq_pool, gfp_mask);
+       if (prq) {
+               RB_CLEAR(&prq->rb_node);
+               prq->rb_key = 0;
+               prq->request = rq;
+               INIT_HLIST_NODE(&prq->hash);
+               prq->ps_queue = psq;
+               prq->io_context = pic;
+               prq->service_start = prq->queue_start = 0;
+               prq->in_flight = prq->accounted = prq->is_sync = 0;
+               prq->is_write = rw;
+               rq->elevator_private = prq;
+               psq->alloc_limit[rw] = 0;
+               return 0;
+       }
+
+       put_io_context(pic->ioc);
+err:
+       spin_lock_irqsave(q->queue_lock, flags);
+       psq->allocated[rw]--;
+       ps_put_queue(psq);
+out_lock:
+       spin_unlock_irqrestore(q->queue_lock, flags);
+       return 1;
+}
+
+static void ps_put_psd(struct ps_data *psd)
+{
+       request_queue_t *q = psd->queue;
+
+       if (!atomic_dec_and_test(&psd->ref))
+               return;
+
+       blk_put_queue(q);
+
+       mempool_destroy(psd->prq_pool);
+       kfree(psd->prq_hash);
+       kfree(psd->ps_hash);
+       kfree(psd);
+}
+
+static void ps_exit_queue(elevator_t *e)
+{
+       ps_put_psd(e->elevator_data);
+}
+
+static int ps_init_queue(request_queue_t *q, elevator_t *e)
+{
+       struct ps_data *psd;
+       int i;
+
+       psd = kmalloc(sizeof(*psd), GFP_KERNEL);
+       if (!psd)
+               return -ENOMEM;
+
+       memset(psd, 0, sizeof(*psd));
+       INIT_LIST_HEAD(&psd->rr_list);
+       INIT_LIST_HEAD(&psd->empty_list);
+
+       psd->prq_hash = kmalloc(sizeof(struct hlist_head) * PS_MHASH_ENTRIES, GFP_KERNEL);
+       if (!psd->prq_hash)
+               goto out_prqhash;
+
+       psd->ps_hash = kmalloc(sizeof(struct hlist_head) * PS_QHASH_ENTRIES, GFP_KERNEL);
+       if (!psd->ps_hash)
+               goto out_pshash;
+
+       psd->prq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, prq_pool);
+       if (!psd->prq_pool)
+               goto out_prqpool;
+
+       for (i = 0; i < PS_MHASH_ENTRIES; i++)
+               INIT_HLIST_HEAD(&psd->prq_hash[i]);
+       for (i = 0; i < PS_QHASH_ENTRIES; i++)
+               INIT_HLIST_HEAD(&psd->ps_hash[i]);
+
+       e->elevator_data = psd;
+
+       psd->queue = q;
+       atomic_inc(&q->refcnt);
+
+       /*
+        * just set it to some high value, we want anyone to be able to queue
+        * some requests. fairness is handled differently
+        */
+       q->nr_requests = 1024;
+       psd->max_queued = q->nr_requests / 16;
+       q->nr_batching = ps_queued;
+       psd->key_type = PS_KEY_TGID;
+       psd->find_best_prq = 1;
+       atomic_set(&psd->ref, 1);
+
+       psd->ps_queued = ps_queued;
+       psd->ps_quantum = ps_quantum;
+       psd->ps_fifo_expire_r = ps_fifo_expire_r;
+       psd->ps_fifo_expire_w = ps_fifo_expire_w;
+       psd->ps_fifo_batch_expire = ps_fifo_rate;
+       psd->ps_back_max = ps_back_max;
+       psd->ps_back_penalty = ps_back_penalty;
+
+       return 0;
+out_prqpool:
+       kfree(psd->ps_hash);
+out_pshash:
+       kfree(psd->prq_hash);
+out_prqhash:
+       kfree(psd);
+       return -ENOMEM;
+}
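+
+/*
+ * With the values above, even a single very busy class can hold at most
+ * max_queued = 64 allocated requests per data direction, while any queue
+ * below ps_queued = 8 allocated requests may always allocate (see
+ * ps_may_queue()).
+ */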
+
+static void ps_slab_kill(void)
+{
+       if (prq_pool)
+               kmem_cache_destroy(prq_pool);
+       if (ps_pool)
+               kmem_cache_destroy(ps_pool);
+       if (ps_ioc_pool)
+               kmem_cache_destroy(ps_ioc_pool);
+}
+
+static int __init ps_slab_setup(void)
+{
+       prq_pool = kmem_cache_create("prq_pool", sizeof(struct ps_rq), 0, 0,
+                                       NULL, NULL);
+       if (!prq_pool)
+               goto fail;
+
+       ps_pool = kmem_cache_create("ps_pool", sizeof(struct ps_queue), 0, 0,
+                                       NULL, NULL);
+       if (!ps_pool)
+               goto fail;
+
+       ps_ioc_pool = kmem_cache_create("ps_ioc_pool",
+                       sizeof(struct ps_io_context), 0, 0, NULL, NULL);
+       if (!ps_ioc_pool)
+               goto fail;
+
+       return 0;
+fail:
+       ps_slab_kill();
+       return -ENOMEM;
+}
+
+
+/*
+ * sysfs parts below -->
+ */
+struct ps_fs_entry {
+       struct attribute attr;
+       ssize_t (*show)(struct ps_data *, char *);
+       ssize_t (*store)(struct ps_data *, const char *, size_t);
+};
+
+static ssize_t
+ps_var_show(unsigned int var, char *page)
+{
+       return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+ps_var_store(unsigned int *var, const char *page, size_t count)
+{
+       char *p = (char *) page;
+
+       *var = simple_strtoul(p, &p, 10);
+       return count;
+}
+
+static ssize_t
+ps_clear_elapsed(struct ps_data *psd, const char *page, size_t count)
+{
+       max_elapsed_dispatch = max_elapsed_prq = 0;
+       return count;
+}
+
+static ssize_t
+ps_set_key_type(struct ps_data *psd, const char *page, size_t count)
+{
+       spin_lock_irq(psd->queue->queue_lock);
+       if (!strncmp(page, "pgid", 4))
+               psd->key_type = PS_KEY_PGID;
+       else if (!strncmp(page, "tgid", 4))
+               psd->key_type = PS_KEY_TGID;
+       else if (!strncmp(page, "uid", 3))
+               psd->key_type = PS_KEY_UID;
+       else if (!strncmp(page, "gid", 3))
+               psd->key_type = PS_KEY_GID;
+       spin_unlock_irq(psd->queue->queue_lock);
+       return count;
+}
+
+static ssize_t
+ps_read_key_type(struct ps_data *psd, char *page)
+{
+       ssize_t len = 0;
+       int i;
+
+       for (i = PS_KEY_PGID; i < PS_KEY_LAST; i++) {
+               if (psd->key_type == i)
+                       len += sprintf(page+len, "[%s] ", ps_key_types[i]);
+               else
+                       len += sprintf(page+len, "%s ", ps_key_types[i]);
+       }
+       len += sprintf(page+len, "\n");
+       return len;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                           \
+static ssize_t __FUNC(struct ps_data *psd, char *page)         \
+{                                                                      \
+       unsigned int __data = __VAR;                                    \
+       if (__CONV)                                                     \
+               __data = jiffies_to_msecs(__data);                      \
+       return ps_var_show(__data, (page));                             \
+}
+SHOW_FUNCTION(ps_quantum_show, psd->ps_quantum, 0);
+SHOW_FUNCTION(ps_queued_show, psd->ps_queued, 0);
+SHOW_FUNCTION(ps_fifo_expire_r_show, psd->ps_fifo_expire_r, 1);
+SHOW_FUNCTION(ps_fifo_expire_w_show, psd->ps_fifo_expire_w, 1);
+SHOW_FUNCTION(ps_fifo_batch_expire_show, psd->ps_fifo_batch_expire, 1);
+SHOW_FUNCTION(ps_find_best_show, psd->find_best_prq, 0);
+SHOW_FUNCTION(ps_back_max_show, psd->ps_back_max, 0);
+SHOW_FUNCTION(ps_back_penalty_show, psd->ps_back_penalty, 0);
+#undef SHOW_FUNCTION
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                        \
+static ssize_t __FUNC(struct ps_data *psd, const char *page, size_t count)    \
+{                                                                      \
+       unsigned int __data;                                            \
+       int ret = ps_var_store(&__data, (page), count);         \
+       if (__data < (MIN))                                             \
+               __data = (MIN);                                         \
+       else if (__data > (MAX))                                        \
+               __data = (MAX);                                         \
+       if (__CONV)                                                     \
+               *(__PTR) = msecs_to_jiffies(__data);                    \
+       else                                                            \
+               *(__PTR) = __data;                                      \
+       return ret;                                                     \
+}
+STORE_FUNCTION(ps_quantum_store, &psd->ps_quantum, 1, UINT_MAX, 0);
+STORE_FUNCTION(ps_queued_store, &psd->ps_queued, 1, UINT_MAX, 0);
+STORE_FUNCTION(ps_fifo_expire_r_store, &psd->ps_fifo_expire_r, 1, UINT_MAX, 1);
+STORE_FUNCTION(ps_fifo_expire_w_store, &psd->ps_fifo_expire_w, 1, UINT_MAX, 1);
+STORE_FUNCTION(ps_fifo_batch_expire_store, &psd->ps_fifo_batch_expire, 0, UINT_MAX, 1);
+STORE_FUNCTION(ps_find_best_store, &psd->find_best_prq, 0, 1, 0);
+STORE_FUNCTION(ps_back_max_store, &psd->ps_back_max, 0, UINT_MAX, 0);
+STORE_FUNCTION(ps_back_penalty_store, &psd->ps_back_penalty, 1, UINT_MAX, 0);
+#undef STORE_FUNCTION
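+
+/*
+ * Each SHOW/STORE pair expands to a small sysfs accessor; for instance,
+ * ps_quantum_store() parses the written string with ps_var_store(),
+ * clamps it to [1, UINT_MAX] and stores it in psd->ps_quantum, while the
+ * __CONV variants additionally translate between milliseconds (the
+ * user-visible unit) and jiffies (the internal one).
+ */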
+
+static struct ps_fs_entry ps_quantum_entry = {
+       .attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
+       .show = ps_quantum_show,
+       .store = ps_quantum_store,
+};
+static struct ps_fs_entry ps_queued_entry = {
+       .attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
+       .show = ps_queued_show,
+       .store = ps_queued_store,
+};
+static struct ps_fs_entry ps_fifo_expire_r_entry = {
+       .attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR },
+       .show = ps_fifo_expire_r_show,
+       .store = ps_fifo_expire_r_store,
+};
+static struct ps_fs_entry ps_fifo_expire_w_entry = {
+       .attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR },
+       .show = ps_fifo_expire_w_show,
+       .store = ps_fifo_expire_w_store,
+};
+static struct ps_fs_entry ps_fifo_batch_expire_entry = {
+       .attr = {.name = "fifo_batch_expire", .mode = S_IRUGO | S_IWUSR },
+       .show = ps_fifo_batch_expire_show,
+       .store = ps_fifo_batch_expire_store,
+};
+static struct ps_fs_entry ps_find_best_entry = {
+       .attr = {.name = "find_best_prq", .mode = S_IRUGO | S_IWUSR },
+       .show = ps_find_best_show,
+       .store = ps_find_best_store,
+};
+static struct ps_fs_entry ps_back_max_entry = {
+       .attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR },
+       .show = ps_back_max_show,
+       .store = ps_back_max_store,
+};
+static struct ps_fs_entry ps_back_penalty_entry = {
+       .attr = {.name = "back_seek_penalty", .mode = S_IRUGO | S_IWUSR },
+       .show = ps_back_penalty_show,
+       .store = ps_back_penalty_store,
+};
+static struct ps_fs_entry ps_clear_elapsed_entry = {
+       .attr = {.name = "clear_elapsed", .mode = S_IWUSR },
+       .store = ps_clear_elapsed,
+};
+static struct ps_fs_entry ps_key_type_entry = {
+       .attr = {.name = "key_type", .mode = S_IRUGO | S_IWUSR },
+       .show = ps_read_key_type,
+       .store = ps_set_key_type,
+};
+
+static struct attribute *default_attrs[] = {
+       &ps_quantum_entry.attr,
+       &ps_queued_entry.attr,
+       &ps_fifo_expire_r_entry.attr,
+       &ps_fifo_expire_w_entry.attr,
+       &ps_fifo_batch_expire_entry.attr,
+       &ps_key_type_entry.attr,
+       &ps_find_best_entry.attr,
+       &ps_back_max_entry.attr,
+       &ps_back_penalty_entry.attr,
+       &ps_clear_elapsed_entry.attr,
+       NULL,
+};
+
+#define to_ps(atr) container_of((atr), struct ps_fs_entry, attr)
+
+static ssize_t
+ps_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+       elevator_t *e = container_of(kobj, elevator_t, kobj);
+       struct ps_fs_entry *entry = to_ps(attr);
+
+       if (!entry->show)
+               return 0;
+
+       return entry->show(e->elevator_data, page);
+}
+
+static ssize_t
+ps_attr_store(struct kobject *kobj, struct attribute *attr,
+              const char *page, size_t length)
+{
+       elevator_t *e = container_of(kobj, elevator_t, kobj);
+       struct ps_fs_entry *entry = to_ps(attr);
+
+       if (!entry->store)
+               return -EINVAL;
+
+       return entry->store(e->elevator_data, page, length);
+}
+
+static struct sysfs_ops ps_sysfs_ops = {
+       .show   = ps_attr_show,
+       .store  = ps_attr_store,
+};
+
+struct kobj_type ps_ktype = {
+       .sysfs_ops      = &ps_sysfs_ops,
+       .default_attrs  = default_attrs,
+};
+
+static struct elevator_type iosched_ps = {
+       .ops = {
+               .elevator_merge_fn =            ps_merge,
+               .elevator_merged_fn =           ps_merged_request,
+               .elevator_merge_req_fn =        ps_merged_requests,
+               .elevator_next_req_fn =         ps_next_request,
+               .elevator_add_req_fn =          ps_insert_request,
+               .elevator_remove_req_fn =       ps_remove_request,
+               .elevator_requeue_req_fn =      ps_requeue_request,
+               .elevator_queue_empty_fn =      ps_queue_empty,
+               .elevator_completed_req_fn =    ps_completed_request,
+               .elevator_former_req_fn =       ps_former_request,
+               .elevator_latter_req_fn =       ps_latter_request,
+               .elevator_set_req_fn =          ps_set_request,
+               .elevator_put_req_fn =          ps_put_request,
+               .elevator_may_queue_fn =        ps_may_queue,
+               .elevator_init_fn =             ps_init_queue,
+               .elevator_exit_fn =             ps_exit_queue,
+       },
+       .elevator_ktype =       &ps_ktype,
+       .elevator_name =        "ps",
+       .elevator_owner =       THIS_MODULE,
+};
+
+static int __init ps_init(void)
+{
+       int ret;
+
+       if (ps_slab_setup())
+               return -ENOMEM;
+
+       ret = elv_register(&iosched_ps);
+       if (!ret) {
+               __module_get(THIS_MODULE);
+               return 0;
+       }
+
+       ps_slab_kill();
+       return ret;
+}
+
+static void __exit ps_exit(void)
+{
+       elv_unregister(&iosched_ps);
+       ps_slab_kill();
+}
+
+module_init(ps_init);
+module_exit(ps_exit);
+
+MODULE_AUTHOR("Shailabh Nagar");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CKRM Proportional Share IO scheduler");
Index: linux-2.6.12-rc3/include/linux/blkdev.h
===================================================================
--- linux-2.6.12-rc3.orig/include/linux/blkdev.h
+++ linux-2.6.12-rc3/include/linux/blkdev.h
@@ -66,6 +66,21 @@ struct cfq_io_context {
        struct cfq_queue *cfqq;
 };
 
+struct ps_queue;
+struct ps_io_context {
+       void (*dtor)(struct ps_io_context *);
+       void (*exit)(struct ps_io_context *);
+
+       struct io_context *ioc;
+
+       /*
+        * circular list of ps_io_contexts belonging to a process io context
+        */
+       struct list_head list;
+       struct ps_queue *psq;
+};
+
 /*
  * This is the per-process I/O subsystem state.  It is refcounted and
  * kmalloc'ed. Currently all fields are modified in process io context
@@ -85,6 +100,7 @@ struct io_context {
 
        struct as_io_context *aic;
        struct cfq_io_context *cic;
+       struct ps_io_context *pic;
 };
 
 void put_io_context(struct io_context *ioc);

