Add a set of worker threads to send data back to the kernel.  The kernel
can sleep during our write() call while it maps pages, so hand the
write() off to worker threads instead of issuing it inline.  This should
eliminate that bottleneck in the code path.

Signed-off-by: [EMAIL PROTECTED]

---
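For reviewers who want the hand-off scheme in isolation: below is a
minimal, self-contained sketch of the counter-plus-condvar pattern the
patch uses.  Every name in it is illustrative (nothing here is tgt
code), and it writes a byte to stdout where the patch writes to the
tgt character device.

/* build: cc -pthread sketch.c -o sketch */
#include <pthread.h>
#include <unistd.h>

#define NR_WORKERS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int outstanding;         /* work items queued for the workers */

static void *worker_fn(void *arg)
{
        (void) arg;
        for (;;) {
                pthread_mutex_lock(&lock);
                /* the loop guards against spurious wakeups */
                while (!outstanding)
                        pthread_cond_wait(&cond, &lock);
                outstanding--;
                pthread_mutex_unlock(&lock);

                /* stand-in for the potentially blocking write() */
                write(STDOUT_FILENO, ".", 1);
        }
        return NULL;
}

static void kick(void)
{
        pthread_mutex_lock(&lock);
        outstanding++;
        pthread_mutex_unlock(&lock);
        pthread_cond_signal(&cond);     /* wake one worker */
}

int main(void)
{
        pthread_t tid[NR_WORKERS];
        int i;

        for (i = 0; i < NR_WORKERS; i++)
                pthread_create(&tid[i], NULL, worker_fn, NULL);
        for (i = 0; i < 16; i++)
                kick();
        sleep(1);       /* crude: let the workers drain the queue */
        return 0;
}

pthread_cond_signal() wakes at most one waiter, which is all a single
queued write needs; the mutex only protects the counter, so the slow
write() itself happens with no locks held.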
 usr/tgtif.c |   65 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 60 insertions(+), 5 deletions(-)

Index: tgt/usr/tgtif.c
===================================================================
--- tgt.orig/usr/tgtif.c        2007-10-23 15:58:37.000000000 -0500
+++ tgt/usr/tgtif.c     2007-10-23 16:26:08.000000000 -0500
@@ -23,6 +23,7 @@
 #include <fcntl.h>
 #include <getopt.h>
 #include <inttypes.h>
+#include <pthread.h>
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -45,10 +46,20 @@
 #define barrier() __asm__ __volatile__("": : :"memory")
 
 struct uring {
+       pthread_mutex_t idx_lock;       /* serializes updates of idx */
        uint32_t idx;
        char *buf;
 };
 
+#define NR_WORKER_THREADS      4
+struct kreq_send_worker_info {
+       pthread_t thread[NR_WORKER_THREADS];
+
+       pthread_cond_t cond;
+       pthread_mutex_t lock;
+       int outstanding;        /* writes queued for the workers */
+} kreq_send_worker;
+
 static struct uring kuring, ukring;
 static int chrfd;
 
@@ -70,22 +81,52 @@
        return (struct tgt_event *) (ring->buf + pos);
 }
 
+static void *kreq_send_worker_fn(void *arg)
+{
+       while (1) {
+               pthread_mutex_lock(&kreq_send_worker.lock);
+               /* re-check the counter on every wakeup, since
+                * pthread_cond_wait() can return spuriously */
+               while (!kreq_send_worker.outstanding)
+                       pthread_cond_wait(&kreq_send_worker.cond,
+                                         &kreq_send_worker.lock);
+
+               kreq_send_worker.outstanding--;
+               pthread_mutex_unlock(&kreq_send_worker.lock);
+               /* this write() may sleep in the kernel while it maps
+                * pages, which is why it runs in a worker thread */
+               write(chrfd, &kreq_send_worker.outstanding, 1);
+       }
+}
+
 static int kreq_send(struct tgt_event *p)
 {
        struct tgt_event *ev;
+       int ret = 0;
 
+       pthread_mutex_lock(&ukring.idx_lock);
        ev = head_ring_hdr(&ukring);
-       if (ev->hdr.status)
-               return -ENOMEM;
+       if (ev->hdr.status) {
+               pthread_mutex_unlock(&ukring.idx_lock);
+               ret = -ENOMEM;
+               goto out;
+       }
 
        ring_index_inc(&ukring);
+       pthread_mutex_unlock(&ukring.idx_lock);
 
        memcpy(ev, p, sizeof(*p));
        barrier();
        ev->hdr.status = 1;
-       write(chrfd, ev, 1);
 
-       return 0;
+       /* hand the potentially blocking write() off to a worker */
+       pthread_mutex_lock(&kreq_send_worker.lock);
+       kreq_send_worker.outstanding++;
+       pthread_mutex_unlock(&kreq_send_worker.lock);
+       pthread_cond_signal(&kreq_send_worker.cond);
+
+out:
+       return ret;
 }
 
 int kspace_send_tsk_mgmt_res(struct mgmt_req *mreq)
@@ -330,7 +371,7 @@
 
 int kreq_init(void)
 {
-       int err, size = TGT_RING_SIZE;
+       int i, err, size = TGT_RING_SIZE;
        char *buf;
 
        err = tgt_miscdev_init(CHRDEV_PATH, &chrfd);
@@ -355,7 +396,21 @@
        kuring.buf = buf;
        ukring.buf = buf + size;
 
+       pthread_mutex_init(&kuring.idx_lock, NULL);
+       pthread_mutex_init(&ukring.idx_lock, NULL);
+       pthread_cond_init(&kreq_send_worker.cond, NULL);
+       pthread_mutex_init(&kreq_send_worker.lock, NULL);
+       kreq_send_worker.outstanding = 0;
+       for (i = 0; i < NR_WORKER_THREADS; i++) {
+               err = pthread_create(&kreq_send_worker.thread[i], NULL,
+                                    kreq_send_worker_fn, NULL);
+               if (err)
+                       goto out;
+       }
+
        err = tgt_event_add(chrfd, EPOLLIN, kern_event_handler, NULL);
+
+out:
        if (err)
                close(chrfd);
        return err;