This project has been stopped, since it cannot compete with synchronous
buffered reading. The analysis is below.
The best results achieved are 2100 MB/s for buffered reading vs.
2000 MB/s for AIO at the same CPU usage, on a 2.4 GHz Xeon with 1 GB of RAM
and an ext3 filesystem.
In both cases the read is in fact a synchronous, non-blocking copy out of the
VFS cache, done either with copy_to_user() or with memcpy() into pinned
userspace pages.
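For reference, here is a minimal userspace sketch of the buffered-read
baseline used for the numbers above; the file name, iteration count and
buffer size are arbitrary placeholders, and the file is assumed to be fully
resident in the VFS cache (e.g. read once beforehand):

/*
 * Rough baseline: repeatedly read() a cached file and report MB/s.
 * Placeholders only -- real numbers depend on the machine and file size.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>

int main(void)
{
	const size_t bufsize = 32 * 4096;	/* 32 pages per read() call */
	char *buf = malloc(bufsize);
	struct timeval start, end;
	long long total = 0;
	double sec;
	ssize_t n;
	int i, fd;

	if (!buf)
		return 1;

	gettimeofday(&start, NULL);
	for (i = 0; i < 100; ++i) {
		fd = open("/tmp/testfile", O_RDONLY);	/* assumed to be cached */
		if (fd < 0)
			return 1;
		while ((n = read(fd, buf, bufsize)) > 0)
			total += n;
		close(fd);
	}
	gettimeofday(&end, NULL);

	sec = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) / 1e6;
	printf("%.1f MB/s\n", total / sec / (1024 * 1024));
	return 0;
}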

Attached is a performance graph comparing copy_to_user() on i386 with
(optimized) memcpy().
The memcpy() transfer is split into chunks of 1, 4 or 32 pages per
memcpy() call.

The vertical axis is the number of megabytes per second transferred, using
either memcpy() with the given number of pages per call or copy_to_user(),
which transfers the whole buffer at once. The horizontal axis is the number
of pages transferred per system call (in the memcpy() case this transfer is
split into chunks of 32, 4 or 1 pages).

As the graph shows, when data is transferred with memcpy() one page per
call, it is impossible to match the performance of copy_to_user()
transferring the whole buffer at once.

Since pinned userspace pages can only be accessed one at a time, the AIO path
corresponds to the 'memcpy_1_page' case, i.e. the purple curve.

If we add the kmap_atomic()/kunmap_atomic() overhead of highmem-enabled
kernels, which halves performance, the picture becomes even worse.
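To make the comparison concrete, here is a schematic (not taken from the
patch) of the two copy paths measured on the graph; kbuf, ubuf, pages and
nr_pages are placeholder names, and kmap_atomic()/kunmap_atomic() follow the
same 2.6-era API used in the patch below:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/uaccess.h>

/* Buffered read path: one copy_to_user() covering the whole request. */
static unsigned long copy_whole_buffer(void __user *ubuf, void *kbuf, size_t len)
{
	return copy_to_user(ubuf, kbuf, len);
}

/*
 * AIO completion path: pinned userspace pages are only reachable one page
 * at a time, so the copy degenerates into a per-page kmap_atomic()/memcpy()
 * loop -- the 'memcpy_1_page' curve on the attached graph.
 */
static void copy_into_pinned_pages(struct page **pages, int nr_pages, void *kbuf)
{
	int i;

	for (i = 0; i < nr_pages; ++i) {
		char *kaddr = kmap_atomic(pages[i], KM_USER0);

		memcpy(kaddr, kbuf + i * PAGE_SIZE, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
	}
}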

For the interested reader, a kevent-based AIO read patch for the ext3
filesystem is attached. It does not populate the read pages into the VFS cache.
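For illustration only, a hypothetical sketch of how a caller could describe
such an AIO read; the ukevent field usage follows kevent_aio_enqueue() in the
patch, while the header name and the control-descriptor add/wait interface
belong to the rest of the kevent patchset and are assumptions here:

#include <stdlib.h>
#include <string.h>
#include <linux/ukevent.h>	/* assumed header exporting struct ukevent and KEVENT_* */

static int submit_aio_read(int kevent_fd, int file_fd, size_t size)
{
	struct ukevent uk;
	void *buf;

	/* kevent_aio_enqueue() rejects buffers that are not page aligned. */
	if (posix_memalign(&buf, 4096, size))
		return -1;

	memset(&uk, 0, sizeof(uk));
	uk.type = KEVENT_AIO;		/* event type added by this patch */
	uk.event = KEVENT_AIO_BIO;	/* completion is signalled from the BIO destructor */
	uk.req_flags = KEVENT_REQ_ONESHOT;
	uk.id.raw[0] = file_fd;		/* file to read from */
	uk.id.raw[1] = size;		/* request size, a multiple of PAGE_SIZE */
	uk.ptr = buf;

	/*
	 * 'uk' would then be submitted through the kevent control descriptor's
	 * "add" operation and collected later with "wait"; on completion
	 * kevent_aio_callback() writes the number of still-outstanding bytes
	 * back into id.raw[1] (zero on full success).
	 */
	(void)kevent_fd;
	return 0;
}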

Signed-off-by: Evgeniy Polyakov <[EMAIL PROTECTED]>

diff --git a/fs/bio.c b/fs/bio.c
index 460554b..2bbaf6a 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -118,7 +118,7 @@ void bio_free(struct bio *bio, struct bi
 /*
  * default destructor for a bio allocated with bio_alloc_bioset()
  */
-static void bio_fs_destructor(struct bio *bio)
+void bio_fs_destructor(struct bio *bio)
 {
        bio_free(bio, fs_bio_set);
 }
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 8824e84..02d04c9 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -787,7 +787,7 @@ out:
        return err;
 }
 
-static int ext3_get_block(struct inode *inode, sector_t iblock,
+int ext3_get_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create)
 {
        handle_t *handle = NULL;
diff --git a/fs/file_table.c b/fs/file_table.c
index 7d73a2b..9be1420 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -90,7 +90,7 @@ struct file *get_empty_filp(void)
                goto fail_sec;
 
 #ifdef CONFIG_KEVENT_POLL
-       kevent_storage_init(KEVENT_POLL, KEVENT_MASK_EMPTY, f, &f->st);
+       kevent_storage_init(f, &f->st);
 #endif
        eventpoll_init_file(f);
        atomic_set(&f->f_count, 1);
diff --git a/fs/inode.c b/fs/inode.c
index 185bd67..11bbca4 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -166,8 +166,8 @@ static struct inode *alloc_inode(struct 
                }
                memset(&inode->u, 0, sizeof(inode->u));
                inode->i_mapping = mapping;
-#if defined CONFIG_KEVENT_INODE || defined CONFIG_KEVENT_SOCKET
-               kevent_storage_init(KEVENT_INODE, KEVENT_MASK_EMPTY, inode, &inode->st);
+#if defined CONFIG_KEVENT
+               kevent_storage_init(inode, &inode->st);
 #endif
        }
        return inode;
diff --git a/include/linux/kevent.h b/include/linux/kevent.h
index 64926bc..376fedc 100644
--- a/include/linux/kevent.h
+++ b/include/linux/kevent.h
@@ -43,6 +43,7 @@ enum {
        KEVENT_TIMER,
        KEVENT_POLL,
        KEVENT_NAIO,
+       KEVENT_AIO,
 
        KEVENT_MAX,
 };
@@ -95,6 +96,13 @@ enum {
        KEVENT_POLL_POLLREMOVE  = 0x1000,
 };
 
+/*
+ * Asynchronous IO events.
+ */
+enum {
+       KEVENT_AIO_BIO          = 0x1,
+};
+
 #define KEVENT_MASK_ALL                0xffffffff      /* Mask of all possible event values. */
 #define KEVENT_MASK_EMPTY      0x0             /* Empty mask of ready events. */
 
@@ -198,6 +206,11 @@ struct kevent_user
        wait_queue_head_t       wait;                   /* Wait until some events are ready. */
 
        atomic_t                refcnt;                 /* Reference counter, increased for each new kevent. */
+#ifdef CONFIG_KEVENT_USER_STAT
+       unsigned long           im_num;
+       unsigned long           wait_num;
+       unsigned long           total;
+#endif
 };
 
 #define KEVENT_MAX_REQUESTS            PAGE_SIZE/sizeof(struct kevent)
@@ -208,8 +221,6 @@ int kevent_enqueue(struct kevent *k);
 int kevent_dequeue(struct kevent *k);
 int kevent_init(struct kevent *k);
 void kevent_requeue(struct kevent *k);
-int kevent_storage_enqueue(struct kevent_storage *st, struct kevent *k);
-void kevent_storage_dequeue(struct kevent_storage *st, struct kevent *k);
 
 #define list_for_each_entry_reverse_safe(pos, n, head, member)                 \
        for (pos = list_entry((head)->prev, typeof(*pos), member),      \
@@ -225,10 +236,13 @@ int kevent_init_inode(struct kevent *k);
 int kevent_init_timer(struct kevent *k);
 int kevent_init_poll(struct kevent *k);
 int kevent_init_naio(struct kevent *k);
+int kevent_init_aio(struct kevent *k);
 
 void kevent_storage_ready(struct kevent_storage *st, kevent_callback_t ready_callback, u32 event);
-int kevent_storage_init(__u32 type, __u32 event, void *origin, struct kevent_storage *st);
+int kevent_storage_init(void *origin, struct kevent_storage *st);
 void kevent_storage_fini(struct kevent_storage *st);
+int kevent_storage_enqueue(struct kevent_storage *st, struct kevent *k);
+void kevent_storage_dequeue(struct kevent_storage *st, struct kevent *k);
 
 int kevent_user_add_ukevent(struct ukevent *uk, struct kevent_user *u);
 
diff --git a/include/linux/kevent_storage.h b/include/linux/kevent_storage.h
index 7c68170..bd891f0 100644
--- a/include/linux/kevent_storage.h
+++ b/include/linux/kevent_storage.h
@@ -3,15 +3,6 @@
 
 struct kevent_storage
 {
-       __u32                   type;                   /* Event type, e.g. KEVENT_SOCK, KEVENT_INODE, KEVENT_TIMER and so on... */
-       __u32                   event;                  /* Event itself, e.g. SOCK_ACCEPT, INODE_CREATED, TIMER_FIRED,
-                                                        * which were NOT updated by any kevent in origin's queue,
-                                                        * i.e. when new event happens and there are no requests in
-                                                        * origin's queue for that event, it will be placed here.
-                                                        * New events are ORed with old one, so when new kevent is being added
-                                                        * into origin's queue, it just needs to check if requested event
-                                                        * is in this mask, and if so, return positive value from ->enqueu()
-                                                        */
        void                    *origin;                /* Originator's pointer, e.g. struct sock or struct file. Can be NULL. */
        struct list_head        list;                   /* List of queued kevents. */
        unsigned int            qlen;                   /* Number of queued kevents. */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 8788625..c14ae91 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -952,7 +952,7 @@ static __inline__ int tcp_prequeue(struc
                        tp->ucopy.memory = 0;
                } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
                        wake_up_interruptible(sk->sk_sleep);
-                       kevent_socket_notify(sk, KEVENT_SOCKET_RECV);
+                       kevent_socket_notify(sk, KEVENT_SOCKET_RECV|KEVENT_SOCKET_SEND);
                        if (!inet_csk_ack_scheduled(sk))
                                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                          (3 * TCP_RTO_MIN) / 4,
diff --git a/kernel/kevent/Kconfig b/kernel/kevent/Kconfig
index a52a86f..88b35af 100644
--- a/kernel/kevent/Kconfig
+++ b/kernel/kevent/Kconfig
@@ -5,6 +5,17 @@ config KEVENT
          It can be used as replacement for poll()/select(), AIO callback invocations,
          advanced timer notifications and other kernel object status changes.
 
+config KEVENT_USER_STAT
+       bool "Kevent user statistic"
+       depends on KEVENT
+       default N
+       help
+         This option will turn kevent_user statistic collection on.
+         Statistic data includes total number of kevent, number of kevents which are ready
+         immediately at insertion time and number of kevents which were removed through
+         readiness completion. It will be printed each time control kevent descriptor
+         is closed.
+
 config KEVENT_SOCKET
        bool "Kernel event notifications for sockets"
        depends on NET && KEVENT
@@ -34,6 +45,13 @@ config KEVENT_POLL
 
 config KEVENT_NAIO
        bool "Network asynchronous IO"
-       depends on KEVENT_SOCKET
+       depends on KEVENT && KEVENT_SOCKET
        help
          This option enables kevent based network asynchronous IO subsystem.
+
+config KEVENT_AIO
+       bool "Asynchronous IO"
+       depends on KEVENT
+       help
+         This option allows to use kevent subsystem for AIO operations.
+         AIO read is currently supported.
diff --git a/kernel/kevent/Makefile b/kernel/kevent/Makefile
index 2bc7135..7dcd651 100644
--- a/kernel/kevent/Makefile
+++ b/kernel/kevent/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_KEVENT_INODE) += kevent_ino
 obj-$(CONFIG_KEVENT_TIMER) += kevent_timer.o
 obj-$(CONFIG_KEVENT_POLL) += kevent_poll.o
 obj-$(CONFIG_KEVENT_NAIO) += kevent_naio.o
+obj-$(CONFIG_KEVENT_AIO) += kevent_aio.o
diff --git a/kernel/kevent/kevent.c b/kernel/kevent/kevent.c
index 5c3a141..9bccc66 100644
--- a/kernel/kevent/kevent.c
+++ b/kernel/kevent/kevent.c
@@ -102,6 +102,9 @@ int kevent_init(struct kevent *k)
                case KEVENT_POLL:
                        err = kevent_init_poll(k);
                        break;
+               case KEVENT_AIO:
+                       err = kevent_init_aio(k);
+                       break;
                default:
                        err = -ENODEV;
        }
@@ -110,52 +113,19 @@ int kevent_init(struct kevent *k)
 }
 
 /*
- * Checks if "event" is requested by given kevent and if so setup kevent's ret_data.
- * Also updates storage's mask of pending events.
- * Returns set of events requested by user which are ready.
- */
-static inline u32 kevent_set_event(struct kevent_storage *st, struct kevent *k, u32 event)
-{
-       u32 ev = event & k->event.event;
-
-       st->event &= ~ev;
-#if 0
-       if (ev)
-               k->event.ret_data[1] = ev;
-#endif
-       return ev;
-}
-
-/*
  * Called from ->enqueue() callback when reference counter for given
  * origin (socket, inode...) has been increased.
  */
 int kevent_storage_enqueue(struct kevent_storage *st, struct kevent *k)
 {
        unsigned long flags;
-       u32 ev;
 
        k->st = st;
        spin_lock_irqsave(&st->lock, flags);
-       
-       spin_lock(&k->lock);
-       ev = kevent_set_event(st, k, st->event);
-       spin_unlock(&k->lock);
-       
        list_add_tail(&k->storage_entry, &st->list);
        st->qlen++;
-       
        spin_unlock_irqrestore(&st->lock, flags);
-#if 0
-       if (ev) {
-               spin_lock_irqsave(&k->user->ready_lock, flags);
-               list_add_tail(&k->ready_entry, &k->user->ready_list);
-               k->user->ready_num++;
-               spin_unlock_irqrestore(&k->user->ready_lock, flags);
-               wake_up(&k->user->wait);
-       }
-#endif 
-       return !!ev;
+       return 0;
 }
 
 /*
@@ -179,12 +149,11 @@ void kevent_storage_dequeue(struct keven
 static void __kevent_requeue(struct kevent *k, u32 event)
 {
        int err, rem = 0;
-               
-       wake_up(&k->user->wait);
+       unsigned long flags;
 
        err = k->callback(k);
 
-       spin_lock(&k->lock);
+       spin_lock_irqsave(&k->lock, flags);
        if (err > 0) {
                k->event.ret_flags |= KEVENT_RET_DONE;
        } else if (err < 0) {
@@ -192,19 +161,21 @@ static void __kevent_requeue(struct keve
                k->event.ret_flags |= KEVENT_RET_DONE;
        }
        rem = (k->event.req_flags & KEVENT_REQ_ONESHOT);
-       spin_unlock(&k->lock);
+       spin_unlock_irqrestore(&k->lock, flags);
 
        if (err) {
                if (rem) {
                        list_del(&k->storage_entry);
                        k->st->qlen--;
                }
-               spin_lock(&k->user->ready_lock);
+               
+               spin_lock_irqsave(&k->user->ready_lock, flags);
                if (k->ready_entry.next == LIST_POISON1) {
                        list_add_tail(&k->ready_entry, &k->user->ready_list);
                        k->user->ready_num++;
                }
-               spin_unlock(&k->user->ready_lock);
+               spin_unlock_irqrestore(&k->user->ready_lock, flags);
+               wake_up(&k->user->wait);
        }
 }
 
@@ -222,39 +193,22 @@ void kevent_requeue(struct kevent *k)
  */
 void kevent_storage_ready(struct kevent_storage *st, kevent_callback_t ready_callback, u32 event)
 {
-       //unsigned long flags;
        struct kevent *k, *n;
-       unsigned int qlen;
-       u32 ev = 0;
 
-       spin_lock_bh(&st->lock);
-       st->event |= event;
-       qlen = st->qlen;
-       
-       if (qlen) {
-               list_for_each_entry_safe(k, n, &st->list, storage_entry) {
-                       if (qlen-- <= 0)
-                               break;
-
-                       if (ready_callback)
-                               ready_callback(k);
-
-                       ev |= (event & k->event.event);
+       spin_lock(&st->lock);
+       list_for_each_entry_safe(k, n, &st->list, storage_entry) {
+               if (ready_callback)
+                       ready_callback(k);
 
-                       if (event & k->event.event)
-                               __kevent_requeue(k, event);
-               }
+               if (event & k->event.event)
+                       __kevent_requeue(k, event);
        }
-       
-       st->event &= ~ev;
-       spin_unlock_bh(&st->lock);
+       spin_unlock(&st->lock);
 }
 
-int kevent_storage_init(__u32 type, __u32 event, void *origin, struct kevent_storage *st)
+int kevent_storage_init(void *origin, struct kevent_storage *st)
 {
        spin_lock_init(&st->lock);
-       st->type = type;
-       st->event = event;
        st->origin = origin;
        st->qlen = 0;
        INIT_LIST_HEAD(&st->list);
@@ -280,6 +234,8 @@ struct kevent *kevent_alloc(gfp_t mask)
 
 void kevent_free(struct kevent *k)
 {
+       memset(k, 0xab, sizeof(struct kevent));
+
        if (kevent_cache)
                kmem_cache_free(kevent_cache, k);
        else
diff --git a/kernel/kevent/kevent_aio.c b/kernel/kevent/kevent_aio.c
new file mode 100644
index 0000000..3f76803
--- /dev/null
+++ b/kernel/kevent/kevent_aio.c
@@ -0,0 +1,361 @@
+/*
+ *     kevent_aio.c
+ * 
+ * 2006 Copyright (c) Evgeniy Polyakov <[EMAIL PROTECTED]>
+ * All rights reserved.
+ * 
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/bio.h>
+#include <linux/buffer_head.h>
+#include <linux/kevent.h>
+
+struct kevent_aio_private
+{
+       struct page             **pages;
+       int                     pg_num;
+       int                     bio_num;
+       size_t                  size;
+       loff_t                  offset;
+};
+
+extern void bio_fs_destructor(struct bio *bio);
+
+static void kevent_aio_bio_destructor(struct bio *bio)
+{
+       struct inode *inode = bio->bi_private;
+       kevent_storage_ready(&inode->st, NULL, KEVENT_AIO_BIO);
+       bio_fs_destructor(bio);
+}
+
+static int kevent_mpage_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
+{
+       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+
+       if (bio->bi_size)
+               return 1;
+
+       do {
+               struct page *page = bvec->bv_page;
+
+               if (--bvec >= bio->bi_io_vec)
+                       prefetchw(&bvec->bv_page->flags);
+
+               if (uptodate) {
+                       SetPageUptodate(page);
+               } else {
+                       ClearPageUptodate(page);
+                       SetPageError(page);
+               }
+       } while (bvec >= bio->bi_io_vec);
+
+       bio_put(bio);
+       return 0;
+}
+
+static inline struct bio *kevent_mpage_bio_submit(int rw, struct bio *bio)
+{
+       if (bio) {
+               bio->bi_end_io = kevent_mpage_end_io_read;
+               submit_bio(READ, bio);
+       }
+       return NULL;
+}
+
+static struct bio *kevent_mpage_readpage(struct bio *bio, struct inode *inode, struct page *page,
+               unsigned nr_pages, get_block_t get_block, loff_t *offset, sector_t *last_block_in_bio,
+               int *bio_num)
+{
+       const unsigned blkbits = inode->i_blkbits;
+       const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+       const unsigned blocksize = 1 << blkbits;
+       sector_t block_in_file;
+       sector_t last_block;
+       struct block_device *bdev = NULL;
+       unsigned first_hole = blocks_per_page;
+       unsigned page_block;
+       sector_t blocks[MAX_BUF_PER_PAGE];
+       struct buffer_head bh;
+       int fully_mapped = 1, length;
+
+       block_in_file = (*offset + blocksize - 1) >> blkbits;
+       last_block = (i_size_read(inode) + blocksize - 1) >> blkbits;
+
+       bh.b_page = page;
+       for (page_block = 0; page_block < blocks_per_page; page_block++, block_in_file++) {
+               bh.b_state = 0;
+               if (block_in_file < last_block) {
+                       if (get_block(inode, block_in_file, &bh, 0)) {
+                               printk("%s: confused: get_block failed: page_block=%u.\n", __func__, page_block);
+                               goto confused;
+                       }
+               }
+
+               if (!buffer_mapped(&bh)) {
+                       fully_mapped = 0;
+                       if (first_hole == blocks_per_page)
+                               first_hole = page_block;
+                       continue;
+               }
+
+               /* some filesystems will copy data into the page during
+                * the get_block call, in which case we don't want to
+                * read it again.  map_buffer_to_page copies the data
+                * we just collected from get_block into the page's buffers
+                * so readpage doesn't have to repeat the get_block call
+                */
+               if (buffer_uptodate(&bh)) {
+                       BUG();
+                       //map_buffer_to_page(page, &bh, page_block);
+                       goto confused;
+               }
+       
+               if (first_hole != blocks_per_page) {
+                       printk("%s: confused: page_block=%u, first_hole=%u, blocks_per_page=%u.\n",
+                                       __func__, page_block, first_hole, blocks_per_page);
+                       goto confused;          /* hole -> non-hole */
+               }
+
+               /* Contiguous blocks? */
+               if (page_block && blocks[page_block-1] != bh.b_blocknr-1) {
+                       printk("%s: confused: page_block=%u, blocks=%Lu, bh.b_blocknr=%Lu.\n",
+                                       __func__, page_block, blocks[page_block-1], bh.b_blocknr-1);
+                       goto confused;
+               }
+               blocks[page_block] = bh.b_blocknr;
+               bdev = bh.b_bdev;
+       }
+
+       if (!bdev)
+               goto confused;
+
+       if (first_hole != blocks_per_page) {
+               char *kaddr = kmap_atomic(page, KM_USER0);
+               memset(kaddr + (first_hole << blkbits), 0,
+                               PAGE_CACHE_SIZE - (first_hole << blkbits));
+               flush_dcache_page(page);
+               kunmap_atomic(kaddr, KM_USER0);
+               if (first_hole == 0) {
+                       SetPageUptodate(page);
+                       goto out;
+               }
+       } else if (fully_mapped) {
+               SetPageMappedToDisk(page);
+       }
+       
+       /*
+        * This page will go to BIO.  Do we need to send this BIO off first?
+        */
+       if (bio && (*last_block_in_bio != blocks[0] - 1))
+               bio = kevent_mpage_bio_submit(READ, bio);
+
+alloc_new:
+       if (bio == NULL) {
+               nr_pages = min_t(unsigned, nr_pages, bio_get_nr_vecs(bdev));
+               bio = bio_alloc(GFP_KERNEL, nr_pages);
+               if (bio == NULL)
+                       goto confused;
+
+               bio->bi_destructor = kevent_aio_bio_destructor;
+               bio->bi_bdev = bdev;
+               bio->bi_sector = blocks[0] << (blkbits - 9);
+               bio->bi_private = inode;
+
+               *bio_num = *bio_num + 1;
+       }
+
+       length = first_hole << blkbits;
+       if (bio_add_page(bio, page, length, 0) < length) {
+               bio = kevent_mpage_bio_submit(READ, bio);
+               printk("%s: Failed to add a page: nr_pages=%d, length=%d, page=%p.\n", __func__, nr_pages, length, page);
+               goto alloc_new;
+       }
+#if 0
+       printk("%s: bio=%p, b=%d, m=%d, u=%d, nr_pages=%d, offset=%Lu, size=%Lu. page_block=%u, page=%p.\n",
+                       __func__, bio, buffer_boundary(&bh), buffer_mapped(&bh), buffer_uptodate(&bh),
+                       nr_pages, *offset, i_size_read(inode), page_block, page);
+#endif 
+       *offset = *offset + length;
+
+       if (buffer_boundary(&bh) || (first_hole != blocks_per_page))
+               bio = kevent_mpage_bio_submit(READ, bio);
+       else
+               *last_block_in_bio = blocks[blocks_per_page - 1];
+
+out:
+       return bio;
+
+confused:
+       if (bio)
+               bio = kevent_mpage_bio_submit(READ, bio);
+       goto out;
+}
+
+static int kevent_mpage_readpages(struct inode *inode, struct page **pages, unsigned nr_pages,
+               get_block_t get_block, loff_t *offset, int *bio_num)
+{
+       struct bio *bio = NULL;
+       sector_t last_block_in_bio = 0;
+       int i;
+
+       for (i=0; i<nr_pages; ++i) {
+               struct page *page = pages[i];
+
+               bio = kevent_mpage_readpage(bio, inode, page, nr_pages-i, get_block, offset, &last_block_in_bio, bio_num);
+       }
+
+       if (bio)
+               bio = kevent_mpage_bio_submit(READ, bio);
+
+       return 0;
+}
+
+static int kevent_aio_callback(struct kevent *k)
+{
+       struct kevent_aio_private *priv = k->priv;
+       int i, ready = 1;
+       size_t size;
+
+       BUG_ON(!priv);
+       
+       size = priv->size;
+
+       for (i=0; i<priv->pg_num && ready > 0; ++i) {
+               struct page *page = priv->pages[i];
+
+               if (PageError(page))
+                       ready = -1;
+               else if (!PageUptodate(page))
+                       ready = 0;
+               else
+                       size -= PAGE_SIZE;
+       }
+
+       if (ready)
+               k->event.id.raw[1] = size;
+
+       return ready;
+}
+
+int ext3_get_block(struct inode *inode, sector_t iblock,
+                       struct buffer_head *bh_result, int create);
+
+static int kevent_aio_enqueue(struct kevent *k)
+{
+       int err, i;
+       unsigned long addr = (unsigned long)k->event.ptr;
+       unsigned int size = k->event.id.raw[1];
+       int num = size/PAGE_SIZE;
+       struct file *file;
+       int fput_needed;
+       struct inode *inode;
+       struct kevent_aio_private *priv;
+
+       if ((addr & PAGE_MASK) != addr)
+               return -EINVAL;
+
+       file = fget_light(k->event.id.raw[0], &fput_needed);
+       if (!file)
+               return -ENODEV;
+
+       err = -EINVAL;
+       if (!file->f_dentry || !file->f_dentry->d_inode)
+               goto err_out_fput;
+
+       inode = igrab(file->f_dentry->d_inode);
+       if (!inode)
+               goto err_out_fput;
+
+       err = -ENOMEM;
+       priv = kzalloc(sizeof(struct page *) * num + sizeof(struct kevent_aio_private), GFP_KERNEL);
+       if (!priv)
+               goto err_out_iput;
+
+       priv->pages = (struct page **)(priv + 1);
+
+       down_read(&current->mm->mmap_sem);
+       err = get_user_pages(current, current->mm, addr, num, 1, 0, priv->pages, NULL);
+       up_read(&current->mm->mmap_sem);
+       if (err <= 0)
+               goto err_out_free;
+       num = err;
+       
+       priv->pg_num = num;
+       priv->size = size;
+       priv->bio_num = 0;
+       priv->offset = 0;
+       
+       k->priv = priv;
+
+       kevent_storage_enqueue(&inode->st, k);
+
+       err = kevent_mpage_readpages(inode, priv->pages, num, &ext3_get_block, &priv->offset, &priv->bio_num);
+       if (err)
+               goto err_out_dequeue;
+
+       fput_light(file, fput_needed);
+
+       return err;
+
+err_out_dequeue:
+       kevent_storage_dequeue(k->st, k);
+       
+       for (i=0; i<num; ++i)
+               page_cache_release(priv->pages[i]);
+err_out_free:
+       kfree(priv);
+err_out_iput:
+       iput(inode);
+err_out_fput:
+       fput_light(file, fput_needed);
+
+       return err;
+}
+
+static int kevent_aio_dequeue(struct kevent *k)
+{
+       int i;
+       struct kevent_aio_private *priv = k->priv;
+       struct inode *inode = k->st->origin;
+
+       kevent_storage_dequeue(k->st, k);
+
+       for (i=0; i<priv->pg_num; ++i)
+               page_cache_release(priv->pages[i]);
+
+       kfree(k->priv);
+       k->priv = NULL;
+       iput(inode);
+
+       return 0;
+}
+
+
+int kevent_init_aio(struct kevent *k)
+{
+       k->enqueue = &kevent_aio_enqueue;
+       k->dequeue = &kevent_aio_dequeue;
+       k->callback = &kevent_aio_callback;
+       return 0;
+}
diff --git a/kernel/kevent/kevent_init.c b/kernel/kevent/kevent_init.c
index 74659df..ec95114 100644
--- a/kernel/kevent/kevent_init.c
+++ b/kernel/kevent/kevent_init.c
@@ -75,3 +75,11 @@ int kevent_init_naio(struct kevent *k)
        return -ENODEV;
 }
 #endif
+
+#ifndef CONFIG_KEVENT_AIO
+int kevent_init_aio(struct kevent *k)
+{
+       kevent_break(k);
+       return -ENODEV;
+}
+#endif
diff --git a/kernel/kevent/kevent_inode.c b/kernel/kevent/kevent_inode.c
index b6d12f6..3af0e11 100644
--- a/kernel/kevent/kevent_inode.c
+++ b/kernel/kevent/kevent_inode.c
@@ -48,7 +48,7 @@ static int kevent_inode_enqueue(struct k
                goto err_out_fput;
 
        err = kevent_storage_enqueue(&inode->st, k);
-       if (err < 0)
+       if (err)
                goto err_out_iput;
 
        fput_light(file, fput_needed);
diff --git a/kernel/kevent/kevent_naio.c b/kernel/kevent/kevent_naio.c
index 004b292..277e5ca 100644
--- a/kernel/kevent/kevent_naio.c
+++ b/kernel/kevent/kevent_naio.c
@@ -87,7 +87,7 @@ static int kevent_naio_enqueue(struct ke
        struct page **page;
        void *addr;
        unsigned int size = k->event.id.raw[1];
-       int num = size/PAGE_SIZE + 1;
+       int num = size/PAGE_SIZE;
        struct file *file;
        struct sock *sk = NULL;
        int fput_needed;
@@ -106,13 +106,15 @@ static int kevent_naio_enqueue(struct ke
        if (!sk || !sk->sk_prot->async_recv || !sk->sk_prot->async_send || 
                        !test_bit(KEVENT_SOCKET_FLAGS_ASYNC, &sk->sk_kevent_flags))
                goto err_out_fput;
+       
+       addr = k->event.ptr;
+       if (((unsigned long)addr & PAGE_MASK) != (unsigned long)addr)
+               num++;
 
        page = kmalloc(sizeof(struct page *) * num, GFP_KERNEL);
        if (!page)
                return -ENOMEM;
 
-       addr = k->event.ptr;
-
        down_read(&current->mm->mmap_sem);
        err = get_user_pages(current, current->mm, (unsigned long)addr, num, 1, 0, page, NULL);
        up_read(&current->mm->mmap_sem);
diff --git a/kernel/kevent/kevent_poll.c b/kernel/kevent/kevent_poll.c
index 12b06bb..7149770 100644
--- a/kernel/kevent/kevent_poll.c
+++ b/kernel/kevent/kevent_poll.c
@@ -125,7 +125,7 @@ static int kevent_poll_enqueue(struct ke
                ready = 1;
 
        err = kevent_storage_enqueue(&file->st, k);
-       if (err < 0)
+       if (err)
                goto err_out_free;
        
        return ready;
diff --git a/kernel/kevent/kevent_socket.c b/kernel/kevent/kevent_socket.c
index d179ca8..d3938a2 100644
--- a/kernel/kevent/kevent_socket.c
+++ b/kernel/kevent/kevent_socket.c
@@ -78,7 +78,7 @@ int kevent_socket_enqueue(struct kevent 
                goto err_out_fput;
 
        err = kevent_storage_enqueue(&inode->st, k);
-       if (err < 0)
+       if (err)
                goto err_out_iput;
 
        err = k->callback(k);
diff --git a/kernel/kevent/kevent_timer.c b/kernel/kevent/kevent_timer.c
index 3ae05c2..2f9291b 100644
--- a/kernel/kevent/kevent_timer.c
+++ b/kernel/kevent/kevent_timer.c
@@ -53,12 +53,12 @@ static int kevent_timer_enqueue(struct k
        t->data = (unsigned long)k;
 
        st = (struct kevent_storage *)(t+1);
-       err = kevent_storage_init(k->event.type, KEVENT_MASK_EMPTY, t, st);
+       err = kevent_storage_init(t, st);
        if (err)
                goto err_out_free;
 
        err = kevent_storage_enqueue(st, k);
-       if (err < 0)
+       if (err)
                goto err_out_st_fini;
        
        add_timer(t);
diff --git a/kernel/kevent/kevent_user.c b/kernel/kevent/kevent_user.c
index 6a0eedd..2f0a124 100644
--- a/kernel/kevent/kevent_user.c
+++ b/kernel/kevent/kevent_user.c
@@ -92,7 +92,9 @@ static struct kevent_user *kevent_user_a
        INIT_LIST_HEAD(&u->ready_list);
        spin_lock_init(&u->ready_lock);
        u->ready_num = 0;
-       
+#ifdef CONFIG_KEVENT_USER_STAT
+       u->wait_num = u->im_num = u->total = 0;
+#endif
        for (i=0; i<KEVENT_HASH_MASK+1; ++i) {
                INIT_LIST_HEAD(&u->kqueue[i].kevent_list);
                spin_lock_init(&u->kqueue[i].kevent_lock);
@@ -128,8 +130,12 @@ static inline void kevent_user_get(struc
 
 static inline void kevent_user_put(struct kevent_user *u)
 {
-       if (atomic_dec_and_test(&u->refcnt))
+       if (atomic_dec_and_test(&u->refcnt)) {
+#ifdef CONFIG_KEVENT_USER_STAT
+               printk("%s: u=%p, wait=%lu, immediately=%lu, total=%lu.\n", __func__, u, u->wait_num, u->im_num, u->total);
+#endif
                kfree(u);
+       }
 }
 
 #if 0
@@ -154,15 +160,15 @@ static inline unsigned int kevent_user_h
  * dequeue it from storage and decrease user's reference counter,
  * since this kevent does not exist anymore. That is why it is freed here.
  */
-static void kevent_finish_user(struct kevent *k, int lock)
+static void kevent_finish_user(struct kevent *k, int lock, int deq)
 {
        struct kevent_user *u = k->user;
        unsigned long flags;
-       
+
        if (lock) {
                unsigned int hash = kevent_user_hash(&k->event);
                struct kevent_list *l = &u->kqueue[hash];
-               
+
                spin_lock_irqsave(&l->kevent_lock, flags);
                list_del(&k->kevent_entry);
                u->kevent_num--;
@@ -171,7 +177,17 @@ static void kevent_finish_user(struct ke
                list_del(&k->kevent_entry);
                u->kevent_num--;
        }
-       kevent_dequeue(k);
+
+       if (deq)
+               kevent_dequeue(k);
+
+       spin_lock_irqsave(&u->ready_lock, flags);
+       if (k->ready_entry.next != LIST_POISON1) {
+               list_del(&k->ready_entry);
+               u->ready_num--;
+       }
+       spin_unlock_irqrestore(&u->ready_lock, flags);
+       
        kevent_user_put(u);
        kevent_free(k);
 }
@@ -184,7 +200,7 @@ static struct kevent *__kqueue_dequeue_o
        struct kevent *k = NULL;
        unsigned int len = *qlen;
        
-       if (len) {
+       if (len && !list_empty(q)) {
                k = list_entry(q->next, struct kevent, ready_entry);
                list_del(&k->ready_entry);
                *qlen = len - 1;
@@ -205,15 +221,11 @@ static struct kevent *kqueue_dequeue_rea
        return k;
 }
 
-struct kevent *kevent_search(struct ukevent *uk, struct kevent_user *u)
+static struct kevent *__kevent_search(struct kevent_list *l, struct ukevent *uk, struct kevent_user *u)
 {
        struct kevent *k;
-       unsigned int hash = kevent_user_hash(uk);
-       struct kevent_list *l = &u->kqueue[hash];
        int found = 0;
-       unsigned long flags;
        
-       spin_lock_irqsave(&l->kevent_lock, flags);
        list_for_each_entry(k, &l->kevent_list, kevent_entry) {
                spin_lock(&k->lock);
                if (k->event.user[0] == uk->user[0] && k->event.user[1] == uk->user[1] &&
@@ -224,11 +236,53 @@ struct kevent *kevent_search(struct ukev
                }
                spin_unlock(&k->lock);
        }
-       spin_unlock_irqrestore(&l->kevent_lock, flags);
 
        return (found)?k:NULL;
 }
 
+static int kevent_modify(struct ukevent *uk, struct kevent_user *u)
+{
+       struct kevent *k;
+       unsigned int hash = kevent_user_hash(uk);
+       struct kevent_list *l = &u->kqueue[hash];
+       int err = -ENODEV;
+       unsigned long flags;
+       
+       spin_lock_irqsave(&l->kevent_lock, flags);
+       k = __kevent_search(l, uk, u);
+       if (k) {
+               spin_lock(&k->lock);
+               k->event.event = uk->event;
+               k->event.req_flags = uk->req_flags;
+               k->event.ret_flags = 0;
+               spin_unlock(&k->lock);
+               kevent_requeue(k);
+               err = 0;
+       }
+       spin_unlock_irqrestore(&l->kevent_lock, flags);
+       
+       return err;
+}
+
+static int kevent_remove(struct ukevent *uk, struct kevent_user *u)
+{
+       int err = -ENODEV;
+       struct kevent *k;
+       unsigned int hash = kevent_user_hash(uk);
+       struct kevent_list *l = &u->kqueue[hash];
+       unsigned long flags;
+
+       spin_lock_irqsave(&l->kevent_lock, flags);
+       k = __kevent_search(l, uk, u);
+       if (k) {
+               kevent_finish_user(k, 0, 1);
+               err = 0;
+       }
+       spin_unlock_irqrestore(&l->kevent_lock, flags);
+
+       return err;
+}
+
 /*
  * No new entry can be added or removed from any list at this point.
  * It is not permitted to call ->ioctl() and ->release() in parallel.
@@ -243,7 +297,7 @@ static int kevent_user_release(struct in
                struct kevent_list *l = &u->kqueue[i];
                
                list_for_each_entry_safe(k, n, &l->kevent_list, kevent_entry)
-                       kevent_finish_user(k, 1);
+                       kevent_finish_user(k, 1, 1);
        }
 
        kevent_user_put(u);
@@ -255,8 +309,6 @@ static int kevent_user_release(struct in
 static int kevent_user_ctl_modify(struct kevent_user *u, struct kevent_user_control *ctl, void __user *arg)
 {
        int err = 0, i;
-       struct kevent *k;
-       unsigned long flags;
        struct ukevent uk;
 
        if (down_interruptible(&u->ctl_mutex))
@@ -268,17 +320,8 @@ static int kevent_user_ctl_modify(struct
                        break;
                }
 
-               k = kevent_search(&uk, u);
-               if (k) {
-                       spin_lock_irqsave(&k->lock, flags);
-                       k->event.event = uk.event;
-                       k->event.req_flags = uk.req_flags;
-                       k->event.ret_flags = 0;
-                       spin_unlock_irqrestore(&k->lock, flags);
-                       kevent_requeue(k);
-               } else
+               if (kevent_modify(&uk, u))
                        uk.ret_flags |= KEVENT_RET_BROKEN;
-
                uk.ret_flags |= KEVENT_RET_DONE;
 
                if (copy_to_user(arg, &uk, sizeof(struct ukevent))) {
@@ -297,7 +340,6 @@ static int kevent_user_ctl_modify(struct
 static int kevent_user_ctl_remove(struct kevent_user *u, struct kevent_user_control *ctl, void __user *arg)
 {
        int err = 0, i;
-       struct kevent *k;
        struct ukevent uk;
 
        if (down_interruptible(&u->ctl_mutex))
@@ -309,10 +351,7 @@ static int kevent_user_ctl_remove(struct
                        break;
                }
 
-               k = kevent_search(&uk, u);
-               if (k) {
-                       kevent_finish_user(k, 1);
-               } else
+               if (kevent_remove(&uk, u))
                        uk.ret_flags |= KEVENT_RET_BROKEN;
 
                uk.ret_flags |= KEVENT_RET_DONE;
@@ -353,14 +392,10 @@ int kevent_user_add_ukevent(struct ukeve
                goto err_out_exit;
        }
        k->user = u;
-
-       err = kevent_enqueue(k);
-       if (err) {
-               if (err < 0)
-                       uk->ret_flags |= KEVENT_RET_BROKEN;
-               uk->ret_flags |= KEVENT_RET_DONE;
-               kevent_free(k);
-       } else {
+#ifdef CONFIG_KEVENT_USER_STAT
+       u->total++;
+#endif
+       {
                unsigned long flags;
                unsigned int hash = kevent_user_hash(&k->event);
                struct kevent_list *l = &u->kqueue[hash];
@@ -372,6 +407,15 @@ int kevent_user_add_ukevent(struct ukeve
                spin_unlock_irqrestore(&l->kevent_lock, flags);
        }
 
+       err = kevent_enqueue(k);
+       if (err) {
+               memcpy(uk, &k->event, sizeof(struct ukevent));
+               if (err < 0)
+                       uk->ret_flags |= KEVENT_RET_BROKEN;
+               uk->ret_flags |= KEVENT_RET_DONE;
+               kevent_finish_user(k, 1, 0);
+       } 
+
 err_out_exit:
        return err;
 }
@@ -410,6 +454,9 @@ static int kevent_user_ctl_add(struct ke
 
                err = kevent_user_add_ukevent(&uk, u);
                if (err) {
+#ifdef CONFIG_KEVENT_USER_STAT
+                       u->im_num++;
+#endif
                        if (copy_to_user(orig, &uk, sizeof(struct ukevent)))
                                cerr = -EINVAL;
                        orig += sizeof(struct ukevent);
@@ -446,7 +493,7 @@ static int kevent_user_wait(struct file 
        int cerr = 0, num = 0;
        void __user *ptr = arg + sizeof(struct kevent_user_control);
 
-       if (down_interruptible(&u->wait_mutex))
+       if (down_interruptible(&u->ctl_mutex))
                return -ERESTARTSYS;
 
        if (!(file->f_flags & O_NONBLOCK)) {
@@ -457,23 +504,27 @@ static int kevent_user_wait(struct file 
                        wait_event_interruptible_timeout(u->wait, 
                                        u->ready_num > 0, msecs_to_jiffies(1000));
        }
-       while (num < ctl->num && (k = kqueue_dequeue_ready(u))) {
+       while (num < ctl->num && ((k = kqueue_dequeue_ready(u)) != NULL)) {
                if (copy_to_user(ptr + num*sizeof(struct ukevent), &k->event, sizeof(struct ukevent)))
                        cerr = -EINVAL;
+
                /*
                 * If it is one-shot kevent, it has been removed already from
                 * origin's queue, so we can easily free it here.
                 */
                if (k->event.req_flags & KEVENT_REQ_ONESHOT)
-                       kevent_finish_user(k, 1);
+                       kevent_finish_user(k, 1, 1);
                ++num;
+#ifdef CONFIG_KEVENT_USER_STAT
+               u->wait_num++;
+#endif
        }
 
        ctl->num = num;
        if (copy_to_user(arg, ctl, sizeof(struct kevent_user_control)))
                cerr = -EINVAL;
 
-       up(&u->wait_mutex);
+       up(&u->ctl_mutex);
 
        return (cerr)?cerr:num;
 }
diff --git a/net/core/sock.c b/net/core/sock.c
index 3d32c5c..c54f942 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1211,7 +1211,7 @@ static void sock_def_wakeup(struct sock 
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible_all(sk->sk_sleep);
        read_unlock(&sk->sk_callback_lock);
-       kevent_socket_notify(sk, KEVENT_SOCKET_RECV);
+       kevent_socket_notify(sk, KEVENT_SOCKET_RECV|KEVENT_SOCKET_SEND);
 }
 
 static void sock_def_error_report(struct sock *sk)
@@ -1221,7 +1221,7 @@ static void sock_def_error_report(struct
                wake_up_interruptible(sk->sk_sleep);
        sk_wake_async(sk,0,POLL_ERR); 
        read_unlock(&sk->sk_callback_lock);
-       kevent_socket_notify(sk, KEVENT_SOCKET_RECV);
+       kevent_socket_notify(sk, KEVENT_SOCKET_RECV|KEVENT_SOCKET_SEND);
 }
 
 static void sock_def_readable(struct sock *sk, int len)
@@ -1231,7 +1231,7 @@ static void sock_def_readable(struct soc
                wake_up_interruptible(sk->sk_sleep);
        sk_wake_async(sk,1,POLL_IN);
        read_unlock(&sk->sk_callback_lock);
-       kevent_socket_notify(sk, KEVENT_SOCKET_RECV);
+       kevent_socket_notify(sk, KEVENT_SOCKET_RECV|KEVENT_SOCKET_SEND);
 }
 
 static void sock_def_write_space(struct sock *sk)
@@ -1248,11 +1248,10 @@ static void sock_def_write_space(struct 
                /* Should agree with poll, otherwise some programs break */
                if (sock_writeable(sk))
                        sk_wake_async(sk, 2, POLL_OUT);
-       
-               kevent_socket_notify(sk, KEVENT_SOCKET_SEND);
        }
 
        read_unlock(&sk->sk_callback_lock);
+       kevent_socket_notify(sk, KEVENT_SOCKET_SEND|KEVENT_SOCKET_RECV);
 }
 
 static void sock_def_destruct(struct sock *sk)
@@ -1354,8 +1353,10 @@ void fastcall release_sock(struct sock *
        if (sk->sk_backlog.tail)
                __release_sock(sk);
        sk->sk_lock.owner = NULL;
-        if (waitqueue_active(&(sk->sk_lock.wq)))
+        if (waitqueue_active(&(sk->sk_lock.wq))) {
                wake_up(&(sk->sk_lock.wq));
+               kevent_socket_notify(sk, KEVENT_SOCKET_RECV|KEVENT_SOCKET_SEND);
+       }
        spin_unlock_bh(&(sk->sk_lock.slock));
 }
 EXPORT_SYMBOL(release_sock);
diff --git a/net/core/stream.c b/net/core/stream.c
index 745a07f..fd7e11e 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -36,7 +36,7 @@ void sk_stream_write_space(struct sock *
                        wake_up_interruptible(sk->sk_sleep);
                if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
                        sock_wake_async(sock, 2, POLL_OUT);
-               kevent_socket_notify(sk, KEVENT_SOCKET_SEND);
+               kevent_socket_notify(sk, KEVENT_SOCKET_SEND|KEVENT_SOCKET_RECV);
        }
 }
 

-- 
        Evgeniy Polyakov

Attachment: memcpy_vs_copy_user.png
Description: PNG image
