diff --git a/aio-win32.c b/aio-win32.c
index f9cfbb7..c02fdf7 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -22,12 +22,87 @@
 
 struct AioHandler {
     EventNotifier *e;
+    IOHandler *io_read;
+    IOHandler *io_write;
     EventNotifierHandler *io_notify;
     GPollFD pfd;
     int deleted;
+    void *opaque;
     QLIST_ENTRY(AioHandler) node;
 };
 
+static AioHandler *find_aio_handler(AioContext *ctx, int fd)
+{
+    AioHandler *node;
+
+    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+        if (node->pfd.fd == fd && !node->deleted) {
+            return node;
+        }
+    }
+
+    return NULL;
+}
+
+void aio_set_fd_handler(AioContext *ctx,
+                        int fd,
+                        IOHandler *io_read,
+                        IOHandler *io_write,
+                        void *opaque)
+{
+	AioHandler *node = find_aio_handler(ctx, fd);
+
+	/* Are we deleting the fd handler? */
+	if (!io_read && !io_write) {
+		if (node) {
+			g_source_remove_poll(&ctx->source, &node->pfd);
+
+			/* If the lock is held, just mark the node as deleted */
+			if (ctx->walking_handlers) {
+				node->deleted = 1;
+				node->pfd.revents = 0;
+			} else {
+				/* Otherwise, delete it for real.  We can't just mark it as
+				 * deleted because deleted nodes are only cleaned up after
+				 * releasing the walking_handlers lock.
+				 */
+				QLIST_REMOVE(node, node);
+				g_free(node);
+			}
+		}
+	} else {
+		if (node == NULL) {
+			/* Alloc and insert if it's not already there */
+			node = g_malloc0(sizeof(AioHandler));
+			node->pfd.fd = fd;
+			QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
+
+			g_source_add_poll(&ctx->source, &node->pfd);
+		}
+		/* Reuse node's event on re-registration so we don't leak HANDLEs */
+		HANDLE event = node->e ? (HANDLE)node->e
+		                       : CreateEvent(NULL, FALSE, FALSE, NULL);
+		long lNetworkEvents = 0;
+
+		/* Use the NEW handlers: node->io_* still hold stale values here */
+		if (io_read)
+			lNetworkEvents |= FD_READ;
+		if (io_write)
+			lNetworkEvents |= FD_WRITE;
+		WSAEventSelect(node->pfd.fd, event, lNetworkEvents);
+		node->e = (EventNotifier *)event;
+
+		/* Update handler with latest information */
+		node->pfd.events = (io_read != NULL ? G_IO_IN : 0);
+		node->pfd.events |= (io_write != NULL ? G_IO_OUT : 0);
+		node->opaque = opaque;
+		node->io_read = io_read;
+		node->io_write = io_write;
+	}
+
+	aio_notify(ctx);
+}
+
 void aio_set_event_notifier(AioContext *ctx,
                             EventNotifier *e,
                             EventNotifierHandler *io_notify)
@@ -79,23 +154,95 @@ void aio_set_event_notifier(AioContext *ctx,
 bool aio_pending(AioContext *ctx)
 {
     AioHandler *node;
+    int revents;
 
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
         if (node->pfd.revents && node->io_notify) {
             return true;
         }
+
+        revents = node->pfd.revents & node->pfd.events;
+		if ((revents & G_IO_IN) && node->io_read) {
+			return true;
+		}
+		if ((revents & G_IO_OUT) && node->io_write) {
+			return true;
+		}
     }
 
     return false;
 }
 
+static bool aio_dispatch(AioContext *ctx)
+{
+    AioHandler *node;
+    bool progress = false;
+
+    /*
+     * We have to walk very carefully in case qemu_aio_set_fd_handler is
+     * called while we're walking.
+     */
+    node = QLIST_FIRST(&ctx->aio_handlers);
+	while (node) {
+		AioHandler *tmp = node;
+
+		ctx->walking_handlers++;
+
+		if (!node->deleted) {
+			// HANDLE ?
+			if (node->pfd.revents && node->io_notify) {
+				node->pfd.revents = 0;
+				node->io_notify(node->e);
+
+				/* aio_notify() does not count as progress */
+				if (node->e != &ctx->notifier) {
+					progress = true;
+				}
+			}
+
+			// SOCKET ?
+			int revents = node->pfd.revents & node->pfd.events;
+			node->pfd.revents = 0;
+
+			if ((revents & G_IO_IN) && node->io_read) {
+				node->io_read(node->opaque);
+
+				/* aio_notify() does not count as progress */
+				if (node->opaque != &ctx->notifier) {
+					progress = true;
+				}
+			}
+			if ((revents & G_IO_OUT) && node->io_write) {
+				node->io_write(node->opaque);
+				progress = true;
+			}
+		}
+
+		node = QLIST_NEXT(node, node);
+
+		ctx->walking_handlers--;
+
+		if (!ctx->walking_handlers && tmp->deleted) {
+			QLIST_REMOVE(tmp, node);
+			g_free(tmp);
+		}
+	}
+
+	/* Run our timers */
+	progress |= timerlistgroup_run_timers(&ctx->tlg);
+
+    return progress;
+}
+
 bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandler *node;
     HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
-    bool progress;
+    bool progress, has_aio_notify = false;
     int count;
     int timeout;
+    fd_set rfds, wfds;
+    struct timeval tv0 = { .tv_sec = 0, .tv_usec = 0};
 
     progress = false;
 
@@ -109,63 +256,68 @@ bool aio_poll(AioContext *ctx, bool blocking)
         progress = true;
     }
 
-    /* Run timers */
-    progress |= timerlistgroup_run_timers(&ctx->tlg);
+    if (aio_dispatch(ctx)) {
+		progress = true;
+	}
 
-    /*
-     * Then dispatch any pending callbacks from the GSource.
-     *
-     * We have to walk very carefully in case qemu_aio_set_fd_handler is
-     * called while we're walking.
-     */
-    node = QLIST_FIRST(&ctx->aio_handlers);
-    while (node) {
-        AioHandler *tmp;
-
-        ctx->walking_handlers++;
-
-        if (node->pfd.revents && node->io_notify) {
-            node->pfd.revents = 0;
-            node->io_notify(node->e);
-
-            /* aio_notify() does not count as progress */
-            if (node->e != &ctx->notifier) {
-                progress = true;
-            }
-        }
-
-        tmp = node;
-        node = QLIST_NEXT(node, node);
-
-        ctx->walking_handlers--;
-
-        if (!ctx->walking_handlers && tmp->deleted) {
-            QLIST_REMOVE(tmp, node);
-            g_free(tmp);
-        }
-    }
-
-    if (progress && !blocking) {
-        return true;
-    }
+	if (progress && !blocking) {
+		return true;
+	}
 
     ctx->walking_handlers++;
 
     /* fill fd sets */
     count = 0;
+    FD_ZERO (&rfds);
+    FD_ZERO (&wfds);
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
-        if (!node->deleted && node->io_notify) {
+    	if (node->deleted)
+    	    continue;
+
+    	// HANDLE ?
+        if (node->io_notify) {
             events[count++] = event_notifier_get_handle(node->e);
+
+            has_aio_notify = true;
         }
+
+        // SOCKET ?
+        if (node->io_read || node->io_write) {
+			if (node->io_read)
+				FD_SET ((SOCKET)node->pfd.fd, &rfds);
+			if (node->io_write)
+				FD_SET ((SOCKET)node->pfd.fd, &wfds);
+
+			events[count++] = (HANDLE)node->e;
+		}
     }
 
     ctx->walking_handlers--;
 
     /* early return if we only have the aio_notify() fd */
-    if (count == 1) {
+    if (count == 1 && has_aio_notify) {
         return progress;
     }
 
+    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
+		QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+			node->pfd.revents = 0;
+			if (FD_ISSET(node->pfd.fd, &rfds)) {
+				node->pfd.revents |= G_IO_IN;
+				blocking = false;
+
+				/* no artificial delay: the data is ready to dispatch now */
+			}
+
+			if (FD_ISSET(node->pfd.fd, &wfds)) {
+				node->pfd.revents |= G_IO_OUT;
+				blocking = false;
+
+				/* no artificial delay: writable sockets need no throttle */
+			}
+		}
+	}
+
     /* wait until next event */
     while (count > 0) {
         int ret;
@@ -190,14 +342,11 @@ bool aio_poll(AioContext *ctx, bool blocking)
             ctx->walking_handlers++;
 
             if (!node->deleted &&
+            	node->io_notify &&
                 event_notifier_get_handle(node->e) == events[ret - WAIT_OBJECT_0] &&
                 node->io_notify) {
-                node->io_notify(node->e);
 
-                /* aio_notify() does not count as progress */
-                if (node->e != &ctx->notifier) {
-                    progress = true;
-                }
+            	node->pfd.revents = G_IO_IN;
             }
 
             tmp = node;
@@ -215,14 +364,10 @@ bool aio_poll(AioContext *ctx, bool blocking)
         events[ret - WAIT_OBJECT_0] = events[--count];
     }
 
-    if (blocking) {
-        /* Run the timers a second time. We do this because otherwise aio_wait
-         * will not note progress - and will stop a drain early - if we have
-         * a timer that was not ready to run entering g_poll but is ready
-         * after g_poll. This will only do anything if a timer has expired.
-         */
-        progress |= timerlistgroup_run_timers(&ctx->tlg);
-    }
+    /* Run dispatch even if there were no readable fds to run timers */
+	if (aio_dispatch(ctx)) {
+		progress = true;
+	}
 
     return progress;
 }
diff --git a/block/Makefile.objs b/block/Makefile.objs
index 3bb85b5..9bcb4b0 100644
--- a/block/Makefile.objs
+++ b/block/Makefile.objs
@@ -5,12 +5,13 @@ block-obj-y += qed-check.o
 block-obj-y += vhdx.o
 block-obj-y += parallels.o blkdebug.o blkverify.o
 block-obj-y += snapshot.o qapi.o
+block-obj-y += nbd.o
 block-obj-$(CONFIG_WIN32) += raw-win32.o win32-aio.o
 block-obj-$(CONFIG_POSIX) += raw-posix.o
 block-obj-$(CONFIG_LINUX_AIO) += linux-aio.o
 
 ifeq ($(CONFIG_POSIX),y)
-block-obj-y += nbd.o sheepdog.o
+block-obj-y += sheepdog.o
 block-obj-$(CONFIG_LIBISCSI) += iscsi.o
 block-obj-$(CONFIG_CURL) += curl.o
 block-obj-$(CONFIG_RBD) += rbd.o
diff --git a/include/block/aio.h b/include/block/aio.h
index 2efdf41..80b3968 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -199,7 +199,7 @@ bool aio_pending(AioContext *ctx);
  */
 bool aio_poll(AioContext *ctx, bool blocking);
 
-#ifdef CONFIG_POSIX
+#if defined(CONFIG_POSIX) || defined(CONFIG_WIN32)
 /* Register a file descriptor and associated callbacks.  Behaves very similarly
  * to qemu_set_fd_handler2.  Unlike qemu_set_fd_handler2, these callbacks will
  * be invoked when using qemu_aio_wait().
@@ -212,7 +212,7 @@ void aio_set_fd_handler(AioContext *ctx,
                         IOHandler *io_read,
                         IOHandler *io_write,
                         void *opaque);
-#endif
+#endif
 
 /* Register an event notifier and associated callbacks.  Behaves very similarly
  * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these callbacks
@@ -239,12 +239,12 @@ bool qemu_aio_wait(void);
 void qemu_aio_set_event_notifier(EventNotifier *notifier,
                                  EventNotifierHandler *io_read);
 
-#ifdef CONFIG_POSIX
+#if defined(CONFIG_POSIX) || defined(CONFIG_WIN32)
 void qemu_aio_set_fd_handler(int fd,
                              IOHandler *io_read,
                              IOHandler *io_write,
                              void *opaque);
-#endif
+#endif
 
 /**
  * aio_timer_new:
diff --git a/main-loop.c b/main-loop.c
index c3c9c28..4847be6 100644
--- a/main-loop.c
+++ b/main-loop.c
@@ -503,7 +503,7 @@ bool qemu_aio_wait(void)
     return aio_poll(qemu_aio_context, true);
 }
 
-#ifdef CONFIG_POSIX
+#if defined(CONFIG_POSIX) || defined(CONFIG_WIN32)
 void qemu_aio_set_fd_handler(int fd,
                              IOHandler *io_read,
                              IOHandler *io_write,
@@ -511,7 +511,7 @@ void qemu_aio_set_fd_handler(int fd,
 {
     aio_set_fd_handler(qemu_aio_context, fd, io_read, io_write, opaque);
 }
-#endif
+#endif
 
 void qemu_aio_set_event_notifier(EventNotifier *notifier,
                                  EventNotifierHandler *io_read)
diff --git a/qga/vss-win32/requester.h b/qga/vss-win32/requester.h
index cffec01..a71e607 100644
--- a/qga/vss-win32/requester.h
+++ b/qga/vss-win32/requester.h
@@ -13,6 +13,7 @@
 #ifndef VSS_WIN32_REQUESTER_H
 #define VSS_WIN32_REQUESTER_H
 
+#include <basetyps.h>
 #include "qemu/compiler.h"
 
 #ifdef __cplusplus
