Hi Avi,

I believe I have incorporated all of the changes you requested.  Please find
the resulting patch inline.

Note that I finally understand what you were getting at with the array of 
objects thing.  I haven't changed it yet, for the same reason you mentioned: 
reducing churn.  However, now that I understand it, I see why you wanted it.  
Perhaps I will send a follow-on patch that uses your idea.  But for now...

---

KVM: Add support for in-kernel MMIO handlers

There is a near-term need for moving some of the emulation from userspace to 
the kernel (e.g. interrupt handling).  This patch adds a construct for 
registering in-kernel MMIO handlers.  The consumers of this interface will
appear in a follow-on patch.

Signed-off-by: Gregory Haskins <[EMAIL PROTECTED]>

---
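
As an aside (not part of the patch itself): here is a rough sketch of what a 
consumer of this interface might look like, just to illustrate the intended 
usage.  The dummy device, its 0x100-byte window at 0xe0000000, and the 
registration helper are all hypothetical; the real consumers will come in the 
follow-on patch.

        static unsigned long dummy_latch;

        static unsigned long dummy_mmio_read(struct kvm_io_device *this,
                                             gpa_t addr, int length)
        {
                /* Return whatever the guest last wrote to the window. */
                return dummy_latch;
        }

        static void dummy_mmio_write(struct kvm_io_device *this,
                                     gpa_t addr, int length, unsigned long val)
        {
                dummy_latch = val;
        }

        static int dummy_mmio_in_range(struct kvm_io_device *this, gpa_t addr)
        {
                /* Claim a hypothetical 0x100-byte window at 0xe0000000. */
                return addr >= 0xe0000000 && addr < 0xe0000000 + 0x100;
        }

        static struct kvm_io_device dummy_mmio_dev = {
                .read     = dummy_mmio_read,
                .write    = dummy_mmio_write,
                .in_range = dummy_mmio_in_range,
        };

        /* Call during VM setup, after kvm_io_bus_init(&kvm->mmio_bus). */
        static void dummy_mmio_register(struct kvm *kvm)
        {
                kvm_io_bus_register_dev(&kvm->mmio_bus, &dummy_mmio_dev);
        }
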

 drivers/kvm/kvm.h      |   31 ++++++++++++++++++
 drivers/kvm/kvm_main.c |   82 +++++++++++++++++++++++++++++++++++++++++-------
 2 files changed, 101 insertions(+), 12 deletions(-)

diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index fceeb84..181099f 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -236,6 +236,36 @@ struct kvm_pio_request {
        int rep;
 };
 
+struct kvm_io_device {
+       unsigned long (*read)(struct kvm_io_device *this,
+                             gpa_t addr,
+                             int length);
+       void (*write)(struct kvm_io_device *this,
+                     gpa_t addr,
+                     int length,
+                     unsigned long val);
+       int (*in_range)(struct kvm_io_device *this, gpa_t addr);
+
+       void             *private;
+};
+
+/*
+ * It would be nice to use something smarter than a linear search, TBD...
+ * Thankfully we don't expect many devices to register (famous last words :),
+ * so until then it will suffice.  At least it's abstracted so we can change
+ * it in one place.
+ */
+struct kvm_io_bus {
+       int                   dev_count;
+#define NR_IOBUS_DEVS 6
+       struct kvm_io_device *devs[NR_IOBUS_DEVS];
+};
+
+void kvm_io_bus_init(struct kvm_io_bus *bus);
+struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
+void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
+                            struct kvm_io_device *dev);
+
 struct kvm_vcpu {
        struct kvm *kvm;
        union {
@@ -345,6 +375,7 @@ struct kvm {
        unsigned long rmap_overflow;
        struct list_head vm_list;
        struct file *filp;
+       struct kvm_io_bus mmio_bus;
 };
 
 struct kvm_stat {
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 4473174..c3c0059 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -294,6 +294,7 @@ static struct kvm *kvm_create_vm(void)
 
        spin_lock_init(&kvm->lock);
        INIT_LIST_HEAD(&kvm->active_mmu_pages);
+       kvm_io_bus_init(&kvm->mmio_bus);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                struct kvm_vcpu *vcpu = &kvm->vcpus[i];
 
@@ -1015,12 +1016,25 @@ static int emulator_write_std(unsigned long addr,
        return X86EMUL_UNHANDLEABLE;
 }
 
+static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
+                                               gpa_t addr)
+{
+       /*
+        * Note that it's important to have this wrapper function because
+        * in the very near future we will be checking for MMIOs against
+        * the LAPIC as well as the general MMIO bus.
+        */
+       return kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
+}
+
 static int emulator_read_emulated(unsigned long addr,
                                  unsigned long *val,
                                  unsigned int bytes,
                                  struct x86_emulate_ctxt *ctxt)
 {
-       struct kvm_vcpu *vcpu = ctxt->vcpu;
+       struct kvm_vcpu      *vcpu = ctxt->vcpu;
+       struct kvm_io_device *mmio_dev;
+       gpa_t                 gpa;
 
        if (vcpu->mmio_read_completed) {
                memcpy(val, vcpu->mmio_data, bytes);
@@ -1029,18 +1043,26 @@ static int emulator_read_emulated(unsigned long addr,
        } else if (emulator_read_std(addr, val, bytes, ctxt)
                   == X86EMUL_CONTINUE)
                return X86EMUL_CONTINUE;
-       else {
-               gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
 
-               if (gpa == UNMAPPED_GVA)
-                       return X86EMUL_PROPAGATE_FAULT;
-               vcpu->mmio_needed = 1;
-               vcpu->mmio_phys_addr = gpa;
-               vcpu->mmio_size = bytes;
-               vcpu->mmio_is_write = 0;
+       gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+       if (gpa == UNMAPPED_GVA)
+               return X86EMUL_PROPAGATE_FAULT;
 
-               return X86EMUL_UNHANDLEABLE;
+       /*
+        * Is this MMIO handled locally?
+        */
+       mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
+       if (mmio_dev) {
+               *val = mmio_dev->read(mmio_dev, gpa, bytes);
+               return X86EMUL_CONTINUE;
        }
+
+       vcpu->mmio_needed = 1;
+       vcpu->mmio_phys_addr = gpa;
+       vcpu->mmio_size = bytes;
+       vcpu->mmio_is_write = 0;
+
+       return X86EMUL_UNHANDLEABLE;
 }
 
 static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
@@ -1068,8 +1090,9 @@ static int emulator_write_emulated(unsigned long addr,
                                   unsigned int bytes,
                                   struct x86_emulate_ctxt *ctxt)
 {
-       struct kvm_vcpu *vcpu = ctxt->vcpu;
-       gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+       struct kvm_vcpu      *vcpu = ctxt->vcpu;
+       struct kvm_io_device *mmio_dev;
+       gpa_t                 gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
 
        if (gpa == UNMAPPED_GVA)
                return X86EMUL_PROPAGATE_FAULT;
@@ -1077,6 +1100,15 @@ static int emulator_write_emulated(unsigned long addr,
        if (emulator_write_phys(vcpu, gpa, val, bytes))
                return X86EMUL_CONTINUE;
 
+       /*
+        * Is this MMIO handled locally?
+        */
+       mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
+       if (mmio_dev) {
+               mmio_dev->write(mmio_dev, gpa, bytes, val);
+               return X86EMUL_CONTINUE;
+       }
+
        vcpu->mmio_needed = 1;
        vcpu->mmio_phys_addr = gpa;
        vcpu->mmio_size = bytes;
@@ -2911,6 +2943,32 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
        return NOTIFY_OK;
 }
 
+void kvm_io_bus_init(struct kvm_io_bus *bus)
+{
+       memset(bus, 0, sizeof(*bus));
+}
+
+struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
+{
+       int i;
+
+       for (i = 0; i < bus->dev_count; i++) {
+               struct kvm_io_device *pos = bus->devs[i];
+
+               if (pos->in_range(pos, addr))
+                       return pos;
+       }
+
+       return NULL;
+}
+
+void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
+{
+       BUG_ON(bus->dev_count >= NR_IOBUS_DEVS);
+
+       bus->devs[bus->dev_count++] = dev;
+}
+
 static struct notifier_block kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_hotplug,
        .priority = 20, /* must be > scheduler priority */

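For anyone who wants to play with the lookup logic outside the kernel, here is 
a tiny userspace mock-up of the bus semantics.  The types are re-declared 
(gpa_t stubbed as an unsigned 64-bit value) purely so the example compiles 
standalone, and the 0xfee00000 window just mimics where a LAPIC would sit; 
none of this is part of the patch.

        #include <stdio.h>

        typedef unsigned long long gpa_t;

        struct kvm_io_device {
                unsigned long (*read)(struct kvm_io_device *this,
                                      gpa_t addr, int length);
                void (*write)(struct kvm_io_device *this,
                              gpa_t addr, int length, unsigned long val);
                int (*in_range)(struct kvm_io_device *this, gpa_t addr);
                void *private;
        };

        #define NR_IOBUS_DEVS 6

        struct kvm_io_bus {
                int dev_count;
                struct kvm_io_device *devs[NR_IOBUS_DEVS];
        };

        /* Same linear search as kvm_io_bus_find_dev() in the patch. */
        static struct kvm_io_device *find_dev(struct kvm_io_bus *bus, gpa_t addr)
        {
                int i;

                for (i = 0; i < bus->dev_count; i++) {
                        struct kvm_io_device *pos = bus->devs[i];

                        if (pos->in_range(pos, addr))
                                return pos;
                }
                return NULL;
        }

        /* A device claiming the 4K page at 0xfee00000, LAPIC-style. */
        static int lapic_like_in_range(struct kvm_io_device *this, gpa_t addr)
        {
                return addr >= 0xfee00000ULL && addr < 0xfee00000ULL + 0x1000;
        }

        int main(void)
        {
                struct kvm_io_device dev = { .in_range = lapic_like_in_range };
                struct kvm_io_bus bus = { .dev_count = 1, .devs = { &dev } };

                /* A hit is handled in-kernel; a miss falls through to userspace. */
                printf("0xfee00020: %s\n",
                       find_dev(&bus, 0xfee00020ULL) ? "claimed" : "to userspace");
                printf("0xc0000000: %s\n",
                       find_dev(&bus, 0xc0000000ULL) ? "claimed" : "to userspace");
                return 0;
        }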
