* Gregory Haskins ([EMAIL PROTECTED]) wrote:
> The MMIO registration code has been broken out as a new patch from the 
> in-kernel APIC work with the following changes per Avi's request:
> 
> 1) Supports dynamic registration
> 2) Uses gpa_t addresses
> 3) Explicit per-cpu mappings
> 
> In addition, I have added the concept of distinct VCPU and VM level 
> registrations (where VCPU devices will eclipse competing VM registrations (if 
> any)).  This will be key down the road where LAPICs should use VCPU 
> registration, but IOAPICs should use VM level.

Hmm, I'm surprised it makes a difference.

> Signed-off-by: Gregory Haskins <[EMAIL PROTECTED]>
> 
> ---
>  drivers/kvm/kvm.h      |   50 +++++++++++++++++++++++++++++++++++++++++++++
>  drivers/kvm/kvm_main.c |   53 +++++++++++++++++++++++++++++++++++++++--------
>  2 files changed, 94 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
> index fceeb84..3334730 100644
> --- a/drivers/kvm/kvm.h
> +++ b/drivers/kvm/kvm.h
> @@ -236,6 +236,54 @@ struct kvm_pio_request {
>       int rep;
>  };
>  
> +struct kvm_io_device {
> +     unsigned long (*read)(struct kvm_io_device *this,
> +                           gpa_t addr,
> +                           unsigned long length);
> +     void (*write)(struct kvm_io_device *this,
> +                   gpa_t addr,
> +                   unsigned long length,
> +                   unsigned long val);
> +     int (*in_range)(struct kvm_io_device *this, gpa_t addr);
> +
> +     void             *private;

This looks unused; what is it meant for?

> +     struct list_head  link;
> +};
> +
> +/* It would be nice to use something smarter than a linear search, TBD...
> +   Thankfully we dont expect many devices to register (famous last words :),
> +   so until then it will suffice.  At least its abstracted so we can change
> +   in one place.
> + */
> +struct kvm_io_bus {
> +     struct list_head list;
> +};
> +
> +static inline void 
> +kvm_io_bus_init(struct kvm_io_bus *bus)
> +{
> +     INIT_LIST_HEAD(&bus->list);
> +}
> +
> +static inline struct kvm_io_device* 
> +kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
> +{
> +     struct kvm_io_device *pos = NULL;
> +
> +     list_for_each_entry(pos, &bus->list, link) {
> +             if(pos->in_range(pos, addr))

linux style nit, missing space after if --> if (pos->in_range(pos, addr))

> +                     return pos;
> +     }
> +
> +     return NULL;
> +}
> +
> +static inline void 
> +kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
> +{
> +     list_add_tail(&dev->link, &bus->list);
> +}
> +
>  struct kvm_vcpu {
>       struct kvm *kvm;
>       union {
> @@ -294,6 +342,7 @@ struct kvm_vcpu {
>       gpa_t mmio_phys_addr;
>       struct kvm_pio_request pio;
>       void *pio_data;
> +     struct kvm_io_bus mmio_bus;
>  
>       int sigset_active;
>       sigset_t sigset;
> @@ -345,6 +394,7 @@ struct kvm {
>       unsigned long rmap_overflow;
>       struct list_head vm_list;
>       struct file *filp;
> +     struct kvm_io_bus mmio_bus;
>  };
>  
>  struct kvm_stat {
> diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
> index 4473174..da119c0 100644
> --- a/drivers/kvm/kvm_main.c
> +++ b/drivers/kvm/kvm_main.c
> @@ -294,6 +294,7 @@ static struct kvm *kvm_create_vm(void)
>  
>       spin_lock_init(&kvm->lock);
>       INIT_LIST_HEAD(&kvm->active_mmu_pages);
> +     kvm_io_bus_init(&kvm->mmio_bus);

I'd just do INIT_LIST_HEAD, unless you had bigger plans for this wrapper?

>       for (i = 0; i < KVM_MAX_VCPUS; ++i) {
>               struct kvm_vcpu *vcpu = &kvm->vcpus[i];
>  
> @@ -302,6 +303,7 @@ static struct kvm *kvm_create_vm(void)
>               vcpu->kvm = kvm;
>               vcpu->mmu.root_hpa = INVALID_PAGE;
>               INIT_LIST_HEAD(&vcpu->free_pages);
> +             kvm_io_bus_init(&vcpu->mmio_bus);

ditto

>               spin_lock(&kvm_lock);
>               list_add(&kvm->vm_list, &vm_list);
>               spin_unlock(&kvm_lock);
> @@ -1015,12 +1017,30 @@ static int emulator_write_std(unsigned long addr,
>       return X86EMUL_UNHANDLEABLE;
>  }
>  
> +static struct kvm_io_device* vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, 
> +                                             gpa_t addr)
> +{
> +     struct kvm_io_device *mmio_dev;
> +
> +     /* First check the local CPU addresses */
> +     mmio_dev = kvm_io_bus_find_dev(&vcpu->mmio_bus, addr);
> +     if(!mmio_dev) {

Same style nit.  And why do you have a local vs. global check (or dynamic
registration, for that matter)?

> +             /* Then check the entire VM */
> +             mmio_dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
> +     }
> +     
> +     return mmio_dev;
> +}
> +
>  static int emulator_read_emulated(unsigned long addr,
>                                 unsigned long *val,
>                                 unsigned int bytes,
>                                 struct x86_emulate_ctxt *ctxt)
>  {
>       struct kvm_vcpu *vcpu = ctxt->vcpu;
> +     gpa_t gpa;
> +     int i;
> +     struct kvm_io_device *mmio_dev;
>  
>       if (vcpu->mmio_read_completed) {
>               memcpy(val, vcpu->mmio_data, bytes);
> @@ -1029,18 +1049,24 @@ static int emulator_read_emulated(unsigned long addr,
>       } else if (emulator_read_std(addr, val, bytes, ctxt)
>                  == X86EMUL_CONTINUE)
>               return X86EMUL_CONTINUE;
> -     else {
> -             gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
>  
> -             if (gpa == UNMAPPED_GVA)
> -                     return X86EMUL_PROPAGATE_FAULT;
> -             vcpu->mmio_needed = 1;
> -             vcpu->mmio_phys_addr = gpa;
> -             vcpu->mmio_size = bytes;
> -             vcpu->mmio_is_write = 0;
> +     gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
> +     if (gpa == UNMAPPED_GVA)
> +             return vcpu_printf(vcpu, "not present\n"), 
> X86EMUL_PROPAGATE_FAULT;
>  
> -             return X86EMUL_UNHANDLEABLE;
> +     /* Is thie MMIO handled locally? */

s/thie/this/

> +     mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
> +     if(mmio_dev) {

style

> +             *val = mmio_dev->read(mmio_dev, gpa, bytes);
> +             return X86EMUL_CONTINUE;
>       }
> +
> +     vcpu->mmio_needed = 1;
> +     vcpu->mmio_phys_addr = gpa;
> +     vcpu->mmio_size = bytes;
> +     vcpu->mmio_is_write = 0;
> +     
> +     return X86EMUL_UNHANDLEABLE;
>  }
>  
>  static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
> @@ -1070,6 +1096,8 @@ static int emulator_write_emulated(unsigned long addr,
>  {
>       struct kvm_vcpu *vcpu = ctxt->vcpu;
>       gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
> +     int i;
> +     struct kvm_io_device *mmio_dev;
>  
>       if (gpa == UNMAPPED_GVA)
>               return X86EMUL_PROPAGATE_FAULT;
> @@ -1077,6 +1105,13 @@ static int emulator_write_emulated(unsigned long addr,
>       if (emulator_write_phys(vcpu, gpa, val, bytes))
>               return X86EMUL_CONTINUE;
>  
> +     /* Is thie MMIO handled locally? */

s/thie/this/

> +     mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
> +     if(mmio_dev) {

style

> +             mmio_dev->write(mmio_dev, gpa, bytes, val);
> +             return X86EMUL_CONTINUE;
> +     }
> +
>       vcpu->mmio_needed = 1;
>       vcpu->mmio_phys_addr = gpa;
>       vcpu->mmio_size = bytes;

-------------------------------------------------------------------------
Take Surveys. Earn Cash. Influence the Future of IT
Join SourceForge.net's Techsay panel and you'll get the chance to share your
opinions on IT & business topics through brief surveys-and earn cash
http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV
_______________________________________________
kvm-devel mailing list
kvm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/kvm-devel

Reply via email to