Avi's suggestion was to use a single kvm_set_irq() to deal with all interrupts,
including MSI. So here it is.

struct kvm_gsi_route_entry is a mapping from a special gsi (one with
KVM_GSI_ROUTE_ENTRY_MASK set) to an MSI/MSI-X message address/data pair. The
struct can also be extended for other purposes.

Now we support up to 128 gsi_route_entry mappings. The gsi is allocated by the
kernel, and two ioctls are provided to userspace, which is more flexible.

Signed-off-by: Sheng Yang <[email protected]>
---
 include/linux/kvm.h      |   24 +++++++++++++++
 include/linux/kvm_host.h |   22 +++++++++++++
 virt/kvm/irq_comm.c      |   74 ++++++++++++++++++++++++++++++++++++++++++++++
 virt/kvm/kvm_main.c      |   65 ++++++++++++++++++++++++++++++++++++++++
 4 files changed, 185 insertions(+), 0 deletions(-)

diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 71c150f..ba8ab1c 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -399,6 +399,9 @@ struct kvm_trace_rec {
 #if defined(CONFIG_X86)
 #define KVM_CAP_REINJECT_CONTROL 24
 #endif
+#if defined(CONFIG_X86)
+#define KVM_CAP_GSI_ROUTE 25
+#endif
 
 /*
  * ioctls for VM fds
@@ -433,6 +436,10 @@ struct kvm_trace_rec {
 #define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \
                            struct kvm_assigned_irq)
 #define KVM_REINJECT_CONTROL      _IO(KVMIO, 0x71)
+#define KVM_REQUEST_GSI_ROUTE    _IOWR(KVMIO, 0x72, \
+                                       struct kvm_gsi_route_entry)
+/* Userspace only passes the entry in to identify what to free, and reads
+ * nothing back, so the direction is _IOW (userspace writes), not _IOR. */
+#define KVM_FREE_GSI_ROUTE       _IOW(KVMIO, 0x73, \
+                                       struct kvm_gsi_route_entry)
 
 /*
  * ioctls for vcpu fds
@@ -553,4 +560,21 @@ struct kvm_assigned_irq {
 #define KVM_DEV_IRQ_ASSIGN_MSI_ACTION  KVM_DEV_IRQ_ASSIGN_ENABLE_MSI
 #define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI  (1 << 0)
 
+#define KVM_GSI_ROUTE_TYPE_MSI 0x1
+
+/* Userspace view of one GSI routing entry, exchanged via the
+ * KVM_REQUEST_GSI_ROUTE and KVM_FREE_GSI_ROUTE ioctls. */
+struct kvm_gsi_route_entry {
+       __u32 gsi;      /* out: kernel-allocated gsi (KVM_GSI_ROUTE_ENTRY_MASK set) */
+       __u32 type;     /* KVM_GSI_ROUTE_TYPE_MSI for now */
+       __u32 flags;
+       __u32 reserved;
+       union {
+               struct {
+                       __u32 addr_lo;  /* MSI/MSI-X message address, low 32 bits */
+                       __u32 addr_hi;  /* MSI/MSI-X message address, high 32 bits */
+                       __u32 data;     /* MSI/MSI-X message data */
+               } msi;
+               __u32 padding[8];       /* room for future entry types */
+       };
+};
+
 #endif
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a8bcad0..647a6bc 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -136,6 +136,9 @@ struct kvm {
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
 #endif
+       struct hlist_head gsi_route_list;
+#define KVM_NR_GSI_ROUTE_ENTRIES    128
+       DECLARE_BITMAP(gsi_route_bitmap, KVM_NR_GSI_ROUTE_ENTRIES);
 };
 
 /* The guest did something we don't support. */
@@ -336,6 +339,18 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int 
irq,
                                      struct kvm_irq_mask_notifier *kimn);
 void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
 
+/* Bit set in a gsi number to mark it as a routed (table-allocated) gsi,
+ * distinguishing it from ordinary IOAPIC/PIC gsi numbers. */
+#define KVM_GSI_ROUTE_ENTRY_MASK    0x1000000ull
+/* In-kernel counterpart of struct kvm_gsi_route_entry; one per routed gsi,
+ * chained on kvm->gsi_route_list. */
+struct kvm_gsi_route_kernel_entry {
+       u32 gsi;        /* allocated gsi with KVM_GSI_ROUTE_ENTRY_MASK set */
+       u32 type;       /* KVM_GSI_ROUTE_TYPE_* */
+       u32 flags;
+       u32 reserved;
+       union {
+               struct msi_msg msi;     /* valid when type == KVM_GSI_ROUTE_TYPE_MSI */
+       };
+       struct hlist_node link;         /* membership in kvm->gsi_route_list */
+};
+
 void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
@@ -343,6 +358,13 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
 void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian);
 int kvm_request_irq_source_id(struct kvm *kvm);
 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
+/* Install or update a gsi route entry; allocates the gsi on insert. */
+int kvm_update_gsi_route(struct kvm *kvm,
+                        struct kvm_gsi_route_kernel_entry *entry);
+/* Look up a routed gsi; call with kvm->lock held. */
+struct kvm_gsi_route_kernel_entry *kvm_find_gsi_route_entry(struct kvm *kvm,
+                                                           u32 gsi);
+/* Unlink and free one entry; call with kvm->lock held. */
+void kvm_free_gsi_route(struct kvm *kvm,
+                       struct kvm_gsi_route_kernel_entry *entry);
+/* Free all entries; used on VM destruction. Takes kvm->lock itself. */
+void kvm_free_gsi_route_list(struct kvm *kvm);
 
 #ifdef CONFIG_DMAR
 int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 5162a41..8f49113 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -123,3 +123,77 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, 
bool mask)
                        kimn->func(kimn, mask);
 }
 
+/*
+ * Install or update a gsi route entry.
+ *
+ * If entry->gsi names an existing entry, its payload is updated in place.
+ * Otherwise a new gsi number is allocated from the bitmap, stored back into
+ * entry->gsi (with KVM_GSI_ROUTE_ENTRY_MASK set) for the caller, and a new
+ * entry is added to the list.
+ *
+ * Returns 0 on success, -ENOSPC when all gsi numbers are in use, -ENOMEM on
+ * allocation failure.
+ */
+int kvm_update_gsi_route(struct kvm *kvm,
+                        struct kvm_gsi_route_kernel_entry *entry)
+{
+       struct kvm_gsi_route_kernel_entry *found_entry, *new_entry;
+       int r, gsi;
+
+       mutex_lock(&kvm->lock);
+       /* Find whether we need an update or a new entry */
+       found_entry = kvm_find_gsi_route_entry(kvm, entry->gsi);
+       if (found_entry) {
+               /* Copy the payload fields individually: a whole-struct
+                * assignment would also overwrite found_entry->link with
+                * the caller's uninitialized hlist_node and corrupt
+                * kvm->gsi_route_list.  found_entry->gsi already matches. */
+               found_entry->type = entry->type;
+               found_entry->flags = entry->flags;
+               found_entry->reserved = entry->reserved;
+               found_entry->msi = entry->msi;
+       } else {
+               gsi = find_first_zero_bit(kvm->gsi_route_bitmap,
+                                         KVM_NR_GSI_ROUTE_ENTRIES);
+               if (gsi >= KVM_NR_GSI_ROUTE_ENTRIES) {
+                       r = -ENOSPC;
+                       goto out;
+               }
+               new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+               if (!new_entry) {
+                       r = -ENOMEM;
+                       goto out;
+               }
+               /* report the allocated gsi back to the caller */
+               entry->gsi = gsi | KVM_GSI_ROUTE_ENTRY_MASK;
+               new_entry->gsi = entry->gsi;
+               new_entry->type = entry->type;
+               new_entry->flags = entry->flags;
+               new_entry->reserved = entry->reserved;
+               new_entry->msi = entry->msi;
+               __set_bit(gsi, kvm->gsi_route_bitmap);
+               hlist_add_head(&new_entry->link, &kvm->gsi_route_list);
+       }
+       r = 0;
+out:
+       mutex_unlock(&kvm->lock);
+       return r;
+}
+
+/* Call with kvm->lock hold */
+/* Call with kvm->lock held.
+ * Return the routing entry registered for @gsi, or NULL when @gsi does not
+ * carry KVM_GSI_ROUTE_ENTRY_MASK or no matching entry exists. */
+struct kvm_gsi_route_kernel_entry *kvm_find_gsi_route_entry(struct kvm *kvm,
+                                                           u32 gsi)
+{
+       struct kvm_gsi_route_kernel_entry *pos;
+       struct hlist_node *node;
+
+       if (!(gsi & KVM_GSI_ROUTE_ENTRY_MASK))
+               return NULL;
+       hlist_for_each_entry(pos, node, &kvm->gsi_route_list, link)
+               if (pos->gsi == gsi)
+                       return pos;
+       return NULL;
+}
+
+/* Call with kvm->lock hold */
+/* Call with kvm->lock held.
+ * Release @entry: unlink it from the route list, return its gsi number to
+ * the allocation bitmap and free the memory.  NULL is a no-op. */
+void kvm_free_gsi_route(struct kvm *kvm,
+                       struct kvm_gsi_route_kernel_entry *entry)
+{
+       u32 gsi_nr;
+
+       if (entry == NULL)
+               return;
+       gsi_nr = entry->gsi & ~KVM_GSI_ROUTE_ENTRY_MASK;
+       hlist_del(&entry->link);
+       __clear_bit(gsi_nr, kvm->gsi_route_bitmap);
+       kfree(entry);
+}
+
+/* Tear down the whole gsi route list (VM destruction path).  Takes
+ * kvm->lock itself; the _safe iterator is required because each entry
+ * is unlinked and freed while the list is being walked. */
+void kvm_free_gsi_route_list(struct kvm *kvm)
+{
+       struct kvm_gsi_route_kernel_entry *entry;
+       struct hlist_node *pos, *n;
+
+       mutex_lock(&kvm->lock);
+       hlist_for_each_entry_safe(entry, pos, n, &kvm->gsi_route_list, link)
+               kvm_free_gsi_route(kvm, entry);
+       mutex_unlock(&kvm->lock);
+}
+
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 61688a6..7fe1e43 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -839,6 +839,7 @@ static struct kvm *kvm_create_vm(void)
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        kvm_coalesced_mmio_init(kvm);
 #endif
+       INIT_HLIST_HEAD(&kvm->gsi_route_list);
 out:
        return kvm;
 }
@@ -877,6 +878,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
        struct mm_struct *mm = kvm->mm;
 
        kvm_arch_sync_events(kvm);
+       kvm_free_gsi_route_list(kvm);
        spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
@@ -1605,6 +1607,44 @@ static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu 
*vcpu, sigset_t *sigset)
        return 0;
 }
 
+/* Handle KVM_REQUEST_GSI_ROUTE: translate the userspace entry into the
+ * kernel representation, install it, and write the allocated gsi back
+ * into @guest_entry for the caller to copy out.
+ * Returns 0 on success or a negative error code. */
+static int kvm_vm_ioctl_request_gsi_route(struct kvm *kvm,
+                       struct kvm_gsi_route_entry *guest_entry)
+{
+       struct kvm_gsi_route_kernel_entry entry;
+       int r;
+
+       /* Reject types we don't understand; otherwise an entry with an
+        * uninitialized payload would be installed. */
+       if (guest_entry->type != KVM_GSI_ROUTE_TYPE_MSI)
+               return -EINVAL;
+
+       memset(&entry, 0, sizeof(entry));
+       entry.gsi = guest_entry->gsi;
+       entry.type = guest_entry->type;
+       entry.flags = guest_entry->flags;
+       entry.msi.address_lo = guest_entry->msi.addr_lo;
+       entry.msi.address_hi = guest_entry->msi.addr_hi;
+       entry.msi.data = guest_entry->msi.data;
+       r = kvm_update_gsi_route(kvm, &entry);
+       if (r == 0)
+               /* hand the kernel-allocated gsi back to userspace */
+               guest_entry->gsi = entry.gsi;
+       return r;
+}
+
+/* Handle KVM_FREE_GSI_ROUTE: look up and free the entry named by
+ * guest_entry->gsi.  Returns 0 on success, -EINVAL if no such entry. */
+static int kvm_vm_ioctl_free_gsi_route(struct kvm *kvm,
+                       struct kvm_gsi_route_entry *guest_entry)
+{
+       struct kvm_gsi_route_kernel_entry *entry;
+       int r;
+
+       mutex_lock(&kvm->lock);
+       entry = kvm_find_gsi_route_entry(kvm, guest_entry->gsi);
+       if (!entry) {
+               r = -EINVAL;
+               goto out;
+       }
+       kvm_free_gsi_route(kvm, entry);
+       r = 0;  /* fix: r was left uninitialized on the success path */
+out:
+       mutex_unlock(&kvm->lock);
+       return r;
+}
+
 static long kvm_vcpu_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
 {
@@ -1887,6 +1927,31 @@ static long kvm_vm_ioctl(struct file *filp,
                break;
        }
 #endif
+       case KVM_REQUEST_GSI_ROUTE: {
+               struct kvm_gsi_route_entry guest_entry;
+               r = -EFAULT;
+               if (copy_from_user(&guest_entry, argp, sizeof guest_entry))
+                       goto out;
+               r = kvm_vm_ioctl_request_gsi_route(kvm, &guest_entry);
+               if (r)
+                       goto out;
+               r = -EFAULT;
+               if (copy_to_user(argp, &guest_entry,
+                               sizeof(struct kvm_gsi_route_entry)))
+                       goto out;
+               r = 0;
+               break;
+       }
+       case KVM_FREE_GSI_ROUTE: {
+               struct kvm_gsi_route_entry guest_entry;
+               r = -EFAULT;
+               if (copy_from_user(&guest_entry, argp, sizeof guest_entry))
+                       goto out;
+               r = kvm_vm_ioctl_free_gsi_route(kvm, &guest_entry);
+               if (r)
+                       goto out;
+               break;
+       }
        default:
                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
        }
-- 
1.5.4.5

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to