On Tue, 2020-12-08 at 22:35 -0800, Ankur Arora wrote:
> > It looks like Linux doesn't use the per-vCPU upcall vector that you
> > called 'KVM_XEN_CALLBACK_VIA_EVTCHN'. So I'm delivering interrupts via
> > KVM_INTERRUPT as if they were ExtINT....
> > 
> > ... except I'm not. Because the kernel really does expect that to be an
> > ExtINT from a legacy PIC, and kvm_apic_accept_pic_intr() only returns
> > true if LVT0 is set up for EXTINT and unmasked.
> > 
> > I messed around with this hack and increasingly desperate variations on
> > the theme (since this one doesn't cause the IRQ window to be opened to
> > userspace in the first place), but couldn't get anything working:
> 
> "Increasingly desperate variations" about sums up my process as well
> while trying to get the upcall vector working.

:)

So this seems to work, and I think it's about as minimal as it can get.

All it does is implement a kvm_xen_has_interrupt() which checks the
vcpu_info->evtchn_upcall_pending flag, just like Xen does.

With this, my completely userspace implementation of event channels is
working. And of course this forms a basis for adding the minimal
acceleration of IPI/VIRQ that we need to the kernel, too.
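
For the curious, the userspace side of "sending" an event is roughly the
following (just a sketch, not the actual code: the struct layouts are the
2-level ABI from Xen's public headers, assuming a 64-bit guest, and
kick_vcpu() is a stand-in for however the VMM pokes the vCPU thread):

/*
 * Sketch: raise event channel 'port' from the VMM, then rely on the new
 * kvm_xen_has_interrupt() check to deliver the upcall vector on entry.
 */
static void evtchn_raise(struct shared_info *shinfo,
                         struct vcpu_info *vi, unsigned int port)
{
        unsigned long bit = 1UL << (port & 63);
        unsigned int word = port >> 6;

        /* Mark the port pending in the global bitmap. */
        __atomic_fetch_or(&shinfo->evtchn_pending[word], bit,
                          __ATOMIC_SEQ_CST);

        /* If it isn't masked, tell the vCPU which word to scan and set
         * the flag that kvm_xen_has_interrupt() looks at. */
        if (!(shinfo->evtchn_mask[word] & bit)) {
                __atomic_fetch_or(&vi->evtchn_pending_sel, 1UL << word,
                                  __ATOMIC_SEQ_CST);
                __atomic_store_n(&vi->evtchn_upcall_pending, 1,
                                 __ATOMIC_SEQ_CST);
        }

        /* Kick the vCPU so the vector actually gets injected. */
        kick_vcpu();
}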

My only slight concern is the bit in vcpu_enter_guest() where we have
to add the check for kvm_xen_has_interrupt(), because nothing is
setting KVM_REQ_EVENT. I suppose I could look at having something —
even an explicit ioctl from userspace — set that for us.... BUT...

I'm not sure that condition wasn't *already* broken in some cases for
KVM_INTERRUPT anyway. In kvm_vcpu_ioctl_interrupt() we set
vcpu->arch.pending_external_vector and we *do* request KVM_REQ_EVENT,
sure.
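
(For the split-irqchip case that path boils down to roughly:

        vcpu->arch.pending_external_vector = irq->irq;
        kvm_make_request(KVM_REQ_EVENT, vcpu);

so the event *is* advertised... once.)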

But what if vcpu_enter_guest() processes that the first time (and
clears KVM_REQ_EVENT), and then exits for some *other* reason with
interrupts still disabled? Next time through vcpu_enter_guest(), even
though kvm_cpu_has_injectable_intr() is still true, we don't enable the
IRQ window vmexit because KVM_REQ_EVENT got lost, so we don't even call
inject_pending_event().

So instead of just kvm_xen_has_interrupt() in my patch below, I wonder
if we need to use kvm_cpu_has_injectable_intr() to fix the existing
bug? Or am I missing something there and there isn't a bug after all?
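
Concretely, I mean something like this in vcpu_enter_guest() instead
(untested, purely to illustrate the idea; kvm_cpu_has_injectable_intr()
already exists):

        /* Re-open the IRQ window for any injectable interrupt, even if the
         * KVM_REQ_EVENT that advertised it was consumed on an earlier pass
         * and then lost across an exit with interrupts disabled. */
        if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win ||
            kvm_cpu_has_injectable_intr(vcpu)) {
                ...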

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d8716ef27728..4a63f212fdfc 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -902,6 +902,7 @@ struct msr_bitmap_range {
 /* Xen emulation context */
 struct kvm_xen {
        bool long_mode;
+       u8 upcall_vector;
        struct kvm_host_map shinfo_map;
        void *shinfo;
 };
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 814698e5b152..24668b51b5c8 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -14,6 +14,7 @@
 #include "irq.h"
 #include "i8254.h"
 #include "x86.h"
+#include "xen.h"
 
 /*
  * check if there are pending timer events
@@ -56,6 +57,9 @@ int kvm_cpu_has_extint(struct kvm_vcpu *v)
        if (!lapic_in_kernel(v))
                return v->arch.interrupt.injected;
 
+       if (kvm_xen_has_interrupt(v))
+               return 1;
+
        if (!kvm_apic_accept_pic_intr(v))
                return 0;
 
@@ -110,6 +114,9 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
        if (!lapic_in_kernel(v))
                return v->arch.interrupt.nr;
 
+       if (kvm_xen_has_interrupt(v))
+               return v->kvm->arch.xen.upcall_vector;
+
        if (irqchip_split(v->kvm)) {
                int vector = v->arch.pending_external_vector;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ad9eea8f4f26..1711072b3616 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8891,7 +8891,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        kvm_x86_ops.msr_filter_changed(vcpu);
        }
 
-       if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
+       if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win ||
+           kvm_xen_has_interrupt(vcpu)) {
                ++vcpu->stat.req_event;
                kvm_apic_accept_events(vcpu);
                if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 4aa776c1ad57..116422d93d96 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -257,6 +257,22 @@ void kvm_xen_setup_pvclock_page(struct kvm_vcpu *v)
        srcu_read_unlock(&v->kvm->srcu, idx);
 }
 
+int kvm_xen_has_interrupt(struct kvm_vcpu *v)
+{
+       int rc = 0;
+
+       if (v->kvm->arch.xen.upcall_vector) {
+               int idx = srcu_read_lock(&v->kvm->srcu);
+               struct vcpu_info *vcpu_info = xen_vcpu_info(v);
+
+               if (vcpu_info && READ_ONCE(vcpu_info->evtchn_upcall_pending))
+                       rc = 1;
+
+               srcu_read_unlock(&v->kvm->srcu, idx);
+       }
+       return rc;
+}
+
 static int vcpu_attr_loc(struct kvm_vcpu *vcpu, u16 type,
                         struct kvm_host_map **map, void ***hva, size_t *sz)
 {
@@ -338,6 +354,14 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                break;
        }
 
+       case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
+               if (data->u.vector < 0x10)
+                       return -EINVAL;
+
+               kvm->arch.xen.upcall_vector = data->u.vector;
+               r = 0;
+               break;
+
        default:
                break;
        }
@@ -386,6 +410,11 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                break;
        }
 
+       case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
+               data->u.vector = kvm->arch.xen.upcall_vector;
+               r = 0;
+               break;
+
        default:
                break;
        }
diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h
index ccd6002f55bc..1f599342f02c 100644
--- a/arch/x86/kvm/xen.h
+++ b/arch/x86/kvm/xen.h
@@ -25,6 +25,7 @@ static inline struct kvm_vcpu *xen_vcpu_to_vcpu(struct kvm_vcpu_xen *xen_vcpu)
 void kvm_xen_setup_pvclock_page(struct kvm_vcpu *vcpu);
 void kvm_xen_setup_runstate_page(struct kvm_vcpu *vcpu);
 void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu);
+int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
 int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
 int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
 int kvm_xen_hypercall(struct kvm_vcpu *vcpu);
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 1047364d1adf..113279fa9e1e 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1587,6 +1587,7 @@ struct kvm_xen_hvm_attr {
 
        union {
                __u8 long_mode;
+               __u8 vector;
                struct {
                        __u64 gfn;
                } shared_info;
@@ -1604,6 +1605,7 @@ struct kvm_xen_hvm_attr {
 #define KVM_XEN_ATTR_TYPE_VCPU_INFO            0x2
 #define KVM_XEN_ATTR_TYPE_VCPU_TIME_INFO       0x3
 #define KVM_XEN_ATTR_TYPE_VCPU_RUNSTATE                0x4
+#define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR                0x5
 
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
