Thanks for your comments. I will modify it in the next patch.

Gleb Natapov wrote on 2013-01-24:
> On Wed, Jan 23, 2013 at 10:47:26PM +0800, Yang Zhang wrote:
>> From: Yang Zhang <[email protected]>
>> 
>> Virtual interrupt delivery avoids KVM to inject vAPIC interrupts
>> manually, which is fully taken care of by the hardware. This needs
>> some special awareness into the existing interrupt injection path:
>> 
>> - for pending interrupt, instead of direct injection, we may need
>>   update architecture specific indicators before resuming to guest.
>> - A pending interrupt, which is masked by ISR, should also be
>>   considered in the above update action, since hardware will decide
>>   when to inject it at the right time. Currently has_interrupt and
>>   get_interrupt only return a valid vector from injection p.o.v.
>> Signed-off-by: Kevin Tian <[email protected]>
>> Signed-off-by: Yang Zhang <[email protected]>
>> ---
>>  arch/ia64/kvm/lapic.h           |    6 ++
>>  arch/x86/include/asm/kvm_host.h |    7 ++ arch/x86/include/asm/vmx.h  
>>     |   11 +++ arch/x86/kvm/irq.c              |   56 +++++++++++++++--
>>  arch/x86/kvm/lapic.c            |  106 ++++++++++++++++++++++++-------
>>  arch/x86/kvm/lapic.h            |   27 ++++++++ arch/x86/kvm/svm.c    
>>           |   31 +++++++++ arch/x86/kvm/vmx.c              |  133
>>  ++++++++++++++++++++++++++++++++++++--- arch/x86/kvm/x86.c            
>>   |   11 +++- include/linux/kvm_host.h        |    3 +
>>  virt/kvm/ioapic.c               |   39 +++++++++++ virt/kvm/ioapic.h  
>>              |    4 + virt/kvm/irq_comm.c             |   25 +++++++
>>  virt/kvm/kvm_main.c             |    5 ++ 14 files changed, 425
>>  insertions(+), 39 deletions(-)
>> diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h
>> index c5f92a9..c3e2935 100644
>> --- a/arch/ia64/kvm/lapic.h
>> +++ b/arch/ia64/kvm/lapic.h
>> @@ -27,4 +27,10 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct
> kvm_lapic_irq *irq);
>>  #define kvm_apic_present(x) (true)
>>  #define kvm_lapic_enabled(x) (true)
>> +static inline bool kvm_apic_vid_enabled(void)
>> +{
>> +    /* IA64 has no apicv supporting, do nothing here */
>> +    return false;
>> +}
>> +
>>  #endif
>> diff --git a/arch/x86/include/asm/kvm_host.h
>> b/arch/x86/include/asm/kvm_host.h index e1306c1..a94f8d7 100644 ---
>> a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h
>> @@ -692,6 +692,12 @@ struct kvm_x86_ops {
>>      void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
>>      void (*enable_irq_window)(struct kvm_vcpu *vcpu);
>>      void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
>> +    int (*vcpu_has_apicv)(struct kvm *kvm);
>> +    void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
>> +    void (*hwapic_isr_update)(struct kvm *kvm, int isr);
>> +    void (*hwapic_vector_intercept_on_eoi)(struct kvm_vcpu *vcpu,
>> +                                    u32 vector, u64 *eoi_exit_bitmap);
>> +    void (*update_eoi_exitmap)(struct kvm_vcpu *vcpu);
>>      void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
>>      int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);        int
>>  (*get_tdp_level)(void); @@ -987,6 +993,7 @@ int kvm_age_hva(struct kvm
>>  *kvm, unsigned long hva); int kvm_test_age_hva(struct kvm *kvm,
>>  unsigned long hva); void kvm_set_spte_hva(struct kvm *kvm, unsigned
>>  long hva, pte_t pte); int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
>>  +int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v); int
>>  kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); int
>>  kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); int
>>  kvm_cpu_get_interrupt(struct kvm_vcpu *v);
>> diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
>> index 0a54df0..694586c 100644
>> --- a/arch/x86/include/asm/vmx.h
>> +++ b/arch/x86/include/asm/vmx.h
>> @@ -62,6 +62,7 @@
>>  #define EXIT_REASON_MCE_DURING_VMENTRY  41 #define
>>  EXIT_REASON_TPR_BELOW_THRESHOLD 43 #define EXIT_REASON_APIC_ACCESS    
>>      44 +#define EXIT_REASON_EOI_INDUCED         45 #define
>>  EXIT_REASON_EPT_VIOLATION       48 #define EXIT_REASON_EPT_MISCONFIG  
>>      49 #define EXIT_REASON_WBINVD              54 @@ -144,6 +145,7 @@
>>  #define SECONDARY_EXEC_WBINVD_EXITING               0x00000040 #define
>>  SECONDARY_EXEC_UNRESTRICTED_GUEST   0x00000080 #define
>>  SECONDARY_EXEC_APIC_REGISTER_VIRT       0x00000100 +#define
>>  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY    0x00000200 #define
>>  SECONDARY_EXEC_PAUSE_LOOP_EXITING   0x00000400 #define
>>  SECONDARY_EXEC_ENABLE_INVPCID               0x00001000
>> @@ -181,6 +183,7 @@ enum vmcs_field {
>>      GUEST_GS_SELECTOR               = 0x0000080a,   GUEST_LDTR_SELECTOR   
>>           = 0x0000080c,      GUEST_TR_SELECTOR               = 0x0000080e,
>>  +   GUEST_INTR_STATUS               = 0x00000810,   HOST_ES_SELECTOR     
>>            = 0x00000c00,     HOST_CS_SELECTOR                = 0x00000c02,
>>      HOST_SS_SELECTOR                = 0x00000c04, @@ -208,6 +211,14 @@
>>  enum vmcs_field {   APIC_ACCESS_ADDR_HIGH           = 0x00002015,   
>> EPT_POINTER  
>>                    = 0x0000201a,     EPT_POINTER_HIGH                =
>>  0x0000201b,
>> +    EOI_EXIT_BITMAP0                = 0x0000201c,
>> +    EOI_EXIT_BITMAP0_HIGH           = 0x0000201d,
>> +    EOI_EXIT_BITMAP1                = 0x0000201e,
>> +    EOI_EXIT_BITMAP1_HIGH           = 0x0000201f,
>> +    EOI_EXIT_BITMAP2                = 0x00002020,
>> +    EOI_EXIT_BITMAP2_HIGH           = 0x00002021,
>> +    EOI_EXIT_BITMAP3                = 0x00002022,
>> +    EOI_EXIT_BITMAP3_HIGH           = 0x00002023,
>>      GUEST_PHYSICAL_ADDRESS          = 0x00002400,
>>      GUEST_PHYSICAL_ADDRESS_HIGH     = 0x00002401,
>>      VMCS_LINK_POINTER               = 0x00002800,
>> diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
>> index b111aee..484bc87 100644
>> --- a/arch/x86/kvm/irq.c
>> +++ b/arch/x86/kvm/irq.c
>> @@ -38,6 +38,38 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu
> *vcpu)
>>  EXPORT_SYMBOL(kvm_cpu_has_pending_timer);
>>  
>>  /*
>> + * check if there is pending interrupt from
>> + * non-APIC source without intack.
>> + */
>> +static int kvm_cpu_has_extint(struct kvm_vcpu *v)
>> +{
>> +    if (kvm_apic_accept_pic_intr(v))
>> +            return pic_irqchip(v->kvm)->output;     /* PIC */
>> +    else
>> +            return 0;
>> +}
>> +
>> +/*
>> + * check if there is injectable interrupt:
>> + * when virtual interrupt delivery is enabled,
>> + * interrupts from the apic will be handled by hardware,
>> + * so we don't need to check them here.
>> + */
>> +int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
>> +{
>> +    if (!irqchip_in_kernel(v->kvm))
>> +            return v->arch.interrupt.pending;
>> +
>> +    if (kvm_cpu_has_extint(v))
>> +            return 1;
>> +
>> +    if (kvm_apic_vid_enabled(v->kvm))
>> +            return 0;
>> +
>> +    return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
>> +}
>> +
>> +/*
>>   * check if there is pending interrupt without
>>   * intack.
>>   */
>> @@ -46,27 +78,41 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
>>      if (!irqchip_in_kernel(v->kvm))
>>              return v->arch.interrupt.pending;
>> -    if (kvm_apic_accept_pic_intr(v) && pic_irqchip(v->kvm)->output)
>> -            return pic_irqchip(v->kvm)->output;     /* PIC */
>> +    if (kvm_cpu_has_extint(v))
>> +            return 1;
>> 
>>      return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
>>  }
>>  EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
>>  
>>  /*
>> + * Read pending interrupt(from non-APIC source)
>> + * vector and intack.
>> + */
>> +static int kvm_cpu_get_extint(struct kvm_vcpu *v)
>> +{
>> +    if (kvm_cpu_has_extint(v))
>> +            return kvm_pic_read_irq(v->kvm); /* PIC */
>> +    return -1;
>> +}
>> +
>> +/*
>>   * Read pending interrupt vector and intack.
>>   */
>>  int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
>>  {
>> +    int vector;
>> +
>>      if (!irqchip_in_kernel(v->kvm))
>>              return v->arch.interrupt.nr;
>> -    if (kvm_apic_accept_pic_intr(v) && pic_irqchip(v->kvm)->output)
>> -            return kvm_pic_read_irq(v->kvm);        /* PIC */
>> +    vector = kvm_cpu_get_extint(v);
>> +
>> +    if (kvm_apic_vid_enabled(v->kvm) || vector != -1)
>> +            return vector;                  /* PIC */
>> 
>>      return kvm_get_apic_interrupt(v);       /* APIC */
>>  }
>> -EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
>> 
>>  void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
>>  {
>> diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
>> index 83a9547..584d628 100644
>> --- a/arch/x86/kvm/lapic.c
>> +++ b/arch/x86/kvm/lapic.c
>> @@ -150,21 +150,52 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
>>      return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
>>  }
>> -static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr)
>> +void kvm_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
>> +                            struct kvm_lapic_irq *irq,
>> +                            u64 *eoi_exit_bitmap)
>>  {
>> -    u16 cid;
>> -    ldr >>= 32 - map->ldr_bits;
>> -    cid = (ldr >> map->cid_shift) & map->cid_mask;
>> +    struct kvm_lapic **dst;
>> +    struct kvm_apic_map *map;
>> +    unsigned long bitmap = 1;
>> +    int i;
>> 
>> -    BUG_ON(cid >= ARRAY_SIZE(map->logical_map));
>> +    rcu_read_lock();
>> +    map = rcu_dereference(vcpu->kvm->arch.apic_map);
>> 
>> -    return cid;
>> -}
>> +    if (unlikely(!map)) {
>> +            kvm_x86_ops->hwapic_vector_intercept_on_eoi(vcpu, irq->vector,
>> +                            eoi_exit_bitmap);
> You do not need callback to set a bit in eoi_exit_bitmap.
> 
>> +            goto out;
>> +    }
>> 
>> -static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
>> -{
>> -    ldr >>= (32 - map->ldr_bits);
>> -    return ldr & map->lid_mask;
>> +    if (irq->dest_mode == 0) { /* physical mode */
>> +            if (irq->delivery_mode == APIC_DM_LOWEST ||
>> +                            irq->dest_id == 0xff) {
>> +                    kvm_x86_ops->hwapic_vector_intercept_on_eoi(vcpu,
>> +                                            irq->vector, eoi_exit_bitmap);
>> +                    goto out;
>> +            }
>> +            dst = &map->phys_map[irq->dest_id & 0xff];
>> +    } else {
>> +            u32 mda = irq->dest_id << (32 - map->ldr_bits);
>> +
>> +            dst = map->logical_map[apic_cluster_id(map, mda)];
>> +
>> +            bitmap = apic_logical_id(map, mda);
>> +    }
>> +
>> +    for_each_set_bit(i, &bitmap, 16) {
>> +            if (!dst[i])
>> +                    continue;
>> +            if (dst[i]->vcpu == vcpu) {
>> +                    kvm_x86_ops->hwapic_vector_intercept_on_eoi(vcpu,
>> +                                            irq->vector, eoi_exit_bitmap);
>> +                    break;
>> +            }
>> +    }
>> +
>> +out:
>> +    rcu_read_unlock();
>>  }
>>  
>>  static void recalculate_apic_map(struct kvm *kvm)
>> @@ -230,6 +261,8 @@ out:
>> 
>>      if (old)
>>              kfree_rcu(old, rcu);
>> +
>> +    kvm_ioapic_make_eoibitmap_request(kvm);
>>  }
>>  
>>  static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id)
>> @@ -345,6 +378,9 @@ static inline int apic_find_highest_irr(struct kvm_lapic
> *apic)
>>  {
>>      int result;
>> +    /* Note that irr_pending is just a hint. It will be always
>> +     * true with virtual interrupt delivery enabled.
>> +     */
> Still not correct comment format. Should be:
> /*
>  * line one
>  * line two
>  */
>>      if (!apic->irr_pending)
>>              return -1;
>> @@ -461,6 +497,8 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
>>  static inline int apic_find_highest_isr(struct kvm_lapic *apic)
>>  {
>>      int result;
>> +
>> +    /* Note that isr_count is always 1 with vid enabled */
>>      if (!apic->isr_count)
>>              return -1;
>>      if (likely(apic->highest_isr_cache != -1))
>> @@ -740,6 +778,19 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1,
> struct kvm_vcpu *vcpu2)
>>      return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
>>  }
>> +static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
>> +{
>> +    if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
>> +        kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
>> +            int trigger_mode;
>> +            if (apic_test_vector(vector, apic->regs + APIC_TMR))
>> +                    trigger_mode = IOAPIC_LEVEL_TRIG;
>> +            else
>> +                    trigger_mode = IOAPIC_EDGE_TRIG;
>> +            kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
>> +    }
>> +}
>> +
>>  static int apic_set_eoi(struct kvm_lapic *apic) {   int vector =
>>  apic_find_highest_isr(apic); @@ -756,19 +807,26 @@ static int
>>  apic_set_eoi(struct kvm_lapic *apic)        apic_clear_isr(vector, apic);
>>      apic_update_ppr(apic);
>> -    if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
>> -        kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
>> -            int trigger_mode;
>> -            if (apic_test_vector(vector, apic->regs + APIC_TMR))
>> -                    trigger_mode = IOAPIC_LEVEL_TRIG;
>> -            else
>> -                    trigger_mode = IOAPIC_EDGE_TRIG;
>> -            kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
>> -    }
>> +    kvm_ioapic_send_eoi(apic, vector);
>>      kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
>>      return vector;
>>  }
>> +/*
>> + * this interface assumes a trap-like exit, which has already finished
>> + * desired side effect including vISR and vPPR update.
>> + */
>> +void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
>> +{
>> +    struct kvm_lapic *apic = vcpu->arch.apic;
>> +
>> +    trace_kvm_eoi(apic, vector);
>> +
>> +    kvm_ioapic_send_eoi(apic, vector);
>> +    kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
>> +}
>> +EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
>> +
>>  static void apic_send_ipi(struct kvm_lapic *apic) {         u32 icr_low =
>>  kvm_apic_get_reg(apic, APIC_ICR); @@ -1380,8 +1438,8 @@ void
>>  kvm_lapic_reset(struct kvm_vcpu *vcpu)              apic_set_reg(apic, 
>> APIC_ISR +
>>  0x10 * i, 0);               apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);     
>> }
>> -    apic->irr_pending = false;
>> -    apic->isr_count = 0;
>> +    apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm);
>> +    apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm);
>>      apic->highest_isr_cache = -1;
>>      update_divide_count(apic);
>>      atomic_set(&apic->lapic_timer.pending, 0);
>> @@ -1596,8 +1654,10 @@ void kvm_apic_post_state_restore(struct kvm_vcpu
> *vcpu,
>>      update_divide_count(apic);
>>      start_apic_timer(apic);
>>      apic->irr_pending = true;
>> -    apic->isr_count = count_vectors(apic->regs + APIC_ISR);
>> +    apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ?
>> +                            1 : count_vectors(apic->regs + APIC_ISR);
>>      apic->highest_isr_cache = -1;
>> +    kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
> apic_find_highest_isr(apic));
>>      kvm_make_request(KVM_REQ_EVENT, vcpu);
>>  }
>> diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
>> index 9a8ee22..abadbaa 100644
>> --- a/arch/x86/kvm/lapic.h
>> +++ b/arch/x86/kvm/lapic.h
>> @@ -65,6 +65,7 @@ u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu
> *vcpu);
>>  void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
>>  
>>  void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
>> +void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);
>> 
>>  void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
>>  void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
>> @@ -126,4 +127,30 @@ static inline int kvm_lapic_enabled(struct kvm_vcpu
> *vcpu)
>>      return kvm_apic_present(vcpu) &&
>>  kvm_apic_sw_enabled(vcpu->arch.apic); }
>> +static inline bool kvm_apic_vid_enabled(struct kvm *kvm)
>> +{
>> +    return kvm_x86_ops->vcpu_has_apicv(kvm);
>> +}
>> +
>> +static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr)
>> +{
>> +    u16 cid;
>> +    ldr >>= 32 - map->ldr_bits;
>> +    cid = (ldr >> map->cid_shift) & map->cid_mask;
>> +
>> +    BUG_ON(cid >= ARRAY_SIZE(map->logical_map));
>> +
>> +    return cid;
>> +}
>> +
>> +static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
>> +{
>> +    ldr >>= (32 - map->ldr_bits);
>> +    return ldr & map->lid_mask;
>> +}
>> +
>> +void kvm_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
>> +                            struct kvm_lapic_irq *irq,
>> +                            u64 *eoi_bitmap);
>> +
>>  #endif
>> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
>> index 38407e9..0b4f38b 100644
>> --- a/arch/x86/kvm/svm.c
>> +++ b/arch/x86/kvm/svm.c
>> @@ -3576,6 +3576,32 @@ static void svm_set_virtual_x2apic_mode(struct
> kvm_vcpu *vcpu, bool set)
>>      return;
>>  }
>> +static int svm_vcpu_has_apicv(struct kvm *kvm)
>> +{
>> +    return 0;
>> +}
>> +
>> +static void svm_update_eoi_exitmap(struct kvm_vcpu *vcpu)
>> +{
>> +    return;
>> +}
>> +
>> +static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
>> +{
>> +    return;
>> +}
>> +
>> +static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
>> +{
>> +    return;
>> +}
>> +
>> +static void svm_hwapic_vector_intercept_on_eoi(struct kvm_vcpu *vcpu,
>> +                                    u32 vector, u64 *eoi_exit_bitmap)
>> +{
>> +    return;
>> +}
>> +
>>  static int svm_nmi_allowed(struct kvm_vcpu *vcpu) {         struct vcpu_svm
>>  *svm = to_svm(vcpu); @@ -4296,6 +4322,11 @@ static struct kvm_x86_ops
>>  svm_x86_ops = {     .enable_irq_window = enable_irq_window,
>>      .update_cr8_intercept = update_cr8_intercept,
>>      .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
>> +    .vcpu_has_apicv = svm_vcpu_has_apicv, + .update_eoi_exitmap =
>> svm_update_eoi_exitmap, +    .hwapic_irr_update = svm_hwapic_irr_update,
>> +    .hwapic_isr_update = svm_hwapic_isr_update,
>> +    .hwapic_vector_intercept_on_eoi = svm_hwapic_vector_intercept_on_eoi,
>> 
>>      .set_tss_addr = svm_set_tss_addr,
>>      .get_tdp_level = get_npt_level,
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index c2bc989..b04b749 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -84,8 +84,8 @@ module_param(vmm_exclusive, bool, S_IRUGO);
>>  static bool __read_mostly fasteoi = 1;
>>  module_param(fasteoi, bool, S_IRUGO);
>> -static bool __read_mostly enable_apicv_reg = 1;
>> -module_param(enable_apicv_reg, bool, S_IRUGO);
>> +static bool __read_mostly enable_apicv_reg_vid = 1;
>> +module_param(enable_apicv_reg_vid, bool, S_IRUGO);
>> 
>>  /*
>>   * If nested=1, nested virtualization is supported, i.e., guests may use
>> @@ -781,6 +781,12 @@ static inline bool
> cpu_has_vmx_apic_register_virt(void)
>>              SECONDARY_EXEC_APIC_REGISTER_VIRT;
>>  }
>> +static inline bool cpu_has_vmx_virtual_intr_delivery(void)
>> +{
>> +    return vmcs_config.cpu_based_2nd_exec_ctrl &
>> +            SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
>> +}
>> +
>>  static inline bool cpu_has_vmx_flexpriority(void)
>>  {
>>      return cpu_has_vmx_tpr_shadow() &&
>> @@ -2570,7 +2576,8 @@ static __init int setup_vmcs_config(struct
> vmcs_config *vmcs_conf)
>>                      SECONDARY_EXEC_PAUSE_LOOP_EXITING |
>>                      SECONDARY_EXEC_RDTSCP |
>>                      SECONDARY_EXEC_ENABLE_INVPCID |
>> -                    SECONDARY_EXEC_APIC_REGISTER_VIRT;
>> +                    SECONDARY_EXEC_APIC_REGISTER_VIRT |
>> +                    SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
>>              if (adjust_vmx_controls(min2, opt2,
>>                                      MSR_IA32_VMX_PROCBASED_CTLS2,
>>                                      &_cpu_based_2nd_exec_control) < 0)
>> @@ -2585,7 +2592,8 @@ static __init int setup_vmcs_config(struct
> vmcs_config *vmcs_conf)
>>      if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
>>              _cpu_based_2nd_exec_control &= ~(
>>                              SECONDARY_EXEC_APIC_REGISTER_VIRT |
>> -                            SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
>> +                            SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
>> +                            SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
>> 
>>      if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {          
>> /*
>>  CR3 accesses and invlpg don't need to cause VM Exits when EPT @@
>>  -2784,8 +2792,9 @@ static __init int hardware_setup(void)   if
>>  (!cpu_has_vmx_ple())                ple_gap = 0;
>> -    if (!cpu_has_vmx_apic_register_virt())
>> -            enable_apicv_reg = 0;
>> +    if (!cpu_has_vmx_apic_register_virt() ||
>> +                            !cpu_has_vmx_virtual_intr_delivery())
>> +            enable_apicv_reg_vid = 0;
>> 
>>      if (nested)             nested_vmx_setup_ctls_msrs(); @@ -3926,6 
>> +3935,11 @@
>>  static u32 vmx_exec_control(struct vcpu_vmx *vmx)   return
>>  exec_control; }
>> +static int vmx_vcpu_has_apicv(struct kvm *kvm)
> Name is misleading. It is not about vcpu, it's about vm. Change name of
> the callback too.
> 
>> +{
>> +    return enable_apicv_reg_vid && irqchip_in_kernel(kvm);
>> +}
>> +
>>  static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
>>  {
>>      u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
>> @@ -3943,7 +3957,7 @@ static u32 vmx_secondary_exec_control(struct
> vcpu_vmx *vmx)
>>              exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
>>      if (!ple_gap)
>>              exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
>> -    if (!enable_apicv_reg || !irqchip_in_kernel(vmx->vcpu.kvm))
>> +    if (!vmx_vcpu_has_apicv(vmx->vcpu.kvm))
>>              exec_control &= ~SECONDARY_EXEC_APIC_REGISTER_VIRT;     
>> exec_control &=
>>  ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;     return exec_control; @@
>>  -3990,6 +4004,15 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
>>                              vmx_secondary_exec_control(vmx));       }
>> +    if (enable_apicv_reg_vid) {
>> +            vmcs_write64(EOI_EXIT_BITMAP0, 0);
>> +            vmcs_write64(EOI_EXIT_BITMAP1, 0);
>> +            vmcs_write64(EOI_EXIT_BITMAP2, 0);
>> +            vmcs_write64(EOI_EXIT_BITMAP3, 0);
>> +
>> +            vmcs_write16(GUEST_INTR_STATUS, 0);
>> +    }
>> +
>>      if (ple_gap) {
>>              vmcs_write32(PLE_GAP, ple_gap);
>>              vmcs_write32(PLE_WINDOW, ple_window);
>> @@ -4907,6 +4930,16 @@ static int handle_apic_access(struct kvm_vcpu
> *vcpu)
>>      return emulate_instruction(vcpu, 0) == EMULATE_DONE;
>>  }
>> +static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
>> +{
>> +    unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
>> +    int vector = exit_qualification & 0xff;
>> +
>> +    /* EOI-induced VM exit is trap-like and thus no need to adjust IP */
>> +    kvm_apic_set_eoi_accelerated(vcpu, vector);
>> +    return 1;
>> +}
>> +
>>  static int handle_apic_write(struct kvm_vcpu *vcpu)
>>  {
>>      unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
>> @@ -5852,6 +5885,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct
> kvm_vcpu *vcpu) = {
>>      [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
>>      [EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
>>      [EXIT_REASON_APIC_WRITE]              = handle_apic_write,
>>  +   [EXIT_REASON_EOI_INDUCED]             = handle_apic_eoi_induced,
>>      [EXIT_REASON_WBINVD]                  = handle_wbinvd,
>>      [EXIT_REASON_XSETBV]                  = handle_xsetbv,
>>      [EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
>> @@ -6193,6 +6227,9 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
>> 
>>  static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
>>  {
>> +    if (vmx_vcpu_has_apicv(vcpu->kvm))
>> +            return;
>> +
> What is it here? Where is this part of the v11 submissions went?
> 
>  +     if (enable_apicv_reg_vid)
>  +             kvm_x86_ops->update_cr8_intercept = NULL;
>  +     else
>  +             kvm_x86_ops->update_apic_irq = NULL;
> Revert to v11 behaviour.
> 
>>      if (irr == -1 || tpr < irr) {
>>              vmcs_write32(TPR_THRESHOLD, 0);
>>              return;
>> @@ -6209,7 +6246,8 @@ static void vmx_set_virtual_x2apic_mode(struct
> kvm_vcpu *vcpu, bool set)
>>      /* There is not point to enable virtualize x2apic without enable
>>       * apicv
>>       */
>> -    if (!cpu_has_vmx_virtualize_x2apic_mode() || !enable_apicv_reg)
>> +    if (!cpu_has_vmx_virtualize_x2apic_mode() ||
>> +                            !vmx_vcpu_has_apicv(vcpu->kvm))
>>              return;
>>  
>>      if (set) {
>> @@ -6235,6 +6273,74 @@ static void vmx_set_virtual_x2apic_mode(struct
> kvm_vcpu *vcpu, bool set)
>>      vmx_set_msr_bitmap(vcpu);
>>  }
>> +static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
>> +{
>> +    u16 status;
>> +    u8 old;
>> +
>> +    if (!vmx_vcpu_has_apicv(kvm))
>> +            return;
>> +
>> +    if (isr == -1)
>> +            isr = 0;
>> +
>> +    status = vmcs_read16(GUEST_INTR_STATUS);
>> +    old = status >> 8;
>> +    if (isr != old) {
>> +            status &= 0xff;
>> +            status |= isr << 8;
>> +            vmcs_write16(GUEST_INTR_STATUS, status);
>> +    }
>> +}
>> +
>> +static void vmx_set_rvi(int vector)
>> +{
>> +    u16 status;
>> +    u8 old;
>> +
>> +    status = vmcs_read16(GUEST_INTR_STATUS);
>> +    old = (u8)status & 0xff;
>> +    if ((u8)vector != old) {
>> +            status &= ~0xff;
>> +            status |= (u8)vector;
>> +            vmcs_write16(GUEST_INTR_STATUS, status);
>> +    }
>> +}
>> +
>> +static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
>> +{
>> +    if (!vmx_vcpu_has_apicv(vcpu->kvm) || max_irr == -1)
>> +            return;
>> +
>> +    vmx_set_rvi(max_irr);
>> +}
>> +
>> +static void vmx_hwapic_vector_intercept_on_eoi(struct kvm_vcpu *vcpu,
>> +                                    u32 vector, u64 *eoi_exit_bitmap)
>> +{
>> +    BUG_ON(vector > 255);
>> +    __set_bit(vector, (unsigned long *)eoi_exit_bitmap);
> Nothing that warrants this to be vmx callback is here.
> 
>> +} + +static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64
>> *eoi_exit_bitmap) +{ +       vmcs_write64(EOI_EXIT_BITMAP0,
>> eoi_exit_bitmap[0]); +       vmcs_write64(EOI_EXIT_BITMAP1,
>> eoi_exit_bitmap[1]); +       vmcs_write64(EOI_EXIT_BITMAP2,
>> eoi_exit_bitmap[2]); +       vmcs_write64(EOI_EXIT_BITMAP3,
>> eoi_exit_bitmap[3]); +} + +static void vmx_update_eoi_exitmap(struct
>> kvm_vcpu *vcpu) +{ + u64 eoi_exit_bitmap[4]; + +     /* clear eoi exit
>> bitmap */ +  memset(eoi_exit_bitmap, 0, 32); +
>> +    kvm_ioapic_calculate_eoi_exitmap(vcpu, eoi_exit_bitmap);
>> +    vmx_load_eoi_exitmap(vcpu, eoi_exit_bitmap); +}
> Same as above. Why is this vmx callback?
> 
>> +
>>  static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) {        u32
>>  exit_intr_info; @@ -7499,6 +7605,11 @@ static struct kvm_x86_ops
>>  vmx_x86_ops = {     .enable_irq_window = enable_irq_window,
>>      .update_cr8_intercept = update_cr8_intercept,
>>      .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
>> +    .vcpu_has_apicv = vmx_vcpu_has_apicv, + .update_eoi_exitmap =
>> vmx_update_eoi_exitmap, +    .hwapic_vector_intercept_on_eoi =
>> vmx_hwapic_vector_intercept_on_eoi, +        .hwapic_irr_update =
>> vmx_hwapic_irr_update, +     .hwapic_isr_update = vmx_hwapic_isr_update,
>> 
>>      .set_tss_addr = vmx_set_tss_addr,       .get_tdp_level = get_ept_level, 
>> @@
>>  -7601,7 +7712,7 @@ static int __init vmx_init(void)
>>      memcpy(vmx_msr_bitmap_longmode_x2apic,                  
>> vmx_msr_bitmap_longmode,
>>  PAGE_SIZE);
>> -    if (enable_apicv_reg) {
>> +    if (enable_apicv_reg_vid) {
>>              for (msr = 0x800; msr <= 0x8ff; msr++)
>>                      vmx_disable_intercept_msr_read_x2apic(msr);
>> @@ -7613,6 +7724,10 @@ static int __init vmx_init(void)
>>              vmx_enable_intercept_msr_read_x2apic(0x839);
>>              /* TPR */
>>              vmx_disable_intercept_msr_write_x2apic(0x808);
>> +            /* EOI */
>> +            vmx_disable_intercept_msr_write_x2apic(0x80b);
>> +            /* SELF-IPI */
>> +            vmx_disable_intercept_msr_write_x2apic(0x83f);
>>      }
>>  
>>      if (enable_ept) {
>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>> index 5483228..9c77343 100644
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -5525,7 +5525,7 @@ static void inject_pending_event(struct kvm_vcpu
> *vcpu)
>>                      vcpu->arch.nmi_injected = true;
>>                      kvm_x86_ops->set_nmi(vcpu);
>>              }
>> -    } else if (kvm_cpu_has_interrupt(vcpu)) {
>> +    } else if (kvm_cpu_has_injectable_intr(vcpu)) {
>>              if (kvm_x86_ops->interrupt_allowed(vcpu)) {
>>                      kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),  
>>                                    
>>  false); @@ -5646,6 +5646,8 @@ static int vcpu_enter_guest(struct
>>  kvm_vcpu *vcpu)                     kvm_handle_pmu_event(vcpu);             
>> if
>>  (kvm_check_request(KVM_REQ_PMI, vcpu))                      
>> kvm_deliver_pmi(vcpu);
>> +            if (kvm_check_request(KVM_REQ_EOIBITMAP, vcpu))
>> +                    kvm_x86_ops->update_eoi_exitmap(vcpu);
>>      }
>>  
>>      if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
>> @@ -5654,10 +5656,15 @@ static int vcpu_enter_guest(struct kvm_vcpu
> *vcpu)
>>              /* enable NMI/IRQ window open exits if needed */
>>              if (vcpu->arch.nmi_pending)
>>                      kvm_x86_ops->enable_nmi_window(vcpu);
>> -            else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
>> +            else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
>>                      kvm_x86_ops->enable_irq_window(vcpu);
>>  
>>              if (kvm_lapic_enabled(vcpu)) {
>> +                    /* Update architecture specific hints for APIC
>> +                     * virtual interrupt delivery.
>> +                     */
>> +                    kvm_x86_ops->hwapic_irr_update(vcpu,
>> +                                    kvm_lapic_find_highest_irr(vcpu));
>>                      update_cr8_intercept(vcpu);
>>                      kvm_lapic_sync_to_vapic(vcpu);
>>              }
>> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
>> index 4dd7d75..0350e0d 100644
>> --- a/include/linux/kvm_host.h
>> +++ b/include/linux/kvm_host.h
>> @@ -123,6 +123,7 @@ static inline bool is_error_page(struct page *page)
>>  #define KVM_REQ_MASTERCLOCK_UPDATE 19
>>  #define KVM_REQ_MCLOCK_INPROGRESS 20
>>  #define KVM_REQ_EPR_EXIT          21
>> +#define KVM_REQ_EOIBITMAP         22
>> 
>>  #define KVM_USERSPACE_IRQ_SOURCE_ID         0 #define
>>  KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID    1 @@ -538,6 +539,7 @@ void
>>  kvm_put_guest_fpu(struct kvm_vcpu *vcpu); void
>>  kvm_flush_remote_tlbs(struct kvm *kvm); void
>>  kvm_reload_remote_mmus(struct kvm *kvm); void
>>  kvm_make_mclock_inprogress_request(struct kvm *kvm);
>> +void kvm_make_update_eoibitmap_request(struct kvm *kvm);
>> 
>>  long kvm_arch_dev_ioctl(struct file *filp,
>>                      unsigned int ioctl, unsigned long arg);
>> @@ -691,6 +693,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
>>  int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
>>  int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
>>  		int irq_source_id, int level);
>> +bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
>>  void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
>>  void kvm_register_irq_ack_notifier(struct kvm *kvm,
>>  				   struct kvm_irq_ack_notifier *kian);
>> diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
>> index f3abbef..ce82b94 100644
>> --- a/virt/kvm/ioapic.c
>> +++ b/virt/kvm/ioapic.c
>> @@ -35,6 +35,7 @@
>>  #include <linux/hrtimer.h>
>>  #include <linux/io.h>
>>  #include <linux/slab.h>
>> +#include <linux/export.h>
>>  #include <asm/processor.h>
>>  #include <asm/page.h>
>>  #include <asm/current.h>
>> @@ -115,6 +116,42 @@ static void update_handled_vectors(struct kvm_ioapic
> *ioapic)
>>      smp_wmb();
>>  }
>> +void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
>> +                                    u64 *eoi_exit_bitmap)
>> +{
>> +    struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
>> +    union kvm_ioapic_redirect_entry *e;
>> +    struct kvm_lapic_irq irqe;
>> +    int index;
>> +
>> +    spin_lock(&ioapic->lock);
>> +    /* traverse ioapic entry to set eoi exit bitmap*/
>> +    for (index = 0; index < IOAPIC_NUM_PINS; index++) {
>> +            e = &ioapic->redirtbl[index];
>> +            if (!e->fields.mask &&
>> +                    (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
>> +                     kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC,
>> +                             index))) {
>> +                    irqe.dest_id = e->fields.dest_id;
>> +                    irqe.vector = e->fields.vector;
>> +                    irqe.dest_mode = e->fields.dest_mode;
>> +                    irqe.delivery_mode = e->fields.delivery_mode << 8;
>> +                    kvm_calculate_eoi_exitmap(vcpu, &irqe, eoi_exit_bitmap);
>> +            }
>> +    }
>> +    spin_unlock(&ioapic->lock);
>> +}
>> +EXPORT_SYMBOL_GPL(kvm_ioapic_calculate_eoi_exitmap);
>> +
>> +void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm)
>> +{
>> +    struct kvm_ioapic *ioapic = kvm->arch.vioapic;
>> +
>> +    if (!kvm_apic_vid_enabled(kvm) || !ioapic)
>> +            return;
>> +    kvm_make_update_eoibitmap_request(kvm);
>> +}
>> +
>>  static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
>>  {
>>      unsigned index;
>> @@ -156,6 +193,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
>>  		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
>>  		    && ioapic->irr & (1 << index))
>>  			ioapic_service(ioapic, index);
>> +		kvm_ioapic_make_eoibitmap_request(ioapic->kvm);
>>  		break;
>>  	}
>>  }
>> @@ -455,6 +493,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
>>  	spin_lock(&ioapic->lock);
>>  	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
>>  	update_handled_vectors(ioapic);
>> +	kvm_ioapic_make_eoibitmap_request(kvm);
>>  	spin_unlock(&ioapic->lock);
>>  	return 0;
>>  }
>> diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
>> index a30abfe..0400a46 100644
>> --- a/virt/kvm/ioapic.h
>> +++ b/virt/kvm/ioapic.h
>> @@ -82,5 +82,9 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct
> kvm_lapic *src,
>>              struct kvm_lapic_irq *irq);
>>  int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
>>  int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
>> +void kvm_ioapic_make_eoibitmap_request(struct kvm *kvm);
>> +void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
>> +                                    u64 *eoi_exit_bitmap);
>> +
>> 
>>  #endif
>> diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
>> index 656fa45..ff6d40e 100644
>> --- a/virt/kvm/irq_comm.c
>> +++ b/virt/kvm/irq_comm.c
>> @@ -22,6 +22,7 @@
>> 
>>  #include <linux/kvm_host.h>
>>  #include <linux/slab.h>
>> +#include <linux/export.h>
>>  #include <trace/events/kvm.h>
>>  
>>  #include <asm/msidef.h>
>> @@ -237,6 +238,28 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int
> irq_source_id, u32 irq, int level)
>>      return ret;
>>  }
>> +bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
>> +{
>> +    struct kvm_irq_ack_notifier *kian;
>> +    struct hlist_node *n;
>> +    int gsi;
>> +
>> +    rcu_read_lock();
>> +    gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
>> +    if (gsi != -1)
>> +            hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list,
>> +                                     link)
>> +                    if (kian->gsi == gsi) {
>> +                            rcu_read_unlock();
>> +                            return true;
>> +                    }
>> +
>> +    rcu_read_unlock();
>> +
>> +    return false;
>> +}
>> +EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
>> +
>>  void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
>>  {
>>  	struct kvm_irq_ack_notifier *kian;
>> @@ -261,6 +284,7 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
>>  	mutex_lock(&kvm->irq_lock);
>>  	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
>>  	mutex_unlock(&kvm->irq_lock);
>> +	kvm_ioapic_make_eoibitmap_request(kvm);
>>  }
>> 
>>  void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
>> @@ -270,6 +294,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
>>  	hlist_del_init_rcu(&kian->link);
>>  	mutex_unlock(&kvm->irq_lock);
>>  	synchronize_rcu();
>> +	kvm_ioapic_make_eoibitmap_request(kvm);
>>  }
>>  
>>  int kvm_request_irq_source_id(struct kvm *kvm)
>> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
>> index 5e709eb..7aabc24 100644
>> --- a/virt/kvm/kvm_main.c
>> +++ b/virt/kvm/kvm_main.c
>> @@ -217,6 +217,11 @@ void kvm_make_mclock_inprogress_request(struct
> kvm *kvm)
>>      make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
>>  }
>> +void kvm_make_update_eoibitmap_request(struct kvm *kvm)
>> +{
>> +    make_all_cpus_request(kvm, KVM_REQ_EOIBITMAP);
>> +}
>> +
>>  int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
>>  {
>>      struct page *page;
>> --
>> 1.7.1
> 
> --
>                       Gleb.


Best regards,
Yang


--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to