Re: [PATCH v7 2/2] KVM: LAPIC: Inject timer interrupt via posted interrupt

2019-07-17 Thread Paolo Bonzini
On 06/07/19 03:26, Wanpeng Li wrote:
> +
> +void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
> +{
> + if (!lapic_timer_int_injected(vcpu))
> + return;
> +
> + __kvm_wait_lapic_expire(vcpu);
> +}
>  EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);

I changed this to

if (lapic_timer_int_injected(vcpu))
__kvm_wait_lapic_expire(vcpu);

and queued the two patches.

Paolo


[PATCH v7 2/2] KVM: LAPIC: Inject timer interrupt via posted interrupt

2019-07-05 Thread Wanpeng Li
From: Wanpeng Li 

Dedicated instances are currently disturbed by unnecessary jitter due 
to the emulated lapic timers firing on the same pCPUs on which the vCPUs reside.
There is no hardware virtual timer on Intel for guests, unlike ARM. Both 
programming the timer in the guest and the emulated timer firing incur vmexits.
This patch tries to avoid the vmexits incurred by the emulated 
timer firing in the dedicated instance scenario. 

When nohz_full is enabled in the dedicated instances scenario, the emulated 
timers can be offloaded to the nearest busy housekeeping cpus, since APICv 
has become really common in recent years. The guest timer interrupt is injected 
by a posted interrupt, which is delivered by a housekeeping cpu once the emulated 
timer fires. 

The host admin should fine-tune the setup, e.g. in the dedicated instances scenario 
have nohz_full cover the pCPUs on which the vCPUs reside, leave several surplus pCPUs 
for busy housekeeping, and disable mwait/hlt/pause vmexits to keep the vCPUs in non-root  
mode; a ~3% redis performance benefit can be observed on a Skylake server.

w/o patch:

VM-EXIT  Samples  Samples%  Time%   Min Time  Max Time   Avg time

EXTERNAL_INTERRUPT4291649.43%   39.30%   0.47us   106.09us   0.71us ( 
+-   1.09% )

w/ patch:

VM-EXIT  Samples  Samples%  Time%   Min Time  Max Time Avg 
time

EXTERNAL_INTERRUPT6871 9.29% 2.96%   0.44us57.88us   0.72us ( 
+-   4.02% )

Cc: Paolo Bonzini 
Cc: Radim Krčmář 
Cc: Marcelo Tosatti 

Signed-off-by: Wanpeng Li 
---
 arch/x86/kvm/lapic.c| 101 ++--
 arch/x86/kvm/lapic.h|   1 +
 arch/x86/kvm/vmx/vmx.c  |   3 +-
 arch/x86/kvm/x86.c  |   6 +++
 arch/x86/kvm/x86.h  |   2 +
 include/linux/sched/isolation.h |   2 +
 kernel/sched/isolation.c|   6 +++
 7 files changed, 85 insertions(+), 36 deletions(-)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 707ca9c..4869691 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -128,6 +128,17 @@ static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
return apic->vcpu->vcpu_id;
 }
 
+bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
+{
+   return pi_inject_timer && kvm_vcpu_apicv_active(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_can_post_timer_interrupt);
+
+static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
+{
+   return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == 
IN_GUEST_MODE;
+}
+
 static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
switch (map->mode) {
@@ -1436,29 +1447,6 @@ static void apic_update_lvtt(struct kvm_lapic *apic)
}
 }
 
-static void apic_timer_expired(struct kvm_lapic *apic)
-{
-   struct kvm_vcpu *vcpu = apic->vcpu;
-   struct swait_queue_head *q = >wq;
-   struct kvm_timer *ktimer = >lapic_timer;
-
-   if (atomic_read(>lapic_timer.pending))
-   return;
-
-   atomic_inc(>lapic_timer.pending);
-   kvm_set_pending_timer(vcpu);
-
-   /*
-* For x86, the atomic_inc() is serialized, thus
-* using swait_active() is safe.
-*/
-   if (swait_active(q))
-   swake_up_one(q);
-
-   if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
-   ktimer->expired_tscdeadline = ktimer->tscdeadline;
-}
-
 /*
  * On APICv, this test will cause a busy wait
  * during a higher-priority task.
@@ -1532,7 +1520,7 @@ static inline void adjust_lapic_timer_advance(struct 
kvm_vcpu *vcpu,
apic->lapic_timer.timer_advance_ns = timer_advance_ns;
 }
 
-void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
+static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
 {
struct kvm_lapic *apic = vcpu->arch.apic;
u64 guest_tsc, tsc_deadline;
@@ -1540,9 +1528,6 @@ void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
if (apic->lapic_timer.expired_tscdeadline == 0)
return;
 
-   if (!lapic_timer_int_injected(vcpu))
-   return;
-
tsc_deadline = apic->lapic_timer.expired_tscdeadline;
apic->lapic_timer.expired_tscdeadline = 0;
guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
@@ -1554,8 +1539,59 @@ void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
if (unlikely(!apic->lapic_timer.timer_advance_adjust_done))
adjust_lapic_timer_advance(vcpu, 
apic->lapic_timer.advance_expire_delta);
 }
+
+void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
+{
+   if (!lapic_timer_int_injected(vcpu))
+   return;
+
+   __kvm_wait_lapic_expire(vcpu);
+}
 EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
 
+static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
+{
+   struct kvm_timer *ktimer = >lapic_timer;
+
+   kvm_apic_local_deliver(apic, APIC_LVTT);
+   if (apic_lvtt_tscdeadline(apic))
+   ktimer->tscdeadline = 0;
+   if (apic_lvtt_oneshot(apic)) {
+