Batch pte updates and tlb flushes issued while the guest is in lazy MMU mode.
While PARAVIRT_LAZY_MMU is active, KVM_HYPERCALL_MMU_WRITE and
KVM_HYPERCALL_FLUSH_TLB hypercalls are queued in a per-cpu buffer instead of
exiting to the host one by one; the queue is replayed with a single
KVM_HYPERCALL_MULTICALL when it fills up or when lazy mode is left.
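
To illustrate what the batching buys, here is a sketch of a hypothetical
caller; remap_range() and its arguments are made up for the example, while
the lazy-mode hooks and the hypercall names are the ones this patch wires up:

#include <linux/mm.h>		/* struct mm_struct, PAGE_SIZE */
#include <asm/pgtable.h>	/* set_pte_at(), arch_*_lazy_mmu_mode() */

/*
 * Hypothetical caller: without this patch every set_pte_at() below costs
 * one VM exit.  With it, the writes are queued per-cpu and replayed by a
 * single KVM_HYPERCALL_MULTICALL when lazy MMU mode is left (or earlier,
 * if the one-page queue fills up).
 */
static void remap_range(struct mm_struct *mm, unsigned long addr,
			pte_t *ptep, pte_t pte, int nr)
{
	int i;

	arch_enter_lazy_mmu_mode();	/* -> kvm_enter_lazy_mmu() */
	for (i = 0; i < nr; i++)
		set_pte_at(mm, addr + i * PAGE_SIZE, ptep + i, pte);
	arch_leave_lazy_mmu_mode();	/* -> kvm_leave_lazy_mmu(): one
					   multicall flushes the queue */
}
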
v1->v2:
- report individual hypercall error codes; have the multicall return the
  number of processed entries (see the ABI sketch below).
- hold slots_lock across the entire multicall instead of acquiring and
  releasing it for every entry.
v2->v3:
- change to one ioctl per paravirt feature.
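
For reference, a minimal sketch of how a guest could drive the multicall ABI
directly (example_multicall() and its error reporting are made up for the
example; the entry layout, the hypercall number, and the "return the number
of processed entries, write error_code back into failed entries" semantics
are what the host side in the patch below implements):

#include <linux/kernel.h>	/* printk() */
#include <linux/kvm_para.h>	/* kvm_hypercall2(), struct kvm_multicall_entry */
#include <asm/page.h>		/* __pa() */

static void example_multicall(struct kvm_multicall_entry *queue, int n)
{
	long done;
	int i;

	/* queue[] must be physically contiguous; the hypercall takes its
	 * guest-physical address and the number of entries. */
	done = kvm_hypercall2(KVM_HYPERCALL_MULTICALL, __pa(queue), n);
	if (done == n)
		return;

	/* Failed entries keep their slot and carry a -KVM_* error code. */
	for (i = 0; i < n; i++)
		if (queue[i].error_code)
			printk(KERN_WARNING "multicall: entry %d (nr %u) "
			       "failed: %d\n", i, queue[i].nr,
			       (int)queue[i].error_code);
}
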
Signed-off-by: Marcelo Tosatti <[EMAIL PROTECTED]>
Index: kvm.paravirt3/arch/x86/kernel/kvm.c
===================================================================
--- kvm.paravirt3.orig/arch/x86/kernel/kvm.c
+++ kvm.paravirt3/arch/x86/kernel/kvm.c
@@ -25,6 +25,77 @@
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
+#include <linux/hardirq.h>
+
+#define MAX_MULTICALL_NR (PAGE_SIZE / sizeof(struct kvm_multicall_entry))
+
+struct kvm_para_state {
+ struct kvm_multicall_entry queue[MAX_MULTICALL_NR];
+ int queue_index;
+ enum paravirt_lazy_mode mode;
+};
+
+static DEFINE_PER_CPU(struct kvm_para_state, para_state);
+
+static int can_defer_hypercall(struct kvm_para_state *state, unsigned int nr)
+{
+ if (state->mode == PARAVIRT_LAZY_MMU) {
+ switch (nr) {
+ case KVM_HYPERCALL_MMU_WRITE:
+ case KVM_HYPERCALL_FLUSH_TLB:
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static void hypercall_queue_flush(struct kvm_para_state *state)
+{
+ long ret;
+
+ if (state->queue_index) {
+ ret = kvm_hypercall2(KVM_HYPERCALL_MULTICALL,
+ __pa(&state->queue), state->queue_index);
+ WARN_ON(ret != state->queue_index);
+ state->queue_index = 0;
+ }
+}
+
+static void kvm_hypercall_defer(struct kvm_para_state *state,
+ unsigned int nr,
+ unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3)
+{
+ struct kvm_multicall_entry *entry;
+
+ BUG_ON(preemptible());
+
+ if (state->queue_index == MAX_MULTICALL_NR)
+ hypercall_queue_flush(state);
+
+ entry = &state->queue[state->queue_index++];
+ entry->nr = nr;
+ entry->a0 = a0;
+ entry->a1 = a1;
+ entry->a2 = a2;
+ entry->a3 = a3;
+}
+
+static long kvm_hypercall(unsigned int nr, unsigned long a0,
+ unsigned long a1, unsigned long a2,
+ unsigned long a3)
+{
+ struct kvm_para_state *state = &get_cpu_var(para_state);
+ long ret = 0;
+
+ if (can_defer_hypercall(state, nr))
+ kvm_hypercall_defer(state, nr, a0, a1, a2, a3);
+ else
+ ret = kvm_hypercall4(nr, a0, a1, a2, a3);
+
+ put_cpu_var(para_state);
+ return ret;
+}
/*
* No need for any "IO delay" on KVM
@@ -44,8 +115,8 @@ static void kvm_mmu_write(void *dest, co
if (size == 2)
a1 = *(u32 *)&p[4];
#endif
- kvm_hypercall3(KVM_HYPERCALL_MMU_WRITE, (unsigned long)__pa(dest), a0,
- a1);
+ kvm_hypercall(KVM_HYPERCALL_MMU_WRITE, (unsigned long)__pa(dest), a0,
+ a1, 0);
}
/*
@@ -110,12 +181,31 @@ static void kvm_set_pud(pud_t *pudp, pud
static void kvm_flush_tlb(void)
{
- kvm_hypercall0(KVM_HYPERCALL_FLUSH_TLB);
+ kvm_hypercall(KVM_HYPERCALL_FLUSH_TLB, 0, 0, 0, 0);
}
static void kvm_release_pt(u32 pfn)
{
- kvm_hypercall1(KVM_HYPERCALL_RELEASE_PT, pfn << PAGE_SHIFT);
+ kvm_hypercall(KVM_HYPERCALL_RELEASE_PT, pfn << PAGE_SHIFT, 0, 0, 0);
+}
+
+static void kvm_enter_lazy_mmu(void)
+{
+ struct kvm_para_state *state
+ = &per_cpu(para_state, smp_processor_id());
+
+ paravirt_enter_lazy_mmu();
+ state->mode = paravirt_get_lazy_mode();
+}
+
+static void kvm_leave_lazy_mmu(void)
+{
+ struct kvm_para_state *state
+ = &per_cpu(para_state, smp_processor_id());
+
+ hypercall_queue_flush(state);
+ paravirt_leave_lazy(paravirt_get_lazy_mode());
+ state->mode = paravirt_get_lazy_mode();
}
static void paravirt_ops_setup(void)
@@ -144,6 +234,11 @@ static void paravirt_ops_setup(void)
pv_mmu_ops.release_pt = kvm_release_pt;
pv_mmu_ops.release_pd = kvm_release_pt;
}
+
+ if (kvm_para_has_feature(KVM_FEATURE_MULTICALL)) {
+ pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu;
+ pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu;
+ }
}
void __init kvm_guest_init(void)
Index: kvm.paravirt3/arch/x86/kvm/x86.c
===================================================================
--- kvm.paravirt3.orig/arch/x86/kvm/x86.c
+++ kvm.paravirt3/arch/x86/kvm/x86.c
@@ -73,6 +73,7 @@ struct kvm_stats_debugfs_item debugfs_en
{ "halt_exits", VCPU_STAT(halt_exits) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "hypercalls", VCPU_STAT(hypercalls) },
+ { "multicalls", VCPU_STAT(multicalls), },
{ "request_irq", VCPU_STAT(request_irq_exits) },
{ "irq_exits", VCPU_STAT(irq_exits) },
{ "host_state_reload", VCPU_STAT(host_state_reload) },
@@ -798,6 +799,7 @@ int kvm_dev_ioctl_check_extension(long e
case KVM_CAP_EXT_CPUID:
case KVM_CAP_CLOCKSOURCE:
case KVM_CAP_NOP_IO_DELAY:
+ case KVM_CAP_MULTICALL:
r = 1;
break;
case KVM_CAP_VAPIC:
@@ -1766,7 +1768,8 @@ mmio:
return X86EMUL_UNHANDLEABLE;
}
-int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+
+int __emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
const void *val, int bytes)
{
int ret;
@@ -1782,6 +1785,17 @@ int emulator_write_phys(struct kvm_vcpu
return 1;
}
+static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const void *val, int bytes)
+{
+ int ret;
+
+ down_read(&vcpu->kvm->slots_lock);
+ ret = __emulator_write_phys(vcpu, gpa, val, bytes);
+ up_read(&vcpu->kvm->slots_lock);
+ return ret;
+}
+
static int emulator_write_emulated_onepage(unsigned long addr,
const void *val,
unsigned int bytes,
@@ -2371,6 +2385,63 @@ int kvm_emulate_halt(struct kvm_vcpu *vc
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
+static int dispatch_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
+ unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3)
+{
+ ++vcpu->stat.hypercalls;
+
+ switch (nr) {
+ case KVM_HC_VAPIC_POLL_IRQ:
+ return 0;
+ case KVM_HYPERCALL_MMU_WRITE:
+ return kvm_hypercall_mmu_write(vcpu, a0, a1, a2);
+ case KVM_HYPERCALL_FLUSH_TLB:
+ return kvm_hypercall_flush_tlb(vcpu);
+ case KVM_HYPERCALL_RELEASE_PT:
+ return kvm_hypercall_release_pt(vcpu, a0);
+ }
+
+ return -KVM_ENOSYS;
+}
+
+static int kvm_hypercall_multicall(struct kvm_vcpu *vcpu, gpa_t addr, u32 nents)
+{
+ int i, nr_processed = 0;
+
+ ++vcpu->stat.multicalls;
+
+ down_read(&vcpu->kvm->slots_lock);
+ for (i = 0; i < nents; i++) {
+ struct kvm_multicall_entry mc;
+ int ret;
+
+ ret = kvm_read_guest(vcpu->kvm, addr, &mc, sizeof(mc));
+ if (ret) {
+ up_read(&vcpu->kvm->slots_lock);
+ return nr_processed;
+ }
+
+ mc.error_code = dispatch_hypercall(vcpu, mc.nr, mc.a0, mc.a1,
+ mc.a2, mc.a3);
+ if (mc.error_code) {
+ ret = kvm_write_guest(vcpu->kvm, addr, &mc,
+ sizeof(mc));
+ if (ret) {
+ up_read(&vcpu->kvm->slots_lock);
+ return nr_processed;
+ }
+ } else
+ nr_processed++;
+
+ addr += sizeof(mc);
+ }
+ up_read(&vcpu->kvm->slots_lock);
+
+ return nr_processed;
+}
+
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
unsigned long nr, a0, a1, a2, a3, ret;
@@ -2391,26 +2462,13 @@ int kvm_emulate_hypercall(struct kvm_vcp
a3 &= 0xFFFFFFFF;
}
- switch (nr) {
- case KVM_HC_VAPIC_POLL_IRQ:
- ret = 0;
- break;
- case KVM_HYPERCALL_MMU_WRITE:
- ret = kvm_hypercall_mmu_write(vcpu, a0, a1, a2);
- break;
- case KVM_HYPERCALL_FLUSH_TLB:
- ret = kvm_hypercall_flush_tlb(vcpu);
- break;
- case KVM_HYPERCALL_RELEASE_PT:
- ret = kvm_hypercall_release_pt(vcpu, a0);
- break;
- default:
- ret = -KVM_ENOSYS;
- break;
- }
+ if (nr == KVM_HYPERCALL_MULTICALL)
+ ret = kvm_hypercall_multicall(vcpu, a0, a1);
+ else
+ ret = dispatch_hypercall(vcpu, nr, a0, a1, a2, a3);
+
vcpu->arch.regs[VCPU_REGS_RAX] = ret;
kvm_x86_ops->decache_regs(vcpu);
- ++vcpu->stat.hypercalls;
return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
Index: kvm.paravirt3/include/asm-x86/kvm_host.h
===================================================================
--- kvm.paravirt3.orig/include/asm-x86/kvm_host.h
+++ kvm.paravirt3/include/asm-x86/kvm_host.h
@@ -329,6 +329,7 @@ struct kvm_vcpu_stat {
u32 insn_emulation;
u32 insn_emulation_fail;
u32 hypercalls;
+ u32 multicalls;
};
struct descriptor_table {
@@ -426,7 +427,7 @@ int kvm_hypercall_mmu_write(struct kvm_v
int kvm_hypercall_flush_tlb(struct kvm_vcpu *vcpu);
int kvm_hypercall_release_pt(struct kvm_vcpu *vcpu, gpa_t addr);
-int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+int __emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
const void *val, int bytes);
extern bool tdp_enabled;
Index: kvm.paravirt3/include/linux/kvm.h
===================================================================
--- kvm.paravirt3.orig/include/linux/kvm.h
+++ kvm.paravirt3/include/linux/kvm.h
@@ -238,6 +238,7 @@ struct kvm_vapic_addr {
#define KVM_CAP_NR_MEMSLOTS 10 /* returns max memory slots per vm */
#define KVM_CAP_NOP_IO_DELAY 11
#define KVM_CAP_MMU_WRITE 12
+#define KVM_CAP_MULTICALL 13
/*
* ioctls for VM fds
Index: kvm.paravirt3/include/linux/kvm_para.h
===================================================================
--- kvm.paravirt3.orig/include/linux/kvm_para.h
+++ kvm.paravirt3/include/linux/kvm_para.h
@@ -13,11 +13,13 @@
#define KVM_ENOSYS 1000
#define KVM_EFAULT EFAULT
#define KVM_E2BIG E2BIG
+#define KVM_EINVAL EINVAL
#define KVM_HC_VAPIC_POLL_IRQ 1
#define KVM_HYPERCALL_MMU_WRITE 2
#define KVM_HYPERCALL_FLUSH_TLB 3
#define KVM_HYPERCALL_RELEASE_PT 4
+#define KVM_HYPERCALL_MULTICALL 5
/*
* hypercalls use architecture specific
Index: kvm.paravirt3/include/asm-x86/kvm_para.h
===================================================================
--- kvm.paravirt3.orig/include/asm-x86/kvm_para.h
+++ kvm.paravirt3/include/asm-x86/kvm_para.h
@@ -13,6 +13,7 @@
#define KVM_FEATURE_CLOCKSOURCE 0
#define KVM_FEATURE_NOP_IO_DELAY 1
#define KVM_FEATURE_MMU_WRITE 2
+#define KVM_FEATURE_MULTICALL 3
#define MSR_KVM_WALL_CLOCK 0x11
#define MSR_KVM_SYSTEM_TIME 0x12
@@ -37,6 +38,14 @@ struct kvm_wall_clock {
uint32_t wc_nsec;
} __attribute__((__packed__));
+struct kvm_multicall_entry {
+ u32 nr;
+ u32 error_code;
+ u64 a0;
+ u64 a1;
+ u64 a2;
+ u64 a3;
+};
extern void kvmclock_init(void);
Index: kvm.paravirt3/arch/x86/kvm/mmu.c
===================================================================
--- kvm.paravirt3.orig/arch/x86/kvm/mmu.c
+++ kvm.paravirt3/arch/x86/kvm/mmu.c
@@ -1846,7 +1846,7 @@ int kvm_hypercall_mmu_write(struct kvm_v
bytes = 4;
}
- if (!emulator_write_phys(vcpu, addr, &value, bytes))
+ if (!__emulator_write_phys(vcpu, addr, &value, bytes))
return -KVM_EFAULT;
return 0;