From: Christian Ehrhardt <[EMAIL PROTECTED]>
Other existing kvm statistics are either just counters (kvm_stat) reported for
kvm generally or trace-based approaches like kvm_trace.
For kvm on powerpc we had the need to track the timings of the different exit
types. While this could be achieved by parsing data created with a kvm_trace
extension this adds too much overhead (at least on embedded powerpc) slowing
down the workloads we wanted to measure.
Therefore this patch adds an in-kernel exit timing statistic to the powerpc kvm
code. This statistic is available per vm & vcpu under the kvm debugfs directory.
As this statistic adds low, but still some, overhead it can be enabled via a
.config entry and should be off by default.
Since this patch touched all powerpc kvm_stat code anyway this code is now
merged and simplified together with the exit timing statistic code (still
working with exit timing disabled in .config).
Here is a sample output (after postprocessing with the awk script I'll post in
reply to this patch) showing what the results look like.
sum of time 27504898
MMIO: count 824 min 51 max 555 sum
75825 sum_quad 9232871 avg 92.0206 stddev 52.318 %
0.28
DCR: count 140 min 44 max 92 sum
6746 sum_quad 327658 avg 48.1857 stddev 4.307 %
0.02
SIGNAL: count 2 min 309 max 993 sum
1302 sum_quad 1081530 avg 651.0000 stddev 342.000 %
0.00
ITLBREAL: count 293 min 11 max 14 sum
3515 sum_quad 42175 avg 11.9966 stddev 0.155 %
0.01
ITLBVIRT: count 113822 min 20 max 338 sum
2595412 sum_quad 60256824 avg 22.8024 stddev 3.074
% 9.44
DTLBREAL: count 242 min 11 max 14 sum
2908 sum_quad 34974 avg 12.0165 stddev 0.352 %
0.01
DTLBVIRT: count 66687 min 21 max 329 sum
1530048 sum_quad 35434926 avg 22.9437 stddev 2.224
% 5.56
SYSCALL: count 72 min 9 max 10 sum
649 sum_quad 5851 avg 9.0139 stddev 0.117 %
0.00
ISI: count 56 min 9 max 10 sum
506 sum_quad 4574 avg 9.0357 stddev 0.186 %
0.00
DSI: count 49 min 9 max 10 sum
448 sum_quad 4102 avg 9.1429 stddev 0.350 %
0.00
EMULINST: count 211220 min 7 max 7700 sum
3292984 sum_quad 5797023806 avg 15.5903 stddev 164.931
% 11.97
DEC: count 6582 min 49 max 322 sum
367567 sum_quad 22996737 avg 55.8443 stddev 19.373 %
1.34
EXTINT: count 4 min 79 max 513 sum
797 sum_quad 290423 avg 199.2500 stddev 181.398 %
0.00
TIMEINGUEST: count 399993 min 0 max 3952 sum
19626191 sum_quad 61148587807 avg 49.0663 stddev 387.900
% 71.36
Signed-off-by: Christian Ehrhardt <[EMAIL PROTECTED]>
---
[diffstat]
arch/powerpc/include/asm/kvm_host.h | 49 ++++++++
arch/powerpc/include/asm/kvm_timing_stats.h | 168 ++++++++++++++++++++++++++++
arch/powerpc/include/asm/mmu-44x.h | 1
arch/powerpc/kernel/asm-offsets.c | 11 +
arch/powerpc/kvm/Kconfig | 9 +
arch/powerpc/kvm/booke.c | 35 +++--
arch/powerpc/kvm/booke.h | 7 +
arch/powerpc/kvm/booke_interrupts.S | 24 ++++
arch/powerpc/kvm/powerpc.c | 162 ++++++++++++++++++++++++++-
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -71,7 +71,50 @@
u32 word2;
};
+enum kvm_exit_types {
+ MMIO_EXITS,
+ DCR_EXITS,
+ SIGNAL_EXITS,
+ ITLB_REAL_MISS_EXITS,
+ ITLB_VIRT_MISS_EXITS,
+ DTLB_REAL_MISS_EXITS,
+ DTLB_VIRT_MISS_EXITS,
+ SYSCALL_EXITS,
+ ISI_EXITS,
+ DSI_EXITS,
+ EMULATED_INST_EXITS,
+ EMULATED_MTMSRWE_EXITS,
+ EMULATED_COREOP_EXITS,
+ EMULATED_COREMTSPR_EXITS,
+ EMULATED_COREMFSPR_EXITS,
+ EMULATED_COREMTMSR_EXITS,
+ EMULATED_TLBSX_EXITS,
+ EMULATED_TLBWE_EXITS,
+ EMULATED_RFI_EXITS,
+ DEC_EXITS,
+ EXT_INTR_EXITS,
+ HALT_WAKEUP,
+ USR_PR_INST,
+ FP_UNAVAIL,
+ DEBUG_EXITS,
+ TIMEINGUEST,
+ __NUMBER_OF_KVM_EXIT_TYPES
+};
+
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+/* allow access to big endian 32bit upper/lower parts and 64bit var */
+typedef union {
+ u64 tv64;
+ struct {
+ u32 tbu, tbl;
+ } tv32;
+} exit_timing_t;
+#endif
+
struct kvm_arch {
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+ unsigned int vm_id;
+#endif
};
struct kvm_vcpu_arch {
@@ -130,6 +173,19 @@
u32 dbcr0;
u32 dbcr1;
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+ exit_timing_t timing_exit;
+ exit_timing_t timing_last_enter;
+ u32 last_exit_type;
+ u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES];
+ u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+ u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+ u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+ u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+ u64 timing_last_exit;
+ struct dentry *debugfs_exit_timing;
+#endif
+
u32 last_inst;
ulong fault_dear;
ulong fault_esr;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -30,10 +30,18 @@
#include <linux/kvm_host.h>
enum emulation_result {
- EMULATE_DONE, /* no further processing */
- EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
- EMULATE_DO_DCR, /* kvm_run filled with DCR request */
- EMULATE_FAIL, /* can't emulate this instruction */
+ EMULATE_DONE, /* no further processing */
+ EMULATE_COREOP_DONE, /* no further processing */
+ EMULATE_COREMTSPR_DONE, /* no further processing */
+ EMULATE_COREMFSPR_DONE, /* no further processing */
+ EMULATE_COREMTMSR_DONE, /* no further processing */
+ EMULATE_MTMSRWE_DONE, /* no further processing */
+ EMULATE_TLBSX_DONE, /* no further processing */
+ EMULATE_TLBWE_DONE, /* no further processing */
+ EMULATE_RFI_DONE, /* no further processing */
+ EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
+ EMULATE_DO_DCR, /* kvm_run filled with DCR request */
+ EMULATE_FAIL, /* can't emulate this instruction */
};
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -381,5 +381,16 @@
DEFINE(PTE_SHIFT, PTE_SHIFT);
#endif
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+ DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
+ arch.timing_exit.tv32.tbu));
+ DEFINE(VCPU_TIMING_EXIT_TBL, offsetof(struct kvm_vcpu,
+ arch.timing_exit.tv32.tbl));
+ DEFINE(VCPU_TIMING_LAST_ENTER_TBU, offsetof(struct kvm_vcpu,
+ arch.timing_last_enter.tv32.tbu));
+ DEFINE(VCPU_TIMING_LAST_ENTER_TBL, offsetof(struct kvm_vcpu,
+ arch.timing_last_enter.tv32.tbl));
+#endif
+
return 0;
}
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -48,7 +48,7 @@
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
- int emulated = EMULATE_DONE;
+ int emulated = EMULATE_COREOP_DONE;
int dcrn;
int ra;
int rb;
@@ -63,6 +63,7 @@
switch (get_xop(inst)) {
case XOP_RFI:
kvmppc_emul_rfi(vcpu);
+ emulated = EMULATE_RFI_DONE;
*advance = 0;
break;
@@ -82,7 +83,10 @@
case XOP_MTMSR:
rs = get_rs(inst);
- kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
+ if (kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]) ==
EMULATE_MTMSRWE_DONE)
+ emulated = EMULATE_MTMSRWE_DONE;
+ else
+ emulated = EMULATE_COREMTMSR_DONE;
break;
case XOP_WRTEE:
@@ -276,7 +280,7 @@
return EMULATE_FAIL;
}
- return EMULATE_DONE;
+ return EMULATE_COREMTSPR_DONE;
}
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
@@ -357,6 +361,6 @@
return EMULATE_FAIL;
}
- return EMULATE_DONE;
+ return EMULATE_COREMFSPR_DONE;
}
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -457,7 +457,7 @@
KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0,
tlbe->word1, tlbe->word2, handler);
- return EMULATE_DONE;
+ return EMULATE_TLBWE_DONE;
}
int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
@@ -480,5 +480,5 @@
}
vcpu->arch.gpr[rt] = gtlb_index;
- return EMULATE_DONE;
+ return EMULATE_TLBSX_DONE;
}
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -32,6 +32,15 @@
If unsure, say N.
+config KVM_BOOKE_EXIT_TIMING
+ bool "Trace detailed exit Timing"
+ depends on KVM && 44x
+ ---help---
+ Inserts code to trace timestamps for every exit/enter cycle. A per vcpu
+ report is available in debugfs kvm/VM_###/VPCU_###_exit_timing.
+ The overhead is relatively small, however it is not recommended for
+ production environments.
+
config KVM_TRACE
bool "KVM trace support"
depends on KVM && MARKERS && SYSFS
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -28,6 +28,7 @@
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
+#include <asm/kvm_timing_stats.h>
#include <asm/cacheflush.h>
#include <asm/kvm_44x.h>
@@ -185,6 +186,9 @@
enum emulation_result er;
int r = RESUME_HOST;
+ /* update before a new last_exit_type is rewritten */
+ update_timing_stats(vcpu);
+
local_irq_enable();
run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -198,7 +202,7 @@
break;
case BOOKE_INTERRUPT_EXTERNAL:
- vcpu->stat.ext_intr_exits++;
+ account_exit(vcpu, EXT_INTR_EXITS);
if (need_resched())
cond_resched();
r = RESUME_GUEST;
@@ -208,8 +212,7 @@
/* Since we switched IVPR back to the host's value, the host
* handled this interrupt the moment we enabled interrupts.
* Now we just offer it a chance to reschedule the guest. */
-
- vcpu->stat.dec_exits++;
+ account_exit(vcpu, DEC_EXITS);
if (need_resched())
cond_resched();
r = RESUME_GUEST;
@@ -222,20 +225,15 @@
vcpu->arch.esr = vcpu->arch.fault_esr;
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
r = RESUME_GUEST;
+ account_exit(vcpu, USR_PR_INST);
break;
}
er = kvmppc_emulate_instruction(run, vcpu);
switch (er) {
- case EMULATE_DONE:
- /* Future optimization: only reload non-volatiles if
- * they were actually modified by emulation. */
- vcpu->stat.emulated_inst_exits++;
- r = RESUME_GUEST_NV;
- break;
case EMULATE_DO_DCR:
run->exit_reason = KVM_EXIT_DCR;
- vcpu->stat.dcr_exits++;
+ account_exit(vcpu, DCR_EXITS);
r = RESUME_HOST;
break;
case EMULATE_FAIL:
@@ -249,12 +247,45 @@
r = RESUME_HOST;
break;
default:
- BUG();
+ switch (er) {
+ case EMULATE_DONE:
+ account_exit(vcpu, EMULATED_INST_EXITS);
+ break;
+ case EMULATE_COREOP_DONE:
+ account_exit(vcpu, EMULATED_COREOP_EXITS);
+ break;
+ case EMULATE_COREMTSPR_DONE:
+ account_exit(vcpu, EMULATED_COREMTSPR_EXITS);
+ break;
+ case EMULATE_COREMFSPR_DONE:
+ account_exit(vcpu, EMULATED_COREMFSPR_EXITS);
+ break;
+ case EMULATE_COREMTMSR_DONE:
+ account_exit(vcpu, EMULATED_COREMTMSR_EXITS);
+ break;
+ case EMULATE_MTMSRWE_DONE:
+ account_exit(vcpu, EMULATED_MTMSRWE_EXITS);
+ break;
+ case EMULATE_TLBSX_DONE:
+ account_exit(vcpu, EMULATED_TLBSX_EXITS);
+ break;
+ case EMULATE_TLBWE_DONE:
+ account_exit(vcpu, EMULATED_TLBWE_EXITS);
+ break;
+ case EMULATE_RFI_DONE:
+ account_exit(vcpu, EMULATED_RFI_EXITS);
+ break;
+ default:
+ BUG();
+ }
+ r = RESUME_GUEST_NV;
+ break;
}
break;
case BOOKE_INTERRUPT_FP_UNAVAIL:
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
+ account_exit(vcpu, FP_UNAVAIL);
r = RESUME_GUEST;
break;
@@ -262,20 +293,20 @@
vcpu->arch.dear = vcpu->arch.fault_dear;
vcpu->arch.esr = vcpu->arch.fault_esr;
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
- vcpu->stat.dsi_exits++;
+ account_exit(vcpu, DSI_EXITS);
r = RESUME_GUEST;
break;
case BOOKE_INTERRUPT_INST_STORAGE:
vcpu->arch.esr = vcpu->arch.fault_esr;
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
- vcpu->stat.isi_exits++;
+ account_exit(vcpu, ISI_EXITS);
r = RESUME_GUEST;
break;
case BOOKE_INTERRUPT_SYSCALL:
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
- vcpu->stat.syscall_exits++;
+ account_exit(vcpu, SYSCALL_EXITS);
r = RESUME_GUEST;
break;
@@ -294,7 +325,7 @@
kvmppc_booke_queue_irqprio(vcpu,
BOOKE_IRQPRIO_DTLB_MISS);
vcpu->arch.dear = vcpu->arch.fault_dear;
vcpu->arch.esr = vcpu->arch.fault_esr;
- vcpu->stat.dtlb_real_miss_exits++;
+ account_exit(vcpu, DTLB_REAL_MISS_EXITS);
r = RESUME_GUEST;
break;
}
@@ -312,13 +343,13 @@
* invoking the guest. */
kvmppc_mmu_map(vcpu, eaddr, vcpu->arch.paddr_accessed,
gtlbe->tid,
gtlbe->word2, get_tlb_bytes(gtlbe),
gtlb_index);
- vcpu->stat.dtlb_virt_miss_exits++;
+ account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
r = RESUME_GUEST;
} else {
/* Guest has mapped and accessed a page which is not
* actually RAM. */
r = kvmppc_emulate_mmio(run, vcpu);
- vcpu->stat.mmio_exits++;
+ account_exit(vcpu, MMIO_EXITS);
}
break;
@@ -340,11 +371,11 @@
if (gtlb_index < 0) {
/* The guest didn't have a mapping for it. */
kvmppc_booke_queue_irqprio(vcpu,
BOOKE_IRQPRIO_ITLB_MISS);
- vcpu->stat.itlb_real_miss_exits++;
+ account_exit(vcpu, ITLB_REAL_MISS_EXITS);
break;
}
- vcpu->stat.itlb_virt_miss_exits++;
+ account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
gpaddr = tlb_xlate(gtlbe, eaddr);
@@ -378,6 +409,7 @@
mtspr(SPRN_DBSR, dbsr);
run->exit_reason = KVM_EXIT_DEBUG;
+ account_exit(vcpu, DEBUG_EXITS);
r = RESUME_HOST;
break;
}
@@ -398,7 +430,7 @@
if (signal_pending(current)) {
run->exit_reason = KVM_EXIT_INTR;
r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
- vcpu->stat.signal_exits++;
+ account_exit(vcpu, SIGNAL_EXITS);
}
}
@@ -417,6 +449,8 @@
/* Eye-catching number so we know if the guest takes an interrupt
* before it's programmed its own IVPR. */
vcpu->arch.ivpr = 0x55550000;
+
+ init_timing_stats(vcpu);
return kvmppc_core_vcpu_setup(vcpu);
}
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -22,6 +22,7 @@
#include <linux/types.h>
#include <linux/kvm_host.h>
+#include <asm/kvm_timing_stats.h>
/* interrupt priortity ordering */
#define BOOKE_IRQPRIO_DATA_STORAGE 0
@@ -43,15 +44,17 @@
/* Helper function for "full" MSR writes. No need to call this if only EE is
* changing. */
-static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
+static inline int kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
vcpu->arch.msr = new_msr;
- if (vcpu->arch.msr & MSR_WE)
+ if (vcpu->arch.msr & MSR_WE) {
kvm_vcpu_block(vcpu);
+ return EMULATE_MTMSRWE_DONE;
+ }
}
#endif /* __KVM_BOOKE_H__ */
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -106,6 +106,18 @@
li r6, 1
slw r6, r6, r5
+
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+ /* save exit time */
+..exit_tbu_overflow_loop:
+ mfspr r7, SPRN_TBRU
+ mfspr r8, SPRN_TBRL
+ mfspr r9, SPRN_TBRU
+ cmpw r9, r7
+ bne ..exit_tbu_overflow_loop
+ stw r8, VCPU_TIMING_EXIT_TBL(r4)
+ stw r9, VCPU_TIMING_EXIT_TBU(r4)
+#endif
/* Save the faulting instruction and all GPRs for emulation. */
andi. r7, r6, NEED_INST_MASK
@@ -375,6 +387,18 @@
lwz r3, VCPU_SPRG7(r4)
mtspr SPRN_SPRG7, r3
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+ /* save enter time */
+..enter_tbu_overflow_loop:
+ mfspr r6, SPRN_TBRU
+ mfspr r7, SPRN_TBRL
+ mfspr r8, SPRN_TBRU
+ cmpw r8, r6
+ bne ..enter_tbu_overflow_loop
+ stw r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
+ stw r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
+#endif
+
/* Finish loading guest volatiles and jump to guest. */
lwz r3, VCPU_CTR(r4)
mtctr r3
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -28,7 +28,10 @@
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
-
+#include <asm/kvm_timing_stats.h>
+#include <asm/atomic.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
@@ -101,6 +104,160 @@
*(int *)rtn = kvmppc_core_check_processor_compat();
}
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = {
+ "MMIO",
+ "DCR",
+ "SIGNAL",
+ "ITLBREAL",
+ "ITLBVIRT",
+ "DTLBREAL",
+ "DTLBVIRT",
+ "SYSCALL",
+ "ISI",
+ "DSI",
+ "EMULINST",
+ "EMUL_MSR_WE",
+ "EMUL_CORE",
+ "EMUL_MTSPR",
+ "EMUL_MFSPR",
+ "EMUL_MTMSR",
+ "EMUL_TLBSX",
+ "EMUL_TLBWE",
+ "EMUL_RFI",
+ "DEC",
+ "EXTINT",
+ "HALT",
+ "USR_PR_INST",
+ "FP_UNAVAIL",
+ "DEBUG",
+ "TIMEINGUEST"
+};
+
+/* assign a unique number to each vm created */
+static atomic_t vm_count = ATOMIC_INIT(0);
+
+static int kvmppc_exit_timing_show(struct seq_file *m, void *private)
+{
+ struct kvm_vcpu *vcpu = m->private;
+ int i;
+ u64 min, max;
+
+ for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
+ if (vcpu->arch.timing_min_duration[i] == 0xFFFFFFFF)
+ min = 0;
+ else
+ min = vcpu->arch.timing_min_duration[i];
+ if (vcpu->arch.timing_max_duration[i] == 0)
+ max = 0;
+ else
+ max = vcpu->arch.timing_max_duration[i];
+
+ seq_printf(m, "%12s: count %10d min %10lld "
+ "max %10lld sum %20lld sum_quad %20lld\n",
+ kvm_exit_names[i], vcpu->arch.timing_count_type[i],
+ vcpu->arch.timing_min_duration[i],
+ vcpu->arch.timing_max_duration[i],
+ vcpu->arch.timing_sum_duration[i],
+ vcpu->arch.timing_sum_quad_duration[i]);
+ }
+ return 0;
+}
+
+static ssize_t kvmppc_exit_timing_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ size_t len;
+ int err;
+ const char __user *p;
+ char c;
+
+ len = 0;
+ p = user_buf;
+ while (len < count) {
+ if (get_user(c, p++))
+ err = -EFAULT;
+ if (c == 0 || c == '\n')
+ break;
+ len++;
+ }
+
+ if (len > 1) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (copy_from_user(&c, user_buf, sizeof(c))) {
+ err = -EFAULT;
+ goto done;
+ }
+
+ if (c == 'c') {
+ struct seq_file *seqf = (struct seq_file *)file->private_data;
+ struct kvm_vcpu *vcpu = seqf->private;
+ /* write does not affect out buffers previsously generated with
+ * show. Seq file is locked here to prevent races of init with
+ * a show call */
+ mutex_lock(&seqf->lock);
+ init_timing_stats(vcpu);
+ mutex_unlock(&seqf->lock);
+ err = count;
+ } else {
+ err = -EINVAL;
+ goto done;
+ }
+
+done:
+ return err;
+}
+
+static int kvmppc_exit_timing_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, kvmppc_exit_timing_show, inode->i_private);
+}
+
+static struct file_operations kvmppc_exit_timing_fops = {
+ .owner = THIS_MODULE,
+ .open = kvmppc_exit_timing_open,
+ .read = seq_read,
+ .write = kvmppc_exit_timing_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id)
+{
+ static char dbg_fname[28];
+ struct dentry *debugfs_file;
+
+ snprintf(dbg_fname, sizeof(dbg_fname), "VM_%03u_VCPU_%03u_exit_timing",
+ vcpu->kvm->arch.vm_id, id);
+ debugfs_file = debugfs_create_file(dbg_fname, 0666,
+ kvm_debugfs_dir, vcpu,
+ &kvmppc_exit_timing_fops);
+
+ if (!debugfs_file) {
+ printk(KERN_ERR"%s: error creating debugfs file %s\n",
+ __func__, dbg_fname);
+ return;
+ }
+
+ vcpu->arch.debugfs_exit_timing = debugfs_file;
+}
+
+static void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.debugfs_exit_timing) {
+ debugfs_remove(vcpu->arch.debugfs_exit_timing);
+ vcpu->arch.debugfs_exit_timing = NULL;
+ }
+}
+#else
+#define kvmppc_create_vcpu_debugfs(x, y) do { } while (0)
+#define kvmppc_remove_vcpu_debugfs(x) do { } while (0)
+#endif /* CONFIG_KVM_BOOKE_EXIT_TIMING */
+
struct kvm *kvm_arch_create_vm(void)
{
struct kvm *kvm;
@@ -108,6 +265,10 @@
kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
if (!kvm)
return ERR_PTR(-ENOMEM);
+
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+ kvm->arch.vm_id = atomic_inc_return(&vm_count);
+#endif
return kvm;
}
@@ -170,11 +331,15 @@
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
- return kvmppc_core_vcpu_create(kvm, id);
+ struct kvm_vcpu *vcpu;
+ vcpu = kvmppc_core_vcpu_create(kvm, id);
+ kvmppc_create_vcpu_debugfs(vcpu, id);
+ return vcpu;
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
+ kvmppc_remove_vcpu_debugfs(vcpu);
kvmppc_core_vcpu_free(vcpu);
}
--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at http://vger.kernel.org/majordomo-info.html